decode of CXL memory resources.  The 'Y' integer reflects the
                hardware port unique-id used in the hardware decoder target
                list.
+
+What:          /sys/bus/cxl/devices/decoderX.Y
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               CXL decoder objects are enumerated from either a platform
+               firmware description, or a CXL HDM decoder register set in a
+               PCIe device (see CXL 2.0 section 8.2.5.12 CXL HDM Decoder
+               Capability Structure). The 'X' in decoderX.Y represents the
+               cxl_port container of this decoder, and 'Y' represents the
+               instance id of a given decoder resource.
+
+What:          /sys/bus/cxl/devices/decoderX.Y/{start,size}
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               The 'start' and 'size' attributes together convey the physical
+               address base and number of bytes mapped in the decoder's decode
+               window. For decoders of devtype "cxl_decoder_root" the address
+               range is fixed. For decoders of devtype "cxl_decoder_switch" the
+               address is bounded by the decode range of the cxl_port ancestor
+               of the decoder's cxl_port, and dynamically updates based on the
+               active memory regions in that address space.
+
+What:          /sys/bus/cxl/devices/decoderX.Y/locked
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               CXL HDM decoders have the capability to lock the configuration
+               until the next device reset. For decoders of devtype
+               "cxl_decoder_root" there is no standard facility to unlock them.
+               For decoders of devtype "cxl_decoder_switch", a secondary bus
+               reset of the PCIe bridge that provides the bus for this
+               decoder's uport unlocks / resets the decoder.
+
+What:          /sys/bus/cxl/devices/decoderX.Y/target_list
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               Display a comma-separated list of the current decoder target
+               configuration. The list is ordered by the decoder's current
+               interleave order across its dport instances. Each entry in
+               the list is a dport id.
+
+What:          /sys/bus/cxl/devices/decoderX.Y/cap_{pmem,ram,type2,type3}
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               When a CXL decoder is of devtype "cxl_decoder_root", it
+               represents a fixed memory window identified by platform
+               firmware. A fixed window may only support a subset of memory
+               types. The 'cap_*' attributes indicate whether persistent
+               memory, volatile memory, accelerator memory, and / or expander
+               memory may be mapped behind this decoder's memory window.
+
+What:          /sys/bus/cxl/devices/decoderX.Y/target_type
+Date:          June, 2021
+KernelVersion: v5.14
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               When a CXL decoder is of devtype "cxl_decoder_switch", it can
+               optionally decode either accelerator memory (type-2) or expander
+               memory (type-3). The 'target_type' attribute indicates the
+               current setting which may dynamically change based on what
+               memory regions are activated in this decode hierarchy.
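+
To illustrate how these attributes are consumed together, the hedged userspace
sketch below walks a hypothetical decoder0.0 directory and prints its base,
size, lock state and target configuration. The decoder0.0 name and any values
it would print are placeholders for illustration, not output captured from this
patch:

#include <stdio.h>

/* Illustrative only: dump the ABI attributes of a hypothetical decoder0.0 */
static void show_attr(const char *name)
{
        char path[256], value[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/bus/cxl/devices/decoder0.0/%s", name);
        f = fopen(path, "r");
        if (!f)
                return;
        if (fgets(value, sizeof(value), f))
                printf("%s: %s", name, value);
        fclose(f);
}

int main(void)
{
        show_attr("start");        /* physical address base of the window */
        show_attr("size");         /* bytes mapped by the decode window */
        show_attr("locked");       /* 1 if the configuration is locked */
        show_attr("target_list");  /* comma-separated dport ids */
        return 0;
}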
 
        .attrs = cxl_base_attributes,
 };
 
+static ssize_t start_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       return sysfs_emit(buf, "%#llx\n", cxld->range.start);
+}
+static DEVICE_ATTR_RO(start);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       return sysfs_emit(buf, "%#llx\n", range_len(&cxld->range));
+}
+static DEVICE_ATTR_RO(size);
+
+#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
+static ssize_t name##_show(struct device *dev,                       \
+                          struct device_attribute *attr, char *buf) \
+{                                                                    \
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
+                                                                     \
+       return sysfs_emit(buf, "%s\n",                               \
+                         (cxld->flags & (flag)) ? "1" : "0");       \
+}                                                                    \
+static DEVICE_ATTR_RO(name)
+
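+/*
+ * Each invocation below expands to a <name>_show() routine and a read-only
+ * dev_attr_<name> attribute that reports "1" or "0" depending on whether the
+ * given CXL_DECODER_F_* flag is set in cxld->flags.
+ */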
+CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
+CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
+CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
+CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
+CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);
+
+static ssize_t target_type_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+
+       switch (cxld->target_type) {
+       case CXL_DECODER_ACCELERATOR:
+               return sysfs_emit(buf, "accelerator\n");
+       case CXL_DECODER_EXPANDER:
+               return sysfs_emit(buf, "expander\n");
+       }
+       return -ENXIO;
+}
+static DEVICE_ATTR_RO(target_type);
+
+static ssize_t target_list_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+       ssize_t offset = 0;
+       int i, rc = 0;
+
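+       /* Serialize with any concurrent update of the decoder's target list */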
+       device_lock(dev);
+       for (i = 0; i < cxld->interleave_ways; i++) {
+               struct cxl_dport *dport = cxld->target[i];
+               struct cxl_dport *next = NULL;
+
+               if (!dport)
+                       break;
+
+               if (i + 1 < cxld->interleave_ways)
+                       next = cxld->target[i + 1];
+               rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
+                                  next ? "," : "");
+               if (rc < 0)
+                       break;
+               offset += rc;
+       }
+       device_unlock(dev);
+
+       if (rc < 0)
+               return rc;
+
+       rc = sysfs_emit_at(buf, offset, "\n");
+       if (rc < 0)
+               return rc;
+
+       return offset + rc;
+}
+static DEVICE_ATTR_RO(target_list);
+
+static struct attribute *cxl_decoder_base_attrs[] = {
+       &dev_attr_start.attr,
+       &dev_attr_size.attr,
+       &dev_attr_locked.attr,
+       &dev_attr_target_list.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_base_attribute_group = {
+       .attrs = cxl_decoder_base_attrs,
+};
+
+static struct attribute *cxl_decoder_root_attrs[] = {
+       &dev_attr_cap_pmem.attr,
+       &dev_attr_cap_ram.attr,
+       &dev_attr_cap_type2.attr,
+       &dev_attr_cap_type3.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_root_attribute_group = {
+       .attrs = cxl_decoder_root_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
+       &cxl_decoder_root_attribute_group,
+       &cxl_decoder_base_attribute_group,
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+static struct attribute *cxl_decoder_switch_attrs[] = {
+       &dev_attr_target_type.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_decoder_switch_attribute_group = {
+       .attrs = cxl_decoder_switch_attrs,
+};
+
+static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
+       &cxl_decoder_switch_attribute_group,
+       &cxl_decoder_base_attribute_group,
+       &cxl_base_attribute_group,
+       NULL,
+};
+
+static void cxl_decoder_release(struct device *dev)
+{
+       struct cxl_decoder *cxld = to_cxl_decoder(dev);
+       struct cxl_port *port = to_cxl_port(dev->parent);
+
+       ida_free(&port->decoder_ida, cxld->id);
+       kfree(cxld);
+}
+
+static const struct device_type cxl_decoder_switch_type = {
+       .name = "cxl_decoder_switch",
+       .release = cxl_decoder_release,
+       .groups = cxl_decoder_switch_attribute_groups,
+};
+
+static const struct device_type cxl_decoder_root_type = {
+       .name = "cxl_decoder_root",
+       .release = cxl_decoder_release,
+       .groups = cxl_decoder_root_attribute_groups,
+};
+
+struct cxl_decoder *to_cxl_decoder(struct device *dev)
+{
+       if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release,
+                         "not a cxl_decoder device\n"))
+               return NULL;
+       return container_of(dev, struct cxl_decoder, dev);
+}
+
 static void cxl_dport_release(struct cxl_dport *dport)
 {
        list_del(&dport->list);
 
        port->uport = uport;
        port->component_reg_phys = component_reg_phys;
+       ida_init(&port->decoder_ida);
        INIT_LIST_HEAD(&port->dports);
 
        device_initialize(dev);
 }
 EXPORT_SYMBOL_GPL(cxl_add_dport);
 
+static struct cxl_decoder *
+cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
+                 resource_size_t len, int interleave_ways,
+                 int interleave_granularity, enum cxl_decoder_type type,
+                 unsigned long flags)
+{
+       struct cxl_decoder *cxld;
+       struct device *dev;
+       int rc = 0;
+
+       if (interleave_ways < 1)
+               return ERR_PTR(-EINVAL);
+
+       device_lock(&port->dev);
+       if (list_empty(&port->dports))
+               rc = -EINVAL;
+       device_unlock(&port->dev);
+       if (rc)
+               return ERR_PTR(rc);
+
+       cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
+       if (!cxld)
+               return ERR_PTR(-ENOMEM);
+
+       rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
+       if (rc < 0)
+               goto err;
+
+       *cxld = (struct cxl_decoder) {
+               .id = rc,
+               .range = {
+                       .start = base,
+                       .end = base + len - 1,
+               },
+               .flags = flags,
+               .interleave_ways = interleave_ways,
+               .interleave_granularity = interleave_granularity,
+               .target_type = type,
+       };
+
+       /* handle implied target_list */
+       if (interleave_ways == 1)
+               cxld->target[0] =
+                       list_first_entry(&port->dports, struct cxl_dport, list);
+       dev = &cxld->dev;
+       device_initialize(dev);
+       device_set_pm_not_required(dev);
+       dev->parent = &port->dev;
+       dev->bus = &cxl_bus_type;
+
+       /* root ports do not have a cxl_port_type parent */
+       if (port->dev.parent->type == &cxl_port_type)
+               dev->type = &cxl_decoder_switch_type;
+       else
+               dev->type = &cxl_decoder_root_type;
+
+       return cxld;
+err:
+       kfree(cxld);
+       return ERR_PTR(rc);
+}
+
+static void unregister_dev(void *dev)
+{
+       device_unregister(dev);
+}
+
+struct cxl_decoder *
+devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
+                    resource_size_t base, resource_size_t len,
+                    int interleave_ways, int interleave_granularity,
+                    enum cxl_decoder_type type, unsigned long flags)
+{
+       struct cxl_decoder *cxld;
+       struct device *dev;
+       int rc;
+
+       cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
+                                interleave_granularity, type, flags);
+       if (IS_ERR(cxld))
+               return cxld;
+
+       dev = &cxld->dev;
+       rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
+       if (rc)
+               goto err;
+
+       rc = device_add(dev);
+       if (rc)
+               goto err;
+
+       rc = devm_add_action_or_reset(host, unregister_dev, dev);
+       if (rc)
+               return ERR_PTR(rc);
+       return cxld;
+
+err:
+       put_device(dev);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+
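For reference, a hedged sketch of how an enumeration driver might call
devm_cxl_add_decoder() follows, written as it could appear in a file that
includes cxl.h. The example_register_decoder() name, the 256M window, and the
flag / interleave choices are hypothetical placeholders, not values taken from
this patch:

/*
 * Hypothetical caller sketch (not part of this patch): register a single
 * expander decoder for a firmware-described 256M window. 'host' and 'port'
 * are assumed to have been created, and 'port' populated with at least one
 * dport, by the caller. All numeric values are placeholders.
 */
static int example_register_decoder(struct device *host, struct cxl_port *port)
{
        struct cxl_decoder *cxld;

        cxld = devm_cxl_add_decoder(host, port, 1 /* nr_targets */,
                                    0x4c0000000ULL /* base */,
                                    SZ_256M /* len */,
                                    1 /* interleave_ways */,
                                    PAGE_SIZE /* interleave_granularity */,
                                    CXL_DECODER_EXPANDER,
                                    CXL_DECODER_F_PMEM | CXL_DECODER_F_TYPE3);
        if (IS_ERR(cxld))
                return PTR_ERR(cxld);

        dev_dbg(host, "added %s\n", dev_name(&cxld->dev));
        return 0;
}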
 /**
  * cxl_probe_component_regs() - Detect CXL Component register blocks
  * @dev: Host device of the @base mapping
 
 #define CXL_RESOURCE_NONE ((resource_size_t) -1)
 #define CXL_TARGET_STRLEN 20
 
+/*
+ * cxl_decoder flags that define the type of memory / devices this
+ * decoder supports as well as configuration lock status. See "CXL 2.0
+ * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
+ */
+#define CXL_DECODER_F_RAM   BIT(0)
+#define CXL_DECODER_F_PMEM  BIT(1)
+#define CXL_DECODER_F_TYPE2 BIT(2)
+#define CXL_DECODER_F_TYPE3 BIT(3)
+#define CXL_DECODER_F_LOCK  BIT(4)
+#define CXL_DECODER_F_MASK  GENMASK(4, 0)
+
+enum cxl_decoder_type {
+       CXL_DECODER_ACCELERATOR = 2,
+       CXL_DECODER_EXPANDER = 3,
+};
+
+/**
+ * struct cxl_decoder - CXL address range decode configuration
+ * @dev: this decoder's device
+ * @id: kernel device name id
+ * @range: address range considered by this decoder
+ * @interleave_ways: number of cxl_dports in this decode
+ * @interleave_granularity: data stride per dport
+ * @target_type: accelerator vs expander (type2 vs type3) selector
+ * @flags: memory type capabilities and locking
+ * @target: active ordered target list in current decoder configuration
+ */
+struct cxl_decoder {
+       struct device dev;
+       int id;
+       struct range range;
+       int interleave_ways;
+       int interleave_granularity;
+       enum cxl_decoder_type target_type;
+       unsigned long flags;
+       struct cxl_dport *target[];
+};
+
 /**
  * struct cxl_port - logical collection of upstream port devices and
  *                  downstream port devices to construct a CXL memory
  * @uport: PCI or platform device implementing the upstream port capability
  * @id: id for port device-name
  * @dports: cxl_dport instances referenced by decoders
+ * @decoder_ida: allocator for decoder ids
  * @component_reg_phys: component register capability base address (optional)
  */
 struct cxl_port {
        struct device *uport;
        int id;
        struct list_head dports;
+       struct ida decoder_ida;
        resource_size_t component_reg_phys;
 };
 
 
 int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
                  resource_size_t component_reg_phys);
+
+struct cxl_decoder *to_cxl_decoder(struct device *dev);
+struct cxl_decoder *
+devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
+                    resource_size_t base, resource_size_t len,
+                    int interleave_ways, int interleave_granularity,
+                    enum cxl_decoder_type type, unsigned long flags);
+
+/*
+ * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
+ * single-ported host-bridges need not publish a decoder capability when a
+ * passthrough decode can be assumed, i.e. all transactions that the uport sees
+ * are claimed and passed to the single dport. Default the range to a
+ * zero-base, zero-length extent until the first CXL region is activated.
+ */
+static inline struct cxl_decoder *
+devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
+{
+       return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
+                                   CXL_DECODER_EXPANDER, 0);
+}
+
 extern struct bus_type cxl_bus_type;
 #endif /* __CXL_H__ */