cxl/hdm: Track next decoder to allocate
author Dan Williams <dan.j.williams@intel.com>
Tue, 24 May 2022 19:04:58 +0000 (12:04 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Fri, 22 Jul 2022 00:19:23 +0000 (17:19 -0700)
The CXL specification enforces that endpoint decoders are committed in
hw instance id order. In preparation for adding dynamic DPA allocation,
record the hw instance id in endpoint decoders, and enforce allocations
to occur in hw instance id order.
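
As a rough, self-contained sketch of the invariant this patch enforces
(user-space C; the struct and function names below are illustrative, not
the kernel API):

    /* Hypothetical model of the in-order allocation rule: a port-level
     * counter that only admits an allocation at id hdm_end + 1 and
     * unwinds on release, mirroring the HDM commit ordering requirement
     * that decoder[m] be committed before decoder[m+1]. */
    #include <stdio.h>

    struct port_state {
            int hdm_end; /* id of the last allocated decoder, -1 when none */
    };

    static int reserve_decoder(struct port_state *p, int id)
    {
            if (p->hdm_end + 1 != id)
                    return -1; /* out of order; the kernel returns -EBUSY */
            p->hdm_end++;
            return 0;
    }

    static void release_decoder(struct port_state *p)
    {
            p->hdm_end--; /* assumes releases happen in reverse id order */
    }

    int main(void)
    {
            struct port_state port = { .hdm_end = -1 };

            printf("%d\n", reserve_decoder(&port, 0)); /* 0: decoder0 ok */
            printf("%d\n", reserve_decoder(&port, 2)); /* -1: skips decoder1 */
            printf("%d\n", reserve_decoder(&port, 1)); /* 0: decoder1 ok */
            release_decoder(&port); /* decoder0 is last allocated again */
            return 0;
    }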

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/165784328827.1758207.9627538529944559954.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/cxl/core/hdm.c
drivers/cxl/core/port.c
drivers/cxl/cxl.h

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index c2cff5783fda8bd3398d1699c2d0674cd6ba1100..14354f4cd92e5f576e52f961ee7b791d3209e361 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -160,6 +160,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
 {
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;
@@ -173,6 +174,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
+       port->hdm_end--;
 }
 
 static void cxl_dpa_release(void *cxled)
@@ -203,6 +205,18 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                return -EBUSY;
        }
 
+       if (port->hdm_end + 1 != cxled->cxld.id) {
+               /*
+                * Assumes alloc and commit order always follows hardware
+                * instance order, per the expectation from 8.2.5.12.20
+                * Committing Decoder Programming that decoder[m] must be
+                * committed before decoder[m+1] commit can start.
+                */
+               dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
+                       cxled->cxld.id, port->id, port->hdm_end + 1);
+               return -EBUSY;
+       }
+
        if (skipped) {
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
@@ -235,6 +249,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                        cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        }
+       port->hdm_end++;
 
        return 0;
 }
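
With this guard in place, an out-of-order reservation (for example, for
decoder2 while hdm_end is 0) fails fast with -EBUSY and the dev_dbg()
message names the expected instance. The decrement in __cxl_dpa_release()
above pairs with the increment here, relying on releases unwinding in
reverse instance order so that hdm_end always names the last allocated
decoder.
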
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 9a312f2e2d3542f782042d2eda5f18f140fa67c1..635d63e2f8a8d872bb2d73485d48945119817e2c 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -502,6 +502,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
 
        port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
+       port->hdm_end = -1;
        INIT_LIST_HEAD(&port->dports);
        INIT_LIST_HEAD(&port->endpoints);
 
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index afaa76f065f9bc43828e76565e5ae3ea35cc1829..14e1c2c08e09dfa1f828362c1db4bf7e91e05970 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -333,6 +333,7 @@ struct cxl_nvdimm {
  * @dports: cxl_dport instances referenced by decoders
  * @endpoints: cxl_ep instances, endpoints that are a descendant of this port
  * @decoder_ida: allocator for decoder ids
+ * @hdm_end: track last allocated HDM decoder instance for allocation ordering
  * @component_reg_phys: component register capability base address (optional)
  * @dead: last ep has been removed, force port re-creation
  * @depth: How deep this port is relative to the root. depth 0 is the root.
@@ -347,6 +348,7 @@ struct cxl_port {
        struct list_head dports;
        struct list_head endpoints;
        struct ida decoder_ida;
+       int hdm_end;
        resource_size_t component_reg_phys;
        bool dead;
        unsigned int depth;
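
Taken together, the lifecycle of @hdm_end is small and symmetric:
cxl_port_alloc() initializes it to -1 (no decoders allocated),
__cxl_dpa_reserve() increments it on success, and __cxl_dpa_release()
decrements it, so the ordering check stays a single integer comparison
against the next expected hardware instance id.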