}
 EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
 
+/*
+ * Release @cxled's DPA span (and any preceding "skipped" span it
+ * reserved) back to the memdev's dpa_res tree, and clear the decoder's
+ * allocation bookkeeping.
+ *
+ * Must be called in a context that synchronizes against this decoder's
+ * port ->remove() callback (like an endpoint decoder sysfs attribute)
+ */
+static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
+{
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct resource *res = cxled->dpa_res;
+       resource_size_t skip_start;
+
+       lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+       /* save @skip_start, before @res is released */
+       skip_start = res->start - cxled->skip;
+       __release_region(&cxlds->dpa_res, res->start, resource_size(res));
+       /* a non-zero skip means __cxl_dpa_reserve() also claimed the gap */
+       if (cxled->skip)
+               __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+       cxled->skip = 0;
+       cxled->dpa_res = NULL;
+}
+
+/*
+ * devm action callback, registered in devm_cxl_dpa_reserve(): take the
+ * DPA lock and undo the decoder's DPA reservation when the port device
+ * is unbound.
+ */
+static void cxl_dpa_release(void *cxled)
+{
+       down_write(&cxl_dpa_rwsem);
+       __cxl_dpa_release(cxled);
+       up_write(&cxl_dpa_rwsem);
+}
+
+/*
+ * Reserve @len bytes of DPA at @base for @cxled out of the memdev's
+ * dpa_res tree, plus an optional @skipped-byte span ending at @base.
+ * Records the result in @cxled->dpa_res / @cxled->skip.
+ *
+ * Returns 0 on success (or for a zero-length request), -EBUSY if the
+ * decoder already has an allocation or the range is unavailable.
+ * Caller must hold cxl_dpa_rwsem for write.
+ */
+static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+                            resource_size_t base, resource_size_t len,
+                            resource_size_t skipped)
+{
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_port *port = cxled_to_port(cxled);
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct device *dev = &port->dev;
+       struct resource *res;
+
+       lockdep_assert_held_write(&cxl_dpa_rwsem);
+
+       if (!len)
+               return 0;
+
+       /* a decoder gets at most one DPA allocation */
+       if (cxled->dpa_res) {
+               dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+                       port->id, cxled->cxld.id, cxled->dpa_res);
+               return -EBUSY;
+       }
+
+       /* claim the skipped span [base - skipped, base) first */
+       if (skipped) {
+               res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
+                                      dev_name(&cxled->cxld.dev), 0);
+               if (!res) {
+                       dev_dbg(dev,
+                               "decoder%d.%d: failed to reserve skipped space\n",
+                               port->id, cxled->cxld.id);
+                       return -EBUSY;
+               }
+       }
+       res = __request_region(&cxlds->dpa_res, base, len,
+                              dev_name(&cxled->cxld.dev), 0);
+       if (!res) {
+               dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
+                       port->id, cxled->cxld.id);
+               /* unwind the skipped-span reservation on failure */
+               if (skipped)
+                       __release_region(&cxlds->dpa_res, base - skipped,
+                                        skipped);
+               return -EBUSY;
+       }
+       cxled->dpa_res = res;
+       cxled->skip = skipped;
+
+       return 0;
+}
+
+/*
+ * Locked wrapper for __cxl_dpa_reserve() that also arranges for the
+ * reservation to be released (cxl_dpa_release()) automatically when
+ * the endpoint port's device is unbound.
+ */
+static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+                               resource_size_t base, resource_size_t len,
+                               resource_size_t skipped)
+{
+       struct cxl_port *port = cxled_to_port(cxled);
+       int rc;
+
+       down_write(&cxl_dpa_rwsem);
+       rc = __cxl_dpa_reserve(cxled, base, len, skipped);
+       up_write(&cxl_dpa_rwsem);
+
+       if (rc)
+               return rc;
+
+       return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
+}
+
 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
-                           int *target_map, void __iomem *hdm, int which)
+                           int *target_map, void __iomem *hdm, int which,
+                           u64 *dpa_base)
 {
-       u64 size, base;
+       struct cxl_endpoint_decoder *cxled = NULL;
+       u64 size, base, skip, dpa_size;
+       bool committed;
+       u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                unsigned char target_id[8];
        } target_list;
 
+       if (is_endpoint_decoder(&cxld->dev))
+               cxled = to_cxl_endpoint_decoder(&cxld->dev);
+
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+       committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
 
-       if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED))
+       if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                .end = base + size - 1,
        };
 
-       /* switch decoders are always enabled if committed */
-       if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) {
+       /* decoders are enabled if committed */
+       if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
        if (rc)
                return rc;
 
-       if (is_endpoint_decoder(&cxld->dev))
+       if (!cxled) {
+               target_list.value =
+                       ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+               for (i = 0; i < cxld->interleave_ways; i++)
+                       target_map[i] = target_list.target_id[i];
+
                return 0;
+       }
 
-       target_list.value =
-               ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
-       for (i = 0; i < cxld->interleave_ways; i++)
-               target_map[i] = target_list.target_id[i];
+       if (!committed)
+               return 0;
 
+       /* a committed endpoint decoder's DPA span is size / interleave_ways */
+       dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
+       if (remainder) {
+               dev_err(&port->dev,
+                       "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
+                       port->id, cxld->id, size, cxld->interleave_ways);
+               return -ENXIO;
+       }
+       skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+       rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
+       if (rc) {
+               dev_err(&port->dev,
+                       "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
+                       port->id, cxld->id, *dpa_base,
+                       *dpa_base + dpa_size + skip - 1, rc);
+               return rc;
+       }
+       *dpa_base += dpa_size + skip;
+       return 0;
 }
 
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i, committed;
+       u64 dpa_base = 0;
        u32 ctrl;
 
        /*
                        cxld = &cxlsd->cxld;
                }
 
-               rc = init_hdm_decoder(port, cxld, target_map, hdm, i);
+               rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
                if (rc) {
                        put_device(&cxld->dev);
                        return rc;