dmaengine: idxd: add support for readonly config mode
author     Dave Jiang <dave.jiang@intel.com>
Tue, 20 Apr 2021 18:46:28 +0000 (11:46 -0700)
committer  Vinod Koul <vkoul@kernel.org>
Fri, 23 Apr 2021 17:38:44 +0000 (23:08 +0530)
The read-only configuration mode is defined by the DSA spec as a mode of
the device WQ configuration. When bit 31 of the GENCAP register is 0,
the device is in RO mode: the group configuration and some fields of the
workqueue configuration registers are read-only and reflect the fixed
configuration of the device. Add support for RO mode. The driver will
load the values from the registers directly and set up all the internally
cached data structures based on the device configuration.
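
For context, the following is a minimal illustrative sketch (not part of this
patch) of how GENCAP bit 31 could be translated into the IDXD_FLAG_CONFIGURABLE
flag that the hunks below test. The IDXD_GENCAP_OFFSET macro and the helper
name are assumptions for illustration, not code taken from this patch.

    /*
     * Illustrative sketch only (not part of this patch): derive the
     * configurable vs. read-only state from GENCAP bit 31. The
     * IDXD_GENCAP_OFFSET macro and the helper name are assumed here;
     * the patch itself only relies on the IDXD_FLAG_CONFIGURABLE
     * checks shown in the hunks below.
     */
    static void example_detect_ro_mode(struct idxd_device *idxd)
    {
            u64 gencap = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);

            if (gencap & BIT_ULL(31)) {
                    /* bit 31 set: WQ/group config registers are writable */
                    set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
            } else {
                    /* bit 31 clear: RO mode, config must be read back from the registers */
                    dev_dbg(&idxd->pdev->dev, "device WQ/group config is read-only\n");
            }
    }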

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/161894438847.3202472.6317563824045432727.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/sysfs.c

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 6d674fdedb4d327206bb560009ce24f022eaee6f..3ddb1c7310800df47f5f95b0d1efe8a8b91c5a40 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -884,3 +884,119 @@ int idxd_device_config(struct idxd_device *idxd)
 
        return 0;
 }
+
+static int idxd_wq_load_config(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int wqcfg_offset;
+       int i;
+
+       wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
+       memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
+
+       wq->size = wq->wqcfg->wq_size;
+       wq->threshold = wq->wqcfg->wq_thresh;
+       if (wq->wqcfg->priv)
+               wq->type = IDXD_WQT_KERNEL;
+
+       /* The driver does not support shared WQ mode in read-only config yet */
+       if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
+               return -EOPNOTSUPP;
+
+       set_bit(WQ_FLAG_DEDICATED, &wq->flags);
+
+       wq->priority = wq->wqcfg->priority;
+
+       for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+               wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
+               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
+       }
+
+       return 0;
+}
+
+static void idxd_group_load_config(struct idxd_group *group)
+{
+       struct idxd_device *idxd = group->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       int i, j, grpcfg_offset;
+
+       /*
+        * Load WQS bit fields
+        * Iterate through all 256 bits 64 bits at a time
+        */
+       for (i = 0; i < GRPWQCFG_STRIDES; i++) {
+               struct idxd_wq *wq;
+
+               grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
+               group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
+               dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
+                       group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
+
+               if (i * 64 >= idxd->max_wqs)
+                       break;
+
+               /* Iterate through all 64 bits and check for wq set */
+               for (j = 0; j < 64; j++) {
+                       int id = i * 64 + j;
+
+                       /* No need to check beyond max wqs */
+                       if (id >= idxd->max_wqs)
+                               break;
+
+                       /* Set group assignment for wq if wq bit is set */
+                       if (group->grpcfg.wqs[i] & BIT(j)) {
+                               wq = idxd->wqs[id];
+                               wq->group = group;
+                       }
+               }
+       }
+
+       grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
+       group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
+               grpcfg_offset, group->grpcfg.engines);
+
+       /* Iterate through all 64 bits to check engines set */
+       for (i = 0; i < 64; i++) {
+               if (i >= idxd->max_engines)
+                       break;
+
+               if (group->grpcfg.engines & BIT(i)) {
+                       struct idxd_engine *engine = idxd->engines[i];
+
+                       engine->group = group;
+               }
+       }
+
+       grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
+       group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
+       dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
+               group->id, grpcfg_offset, group->grpcfg.flags.bits);
+}
+
+int idxd_device_load_config(struct idxd_device *idxd)
+{
+       union gencfg_reg reg;
+       int i, rc;
+
+       reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       idxd->token_limit = reg.token_limit;
+
+       for (i = 0; i < idxd->max_groups; i++) {
+               struct idxd_group *group = idxd->groups[i];
+
+               idxd_group_load_config(group);
+       }
+
+       for (i = 0; i < idxd->max_wqs; i++) {
+               struct idxd_wq *wq = idxd->wqs[i];
+
+               rc = idxd_wq_load_config(wq);
+               if (rc < 0)
+                       return rc;
+       }
+
+       return 0;
+}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 1b539cbf3c14a205e5c49c4707c160b6090287a6..940a2e1ddf12b48116343dd53ffcc8540586e82d 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -384,6 +384,7 @@ void idxd_device_cleanup(struct idxd_device *idxd);
 int idxd_device_config(struct idxd_device *idxd);
 void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
+int idxd_device_load_config(struct idxd_device *idxd);
 
 /* work queue control */
 int idxd_wq_alloc_resources(struct idxd_wq *wq);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index eda5ffd307c60ade49540b9fc569229374c1a360..a07e6d8eec00fabd300ee9364452439ac608c050 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -482,6 +482,14 @@ static int idxd_probe(struct idxd_device *idxd)
        if (rc)
                goto err;
 
+       /* If the configs are readonly, then load them from device */
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+               dev_dbg(dev, "Loading RO device config\n");
+               rc = idxd_device_load_config(idxd);
+               if (rc < 0)
+                       goto err;
+       }
+
        rc = idxd_setup_interrupts(idxd);
        if (rc)
                goto err;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index dad5c0be9ae87a5ddefcdceca9ce7d925bdcfa5e..d45cb61f300bceb79bdc86dbed3a19dfe1b2b7cf 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -110,7 +110,8 @@ static int enable_wq(struct idxd_wq *wq)
        }
 
        spin_lock_irqsave(&idxd->dev_lock, flags);
-       rc = idxd_device_config(idxd);
+       if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               rc = idxd_device_config(idxd);
        spin_unlock_irqrestore(&idxd->dev_lock, flags);
        if (rc < 0) {
                mutex_unlock(&wq->wq_lock);
@@ -170,7 +171,7 @@ static int enable_wq(struct idxd_wq *wq)
 
 static int idxd_config_bus_probe(struct device *dev)
 {
-       int rc;
+       int rc = 0;
        unsigned long flags;
 
        dev_dbg(dev, "%s called\n", __func__);
@@ -188,7 +189,8 @@ static int idxd_config_bus_probe(struct device *dev)
 
                /* Perform IDXD configuration and enabling */
                spin_lock_irqsave(&idxd->dev_lock, flags);
-               rc = idxd_device_config(idxd);
+               if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+                       rc = idxd_device_config(idxd);
                spin_unlock_irqrestore(&idxd->dev_lock, flags);
                if (rc < 0) {
                        module_put(THIS_MODULE);
@@ -287,12 +289,14 @@ static int idxd_config_bus_remove(struct device *dev)
 
                idxd_unregister_dma_device(idxd);
                rc = idxd_device_disable(idxd);
-               for (i = 0; i < idxd->max_wqs; i++) {
-                       struct idxd_wq *wq = idxd->wqs[i];
-
-                       mutex_lock(&wq->wq_lock);
-                       idxd_wq_disable_cleanup(wq);
-                       mutex_unlock(&wq->wq_lock);
+               if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+                       for (i = 0; i < idxd->max_wqs; i++) {
+                               struct idxd_wq *wq = idxd->wqs[i];
+
+                               mutex_lock(&wq->wq_lock);
+                               idxd_wq_disable_cleanup(wq);
+                               mutex_unlock(&wq->wq_lock);
+                       }
                }
                module_put(THIS_MODULE);
                if (rc < 0)