vfio-pci: Re-indent what was vfio_pci_core_ioctl()
author Jason Gunthorpe <jgg@nvidia.com>
Wed, 31 Aug 2022 20:15:58 +0000 (17:15 -0300)
committer Alex Williamson <alex.williamson@redhat.com>
Thu, 1 Sep 2022 21:29:11 +0000 (15:29 -0600)
Done mechanically with:

 $ git clang-format-14 -i --lines 675:1210 drivers/vfio/pci/vfio_pci_core.c

And manually reflowed the multi-line comments that clang-format doesn't fix.

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v2-0f9e632d54fb+d6-vfio_ioctl_split_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
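
A quick way to review a mechanical re-indent like this one is to let git hide the whitespace churn so that only the lines needing human attention remain. This is a suggested check, not part of the patch, and the HEAD~1..HEAD range assumes the commit sits at the tip of the branch:

 $ # -w ignores whitespace-only changes; what remains should be mostly the
 $ # call arguments clang-format re-wrapped across lines plus the manually
 $ # reflowed comment blocks.
 $ git diff -w HEAD~1..HEAD -- drivers/vfio/pci/vfio_pci_core.c

Anything else showing up in that output would indicate the change was not purely mechanical.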
drivers/vfio/pci/vfio_pci_core.c

index 85b9720e77d284e705e546dcac368670cf7b44e5..8bff8ab5e807b9fe63d159e215b11e7edee112db 100644
@@ -693,309 +693,300 @@ static int vfio_pci_ioctl_get_info(struct vfio_pci_core_device *vdev,
                                   void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
-               struct vfio_device_info info;
-               struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-               unsigned long capsz;
-               int ret;
+       struct vfio_device_info info;
+       struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+       unsigned long capsz;
+       int ret;
 
-               /* For backward compatibility, cannot require this */
-               capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
+       /* For backward compatibility, cannot require this */
+       capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
 
-               if (copy_from_user(&info, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&info, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (info.argsz < minsz)
-                       return -EINVAL;
+       if (info.argsz < minsz)
+               return -EINVAL;
 
-               if (info.argsz >= capsz) {
-                       minsz = capsz;
-                       info.cap_offset = 0;
-               }
+       if (info.argsz >= capsz) {
+               minsz = capsz;
+               info.cap_offset = 0;
+       }
 
-               info.flags = VFIO_DEVICE_FLAGS_PCI;
+       info.flags = VFIO_DEVICE_FLAGS_PCI;
 
-               if (vdev->reset_works)
-                       info.flags |= VFIO_DEVICE_FLAGS_RESET;
+       if (vdev->reset_works)
+               info.flags |= VFIO_DEVICE_FLAGS_RESET;
 
-               info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
-               info.num_irqs = VFIO_PCI_NUM_IRQS;
+       info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
+       info.num_irqs = VFIO_PCI_NUM_IRQS;
 
-               ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
-               if (ret && ret != -ENODEV) {
-                       pci_warn(vdev->pdev, "Failed to setup zPCI info capabilities\n");
-                       return ret;
-               }
+       ret = vfio_pci_info_zdev_add_caps(vdev, &caps);
+       if (ret && ret != -ENODEV) {
+               pci_warn(vdev->pdev,
+                        "Failed to setup zPCI info capabilities\n");
+               return ret;
+       }
 
-               if (caps.size) {
-                       info.flags |= VFIO_DEVICE_FLAGS_CAPS;
-                       if (info.argsz < sizeof(info) + caps.size) {
-                               info.argsz = sizeof(info) + caps.size;
-                       } else {
-                               vfio_info_cap_shift(&caps, sizeof(info));
-                               if (copy_to_user((void __user *)arg +
-                                                 sizeof(info), caps.buf,
-                                                 caps.size)) {
-                                       kfree(caps.buf);
-                                       return -EFAULT;
-                               }
-                               info.cap_offset = sizeof(info);
+       if (caps.size) {
+               info.flags |= VFIO_DEVICE_FLAGS_CAPS;
+               if (info.argsz < sizeof(info) + caps.size) {
+                       info.argsz = sizeof(info) + caps.size;
+               } else {
+                       vfio_info_cap_shift(&caps, sizeof(info));
+                       if (copy_to_user((void __user *)arg + sizeof(info),
+                                        caps.buf, caps.size)) {
+                               kfree(caps.buf);
+                               return -EFAULT;
                        }
-
-                       kfree(caps.buf);
+                       info.cap_offset = sizeof(info);
                }
 
-               return copy_to_user((void __user *)arg, &info, minsz) ?
-                       -EFAULT : 0;
+               kfree(caps.buf);
+       }
+
+       return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 }
 
 static int vfio_pci_ioctl_get_region_info(struct vfio_pci_core_device *vdev,
                                          void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_region_info, offset);
-               struct pci_dev *pdev = vdev->pdev;
-               struct vfio_region_info info;
-               struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
-               int i, ret;
+       struct pci_dev *pdev = vdev->pdev;
+       struct vfio_region_info info;
+       struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+       int i, ret;
 
-               if (copy_from_user(&info, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&info, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (info.argsz < minsz)
-                       return -EINVAL;
+       if (info.argsz < minsz)
+               return -EINVAL;
 
-               switch (info.index) {
-               case VFIO_PCI_CONFIG_REGION_INDEX:
-                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                       info.size = pdev->cfg_size;
-                       info.flags = VFIO_REGION_INFO_FLAG_READ |
-                                    VFIO_REGION_INFO_FLAG_WRITE;
+       switch (info.index) {
+       case VFIO_PCI_CONFIG_REGION_INDEX:
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.size = pdev->cfg_size;
+               info.flags = VFIO_REGION_INFO_FLAG_READ |
+                            VFIO_REGION_INFO_FLAG_WRITE;
+               break;
+       case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.size = pci_resource_len(pdev, info.index);
+               if (!info.size) {
+                       info.flags = 0;
                        break;
-               case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
-                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                       info.size = pci_resource_len(pdev, info.index);
-                       if (!info.size) {
-                               info.flags = 0;
-                               break;
-                       }
+               }
 
-                       info.flags = VFIO_REGION_INFO_FLAG_READ |
-                                    VFIO_REGION_INFO_FLAG_WRITE;
-                       if (vdev->bar_mmap_supported[info.index]) {
-                               info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
-                               if (info.index == vdev->msix_bar) {
-                                       ret = msix_mmappable_cap(vdev, &caps);
-                                       if (ret)
-                                               return ret;
-                               }
+               info.flags = VFIO_REGION_INFO_FLAG_READ |
+                            VFIO_REGION_INFO_FLAG_WRITE;
+               if (vdev->bar_mmap_supported[info.index]) {
+                       info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+                       if (info.index == vdev->msix_bar) {
+                               ret = msix_mmappable_cap(vdev, &caps);
+                               if (ret)
+                                       return ret;
                        }
+               }
 
-                       break;
-               case VFIO_PCI_ROM_REGION_INDEX:
-               {
-                       void __iomem *io;
-                       size_t size;
-                       u16 cmd;
+               break;
+       case VFIO_PCI_ROM_REGION_INDEX: {
+               void __iomem *io;
+               size_t size;
+               u16 cmd;
+
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.flags = 0;
+
+               /* Report the BAR size, not the ROM size */
+               info.size = pci_resource_len(pdev, info.index);
+               if (!info.size) {
+                       /* Shadow ROMs appear as PCI option ROMs */
+                       if (pdev->resource[PCI_ROM_RESOURCE].flags &
+                           IORESOURCE_ROM_SHADOW)
+                               info.size = 0x20000;
+                       else
+                               break;
+               }
 
-                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                       info.flags = 0;
+               /*
+                * Is it really there?  Enable memory decode for implicit access
+                * in pci_map_rom().
+                */
+               cmd = vfio_pci_memory_lock_and_enable(vdev);
+               io = pci_map_rom(pdev, &size);
+               if (io) {
+                       info.flags = VFIO_REGION_INFO_FLAG_READ;
+                       pci_unmap_rom(pdev, io);
+               } else {
+                       info.size = 0;
+               }
+               vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
-                       /* Report the BAR size, not the ROM size */
-                       info.size = pci_resource_len(pdev, info.index);
-                       if (!info.size) {
-                               /* Shadow ROMs appear as PCI option ROMs */
-                               if (pdev->resource[PCI_ROM_RESOURCE].flags &
-                                                       IORESOURCE_ROM_SHADOW)
-                                       info.size = 0x20000;
-                               else
-                                       break;
-                       }
+               break;
+       }
+       case VFIO_PCI_VGA_REGION_INDEX:
+               if (!vdev->has_vga)
+                       return -EINVAL;
 
-                       /*
-                        * Is it really there?  Enable memory decode for
-                        * implicit access in pci_map_rom().
-                        */
-                       cmd = vfio_pci_memory_lock_and_enable(vdev);
-                       io = pci_map_rom(pdev, &size);
-                       if (io) {
-                               info.flags = VFIO_REGION_INFO_FLAG_READ;
-                               pci_unmap_rom(pdev, io);
-                       } else {
-                               info.size = 0;
-                       }
-                       vfio_pci_memory_unlock_and_restore(vdev, cmd);
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.size = 0xc0000;
+               info.flags = VFIO_REGION_INFO_FLAG_READ |
+                            VFIO_REGION_INFO_FLAG_WRITE;
 
-                       break;
-               }
-               case VFIO_PCI_VGA_REGION_INDEX:
-                       if (!vdev->has_vga)
-                               return -EINVAL;
+               break;
+       default: {
+               struct vfio_region_info_cap_type cap_type = {
+                       .header.id = VFIO_REGION_INFO_CAP_TYPE,
+                       .header.version = 1
+               };
 
-                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                       info.size = 0xc0000;
-                       info.flags = VFIO_REGION_INFO_FLAG_READ |
-                                    VFIO_REGION_INFO_FLAG_WRITE;
-
-                       break;
-               default:
-               {
-                       struct vfio_region_info_cap_type cap_type = {
-                                       .header.id = VFIO_REGION_INFO_CAP_TYPE,
-                                       .header.version = 1 };
+               if (info.index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+                       return -EINVAL;
+               info.index = array_index_nospec(
+                       info.index, VFIO_PCI_NUM_REGIONS + vdev->num_regions);
 
-                       if (info.index >=
-                           VFIO_PCI_NUM_REGIONS + vdev->num_regions)
-                               return -EINVAL;
-                       info.index = array_index_nospec(info.index,
-                                                       VFIO_PCI_NUM_REGIONS +
-                                                       vdev->num_regions);
+               i = info.index - VFIO_PCI_NUM_REGIONS;
 
-                       i = info.index - VFIO_PCI_NUM_REGIONS;
+               info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+               info.size = vdev->region[i].size;
+               info.flags = vdev->region[i].flags;
 
-                       info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-                       info.size = vdev->region[i].size;
-                       info.flags = vdev->region[i].flags;
+               cap_type.type = vdev->region[i].type;
+               cap_type.subtype = vdev->region[i].subtype;
 
-                       cap_type.type = vdev->region[i].type;
-                       cap_type.subtype = vdev->region[i].subtype;
+               ret = vfio_info_add_capability(&caps, &cap_type.header,
+                                              sizeof(cap_type));
+               if (ret)
+                       return ret;
 
-                       ret = vfio_info_add_capability(&caps, &cap_type.header,
-                                                      sizeof(cap_type));
+               if (vdev->region[i].ops->add_capability) {
+                       ret = vdev->region[i].ops->add_capability(
+                               vdev, &vdev->region[i], &caps);
                        if (ret)
                                return ret;
-
-                       if (vdev->region[i].ops->add_capability) {
-                               ret = vdev->region[i].ops->add_capability(vdev,
-                                               &vdev->region[i], &caps);
-                               if (ret)
-                                       return ret;
-                       }
-               }
                }
+       }
+       }
 
-               if (caps.size) {
-                       info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
-                       if (info.argsz < sizeof(info) + caps.size) {
-                               info.argsz = sizeof(info) + caps.size;
-                               info.cap_offset = 0;
-                       } else {
-                               vfio_info_cap_shift(&caps, sizeof(info));
-                               if (copy_to_user((void __user *)arg +
-                                                 sizeof(info), caps.buf,
-                                                 caps.size)) {
-                                       kfree(caps.buf);
-                                       return -EFAULT;
-                               }
-                               info.cap_offset = sizeof(info);
+       if (caps.size) {
+               info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+               if (info.argsz < sizeof(info) + caps.size) {
+                       info.argsz = sizeof(info) + caps.size;
+                       info.cap_offset = 0;
+               } else {
+                       vfio_info_cap_shift(&caps, sizeof(info));
+                       if (copy_to_user((void __user *)arg + sizeof(info),
+                                        caps.buf, caps.size)) {
+                               kfree(caps.buf);
+                               return -EFAULT;
                        }
-
-                       kfree(caps.buf);
+                       info.cap_offset = sizeof(info);
                }
 
-               return copy_to_user((void __user *)arg, &info, minsz) ?
-                       -EFAULT : 0;
+               kfree(caps.buf);
+       }
+
+       return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 }
 
 static int vfio_pci_ioctl_get_irq_info(struct vfio_pci_core_device *vdev,
                                       void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_irq_info, count);
-               struct vfio_irq_info info;
+       struct vfio_irq_info info;
 
-               if (copy_from_user(&info, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&info, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
-                       return -EINVAL;
+       if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+               return -EINVAL;
 
-               switch (info.index) {
-               case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
-               case VFIO_PCI_REQ_IRQ_INDEX:
+       switch (info.index) {
+       case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+       case VFIO_PCI_REQ_IRQ_INDEX:
+               break;
+       case VFIO_PCI_ERR_IRQ_INDEX:
+               if (pci_is_pcie(vdev->pdev))
                        break;
-               case VFIO_PCI_ERR_IRQ_INDEX:
-                       if (pci_is_pcie(vdev->pdev))
-                               break;
-                       fallthrough;
-               default:
-                       return -EINVAL;
-               }
+               fallthrough;
+       default:
+               return -EINVAL;
+       }
 
-               info.flags = VFIO_IRQ_INFO_EVENTFD;
+       info.flags = VFIO_IRQ_INFO_EVENTFD;
 
-               info.count = vfio_pci_get_irq_count(vdev, info.index);
+       info.count = vfio_pci_get_irq_count(vdev, info.index);
 
-               if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
-                       info.flags |= (VFIO_IRQ_INFO_MASKABLE |
-                                      VFIO_IRQ_INFO_AUTOMASKED);
-               else
-                       info.flags |= VFIO_IRQ_INFO_NORESIZE;
+       if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+               info.flags |=
+                       (VFIO_IRQ_INFO_MASKABLE | VFIO_IRQ_INFO_AUTOMASKED);
+       else
+               info.flags |= VFIO_IRQ_INFO_NORESIZE;
 
-               return copy_to_user((void __user *)arg, &info, minsz) ?
-                       -EFAULT : 0;
+       return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 }
 
 static int vfio_pci_ioctl_set_irqs(struct vfio_pci_core_device *vdev,
                                   void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_irq_set, count);
-               struct vfio_irq_set hdr;
-               u8 *data = NULL;
-               int max, ret = 0;
-               size_t data_size = 0;
+       struct vfio_irq_set hdr;
+       u8 *data = NULL;
+       int max, ret = 0;
+       size_t data_size = 0;
 
-               if (copy_from_user(&hdr, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&hdr, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               max = vfio_pci_get_irq_count(vdev, hdr.index);
+       max = vfio_pci_get_irq_count(vdev, hdr.index);
 
-               ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
-                                                VFIO_PCI_NUM_IRQS, &data_size);
-               if (ret)
-                       return ret;
+       ret = vfio_set_irqs_validate_and_prepare(&hdr, max, VFIO_PCI_NUM_IRQS,
+                                                &data_size);
+       if (ret)
+               return ret;
 
-               if (data_size) {
-                       data = memdup_user((void __user *)(arg + minsz),
-                                           data_size);
-                       if (IS_ERR(data))
-                               return PTR_ERR(data);
-               }
+       if (data_size) {
+               data = memdup_user((void __user *)(arg + minsz), data_size);
+               if (IS_ERR(data))
+                       return PTR_ERR(data);
+       }
 
-               mutex_lock(&vdev->igate);
+       mutex_lock(&vdev->igate);
 
-               ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
-                                             hdr.start, hdr.count, data);
+       ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index, hdr.start,
+                                     hdr.count, data);
 
-               mutex_unlock(&vdev->igate);
-               kfree(data);
+       mutex_unlock(&vdev->igate);
+       kfree(data);
 
-               return ret;
+       return ret;
 }
 
 static int vfio_pci_ioctl_reset(struct vfio_pci_core_device *vdev,
                                void __user *arg)
 {
-               int ret;
+       int ret;
 
-               if (!vdev->reset_works)
-                       return -EINVAL;
+       if (!vdev->reset_works)
+               return -EINVAL;
 
-               vfio_pci_zap_and_down_write_memory_lock(vdev);
+       vfio_pci_zap_and_down_write_memory_lock(vdev);
 
-               /*
-                * This function can be invoked while the power state is non-D0.
-                * If pci_try_reset_function() has been called while the power
-                * state is non-D0, then pci_try_reset_function() will
-                * internally set the power state to D0 without vfio driver
-                * involvement. For the devices which have NoSoftRst-, the
-                * reset function can cause the PCI config space reset without
-                * restoring the original state (saved locally in
-                * 'vdev->pm_save').
-                */
-               vfio_pci_set_power_state(vdev, PCI_D0);
+       /*
+        * This function can be invoked while the power state is non-D0. If
+        * pci_try_reset_function() has been called while the power state is
+        * non-D0, then pci_try_reset_function() will internally set the power
+        * state to D0 without vfio driver involvement. For the devices which
+        * have NoSoftRst-, the reset function can cause the PCI config space
+        * reset without restoring the original state (saved locally in
+        * 'vdev->pm_save').
+        */
+       vfio_pci_set_power_state(vdev, PCI_D0);
 
-               ret = pci_try_reset_function(vdev->pdev);
-               up_write(&vdev->memory_lock);
+       ret = pci_try_reset_function(vdev->pdev);
+       up_write(&vdev->memory_lock);
 
-               return ret;
+       return ret;
 }
 
 static int
@@ -1004,196 +995,192 @@ vfio_pci_ioctl_get_pci_hot_reset_info(struct vfio_pci_core_device *vdev,
 {
        unsigned long minsz =
                offsetofend(struct vfio_pci_hot_reset_info, count);
-               struct vfio_pci_hot_reset_info hdr;
-               struct vfio_pci_fill_info fill = { 0 };
-               struct vfio_pci_dependent_device *devices = NULL;
-               bool slot = false;
-               int ret = 0;
+       struct vfio_pci_hot_reset_info hdr;
+       struct vfio_pci_fill_info fill = { 0 };
+       struct vfio_pci_dependent_device *devices = NULL;
+       bool slot = false;
+       int ret = 0;
 
-               if (copy_from_user(&hdr, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&hdr, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (hdr.argsz < minsz)
-                       return -EINVAL;
+       if (hdr.argsz < minsz)
+               return -EINVAL;
 
-               hdr.flags = 0;
+       hdr.flags = 0;
 
-               /* Can we do a slot or bus reset or neither? */
-               if (!pci_probe_reset_slot(vdev->pdev->slot))
-                       slot = true;
-               else if (pci_probe_reset_bus(vdev->pdev->bus))
-                       return -ENODEV;
+       /* Can we do a slot or bus reset or neither? */
+       if (!pci_probe_reset_slot(vdev->pdev->slot))
+               slot = true;
+       else if (pci_probe_reset_bus(vdev->pdev->bus))
+               return -ENODEV;
 
-               /* How many devices are affected? */
-               ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
-                                                   vfio_pci_count_devs,
-                                                   &fill.max, slot);
-               if (ret)
-                       return ret;
+       /* How many devices are affected? */
+       ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+                                           &fill.max, slot);
+       if (ret)
+               return ret;
 
-               WARN_ON(!fill.max); /* Should always be at least one */
+       WARN_ON(!fill.max); /* Should always be at least one */
 
-               /*
-                * If there's enough space, fill it now, otherwise return
-                * -ENOSPC and the number of devices affected.
-                */
-               if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
-                       ret = -ENOSPC;
-                       hdr.count = fill.max;
-                       goto reset_info_exit;
-               }
+       /*
+        * If there's enough space, fill it now, otherwise return -ENOSPC and
+        * the number of devices affected.
+        */
+       if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
+               ret = -ENOSPC;
+               hdr.count = fill.max;
+               goto reset_info_exit;
+       }
 
-               devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
-               if (!devices)
-                       return -ENOMEM;
+       devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
+       if (!devices)
+               return -ENOMEM;
 
-               fill.devices = devices;
+       fill.devices = devices;
 
-               ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
-                                                   vfio_pci_fill_devs,
-                                                   &fill, slot);
+       ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_fill_devs,
+                                           &fill, slot);
 
-               /*
-                * If a device was removed between counting and filling,
-                * we may come up short of fill.max.  If a device was
-                * added, we'll have a return of -EAGAIN above.
-                */
-               if (!ret)
-                       hdr.count = fill.cur;
+       /*
+        * If a device was removed between counting and filling, we may come up
+        * short of fill.max.  If a device was added, we'll have a return of
+        * -EAGAIN above.
+        */
+       if (!ret)
+               hdr.count = fill.cur;
 
 reset_info_exit:
-               if (copy_to_user((void __user *)arg, &hdr, minsz))
-                       ret = -EFAULT;
+       if (copy_to_user((void __user *)arg, &hdr, minsz))
+               ret = -EFAULT;
 
-               if (!ret) {
-                       if (copy_to_user((void __user *)(arg + minsz), devices,
-                                        hdr.count * sizeof(*devices)))
-                               ret = -EFAULT;
-               }
+       if (!ret) {
+               if (copy_to_user((void __user *)(arg + minsz), devices,
+                                hdr.count * sizeof(*devices)))
+                       ret = -EFAULT;
+       }
 
-               kfree(devices);
-               return ret;
+       kfree(devices);
+       return ret;
 }
 
 static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
                                        void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_pci_hot_reset, count);
-               struct vfio_pci_hot_reset hdr;
-               int32_t *group_fds;
-               struct file **files;
-               struct vfio_pci_group_info info;
-               bool slot = false;
-               int file_idx, count = 0, ret = 0;
-
-               if (copy_from_user(&hdr, (void __user *)arg, minsz))
-                       return -EFAULT;
+       struct vfio_pci_hot_reset hdr;
+       int32_t *group_fds;
+       struct file **files;
+       struct vfio_pci_group_info info;
+       bool slot = false;
+       int file_idx, count = 0, ret = 0;
 
-               if (hdr.argsz < minsz || hdr.flags)
-                       return -EINVAL;
+       if (copy_from_user(&hdr, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               /* Can we do a slot or bus reset or neither? */
-               if (!pci_probe_reset_slot(vdev->pdev->slot))
-                       slot = true;
-               else if (pci_probe_reset_bus(vdev->pdev->bus))
-                       return -ENODEV;
+       if (hdr.argsz < minsz || hdr.flags)
+               return -EINVAL;
 
-               /*
-                * We can't let userspace give us an arbitrarily large
-                * buffer to copy, so verify how many we think there
-                * could be.  Note groups can have multiple devices so
-                * one group per device is the max.
-                */
-               ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
-                                                   vfio_pci_count_devs,
-                                                   &count, slot);
-               if (ret)
-                       return ret;
+       /* Can we do a slot or bus reset or neither? */
+       if (!pci_probe_reset_slot(vdev->pdev->slot))
+               slot = true;
+       else if (pci_probe_reset_bus(vdev->pdev->bus))
+               return -ENODEV;
 
-               /* Somewhere between 1 and count is OK */
-               if (!hdr.count || hdr.count > count)
-                       return -EINVAL;
+       /*
+        * We can't let userspace give us an arbitrarily large buffer to copy,
+        * so verify how many we think there could be.  Note groups can have
+        * multiple devices so one group per device is the max.
+        */
+       ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+                                           &count, slot);
+       if (ret)
+               return ret;
 
-               group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
-               files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
-               if (!group_fds || !files) {
-                       kfree(group_fds);
-                       kfree(files);
-                       return -ENOMEM;
-               }
+       /* Somewhere between 1 and count is OK */
+       if (!hdr.count || hdr.count > count)
+               return -EINVAL;
 
-               if (copy_from_user(group_fds, (void __user *)(arg + minsz),
-                                  hdr.count * sizeof(*group_fds))) {
-                       kfree(group_fds);
-                       kfree(files);
-                       return -EFAULT;
-               }
+       group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
+       files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL);
+       if (!group_fds || !files) {
+               kfree(group_fds);
+               kfree(files);
+               return -ENOMEM;
+       }
 
-               /*
-                * For each group_fd, get the group through the vfio external
-                * user interface and store the group and iommu ID.  This
-                * ensures the group is held across the reset.
-                */
-               for (file_idx = 0; file_idx < hdr.count; file_idx++) {
-                       struct file *file = fget(group_fds[file_idx]);
+       if (copy_from_user(group_fds, (void __user *)(arg + minsz),
+                          hdr.count * sizeof(*group_fds))) {
+               kfree(group_fds);
+               kfree(files);
+               return -EFAULT;
+       }
 
-                       if (!file) {
-                               ret = -EBADF;
-                               break;
-                       }
+       /*
+        * For each group_fd, get the group through the vfio external user
+        * interface and store the group and iommu ID.  This ensures the group
+        * is held across the reset.
+        */
+       for (file_idx = 0; file_idx < hdr.count; file_idx++) {
+               struct file *file = fget(group_fds[file_idx]);
 
-                       /* Ensure the FD is a vfio group FD.*/
-                       if (!vfio_file_iommu_group(file)) {
-                               fput(file);
-                               ret = -EINVAL;
-                               break;
-                       }
+               if (!file) {
+                       ret = -EBADF;
+                       break;
+               }
 
-                       files[file_idx] = file;
+               /* Ensure the FD is a vfio group FD.*/
+               if (!vfio_file_iommu_group(file)) {
+                       fput(file);
+                       ret = -EINVAL;
+                       break;
                }
 
-               kfree(group_fds);
+               files[file_idx] = file;
+       }
 
-               /* release reference to groups on error */
-               if (ret)
-                       goto hot_reset_release;
+       kfree(group_fds);
+
+       /* release reference to groups on error */
+       if (ret)
+               goto hot_reset_release;
 
-               info.count = hdr.count;
-               info.files = files;
+       info.count = hdr.count;
+       info.files = files;
 
-               ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
+       ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info);
 
 hot_reset_release:
-               for (file_idx--; file_idx >= 0; file_idx--)
-                       fput(files[file_idx]);
+       for (file_idx--; file_idx >= 0; file_idx--)
+               fput(files[file_idx]);
 
-               kfree(files);
-               return ret;
+       kfree(files);
+       return ret;
 }
 
 static int vfio_pci_ioctl_ioeventfd(struct vfio_pci_core_device *vdev,
                                    void __user *arg)
 {
        unsigned long minsz = offsetofend(struct vfio_device_ioeventfd, fd);
-               struct vfio_device_ioeventfd ioeventfd;
-               int count;
+       struct vfio_device_ioeventfd ioeventfd;
+       int count;
 
-               if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
-                       return -EFAULT;
+       if (copy_from_user(&ioeventfd, (void __user *)arg, minsz))
+               return -EFAULT;
 
-               if (ioeventfd.argsz < minsz)
-                       return -EINVAL;
+       if (ioeventfd.argsz < minsz)
+               return -EINVAL;
 
-               if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
-                       return -EINVAL;
+       if (ioeventfd.flags & ~VFIO_DEVICE_IOEVENTFD_SIZE_MASK)
+               return -EINVAL;
 
-               count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
+       count = ioeventfd.flags & VFIO_DEVICE_IOEVENTFD_SIZE_MASK;
 
-               if (hweight8(count) != 1 || ioeventfd.fd < -1)
-                       return -EINVAL;
+       if (hweight8(count) != 1 || ioeventfd.fd < -1)
+               return -EINVAL;
 
-               return vfio_pci_ioeventfd(vdev, ioeventfd.offset,
-                                         ioeventfd.data, count, ioeventfd.fd);
+       return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count,
+                                 ioeventfd.fd);
 }
 
 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,