NULL,
};
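
+/*
+ * The @init op runs from vfio_init_device() after the core fields of
+ * the vfio_device have been initialized, so driver-private state such
+ * as release_comp can be set up here.
+ */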
+static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
+{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
+
+ init_completion(&private->release_comp);
+ return 0;
+}
+
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
 struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
 int ret;

 if (atomic_dec_if_positive(&private->avail) < 0)
 return -EPERM;

- memset(&private->vdev, 0, sizeof(private->vdev));
- vfio_init_group_dev(&private->vdev, &mdev->dev,
- &vfio_ccw_dev_ops);
+ ret = vfio_init_device(&private->vdev, &mdev->dev, &vfio_ccw_dev_ops);
+ if (ret)
+ return ret;

 VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
 private->sch->schid.cssid,
 private->sch->schid.ssid,
 private->sch->schid.sch_no);

 ret = vfio_register_emulated_iommu_dev(&private->vdev);
if (ret)
- goto err_atomic;
+ goto err_put_vdev;
dev_set_drvdata(&mdev->dev, private);
return 0;
-err_atomic:
- vfio_uninit_group_dev(&private->vdev);
+err_put_vdev:
+ vfio_put_device(&private->vdev);
atomic_inc(&private->avail);
return ret;
}
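
+/*
+ * The @release op is invoked from vfio_device_release(), the vfio
+ * core's kref release helper, once the last reference to this device
+ * has been dropped.
+ */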
+static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
+{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
+
+ /*
+ * We cannot free vfio_ccw_private here because it includes
+ * parent info which must be freed by the css driver.
+ *
+ * As a workaround, memset the core device part and then
+ * notify the remove path that all active references to this
+ * device have been released.
+ */
+ memset(vdev, 0, sizeof(*vdev));
+ complete(&private->release_comp);
+}
+
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
 struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);

 vfio_unregister_group_dev(&private->vdev);
- vfio_uninit_group_dev(&private->vdev);
+ vfio_put_device(&private->vdev);
+ /*
+ * Wait until all active references to the mdev have been
+ * released so it is safe to defer kfree() to a later point.
+ *
+ * TODO: the clean fix is to split the parent/mdev info from
+ * the ccw private structure so each can be managed in its own
+ * life cycle.
+ */
+ wait_for_completion(&private->release_comp);
+
atomic_inc(&private->avail);
 }

static const struct vfio_device_ops vfio_ccw_dev_ops = {
+ .init = vfio_ccw_mdev_init_dev,
+ .release = vfio_ccw_mdev_release_dev,
.open_device = vfio_ccw_mdev_open_device,
.close_device = vfio_ccw_mdev_close_device,
.read = vfio_ccw_mdev_read,
/*
* VFIO driver API
*/
-void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
- const struct vfio_device_ops *ops)
-{
- init_completion(&device->comp);
- device->dev = dev;
- device->ops = ops;
-}
-EXPORT_SYMBOL_GPL(vfio_init_group_dev);
-
-void vfio_uninit_group_dev(struct vfio_device *device)
-{
- vfio_release_device_set(device);
-}
-EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
-
/* Release helper called by vfio_put_device() */
void vfio_device_release(struct kref *kref)
{
struct vfio_device *device =
container_of(kref, struct vfio_device, kref);
- vfio_uninit_group_dev(device);
+ vfio_release_device_set(device);
 /*
 * kvfree() cannot be done here due to a life cycle mess in
 * VFIO bus driver; the memory is instead freed by the bus
 * driver's @release callback, invoked below.
 */
 device->ops->release(device);
}
EXPORT_SYMBOL_GPL(vfio_device_release);

int vfio_init_device(struct vfio_device *device, struct device *dev,
 const struct vfio_device_ops *ops)
{
int ret;
- vfio_init_group_dev(device, dev, ops);
+ init_completion(&device->comp);
+ device->dev = dev;
+ device->ops = ops;

 if (ops->init) {
 ret = ops->init(device);
 if (ret)
 goto out_uninit;
 }

 return 0;

out_uninit:
- vfio_uninit_group_dev(device);
+ vfio_release_device_set(device);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_init_device);
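
/*
 * For reference, a sketch of the vfio_put_device() wrapper used above,
 * as declared in <linux/vfio.h> (body assumed from the release-helper
 * comment; the exact upstream inline may differ): it drops a reference
 * on the embedded kref and funnels the final put into
 * vfio_device_release(), which in turn calls ops->release.
 */
static inline void vfio_put_device(struct vfio_device *device)
{
        kref_put(&device->kref, vfio_device_release);
}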