#include <linux/vfio.h>
#include <linux/mdev.h>
-static const struct intel_gvt_ops intel_gvt_ops = {
- .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
- .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
- .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
- .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
- .vgpu_create = intel_gvt_create_vgpu,
- .vgpu_destroy = intel_gvt_destroy_vgpu,
- .vgpu_release = intel_gvt_release_vgpu,
- .vgpu_reset = intel_gvt_reset_vgpu,
- .vgpu_activate = intel_gvt_activate_vgpu,
- .vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .vgpu_query_plane = intel_vgpu_query_plane,
- .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
- .write_protect_handler = intel_vgpu_page_track_handler,
- .emulate_hotplug = intel_vgpu_emulate_hotplug,
-};
-
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
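With KVMGT importing the I915_GVT symbol namespace (see the MODULE_IMPORT_NS() lines in kvmgt.c below), the intel_gvt_ops table is pure indirection: every pointer in it named exactly one exported function. The table is dropped and each call site below switches to the direct call. A minimal stand-alone sketch of the pattern being removed (hypothetical names, not the real GVT API):

	#include <stdio.h>

	/* Before: the consumer dispatched through a vtable handed over
	 * at init time. After: it links against the provider and calls
	 * the exported function directly. */
	struct provider_ops {
		int (*emulate_read)(unsigned int off);
	};

	static int provider_emulate_read(unsigned int off)
	{
		return (int)(off + 1);
	}

	static const struct provider_ops ops = {
		.emulate_read = provider_emulate_read,
	};

	int main(void)
	{
		unsigned int off = 0x10;

		printf("via table:   %d\n", ops.emulate_read(off));      /* old */
		printf("direct call: %d\n", provider_emulate_read(off)); /* new */
		return 0;
	}

The direct call is easier to grep and visible to the compiler; the table presumably only earned its keep while kvmgt had to be loadable without linking against gvt symbols.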
intel_gvt_debugfs_init(gvt);
- ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt,
- &intel_gvt_ops);
+ ret = intel_gvt_hypervisor_host_init(i915->drm.dev, gvt);
if (ret)
goto out_destroy_idle_vgpu;
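With the table gone, the host-init hook needs only the device and the gvt instance. The matching MPT hook (presumably struct intel_gvt_mpt's host_init in hypercall.h, not shown in this excerpt) shrinks in step:

	/* before */
	int (*host_init)(struct device *dev, void *gvt, const void *ops);
	/* after */
	int (*host_init)(struct device *dev, void *gvt);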
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
-struct intel_gvt_ops {
- int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
- unsigned int);
- int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
- unsigned int);
- struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
- struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *vgpu);
- void (*vgpu_release)(struct intel_vgpu *vgpu);
- void (*vgpu_reset)(struct intel_vgpu *);
- void (*vgpu_activate)(struct intel_vgpu *);
- void (*vgpu_deactivate)(struct intel_vgpu *);
- int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
- int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
- int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
- unsigned int);
- void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
-};
-
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
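The pointer types above fix the signatures of the functions kvmgt.c now calls directly, so plain declarations take the struct's place in gvt.h. Reading them off the removed members gives, for instance (parameter names here follow the usual gvt convention; exact placement in the header may differ):

	int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
					void *p_data, unsigned int bytes);
	int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
					  void *p_data, unsigned int bytes);
	struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
						 struct intel_vgpu_type *type);
	void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);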
MODULE_IMPORT_NS(DMA_BUF);
MODULE_IMPORT_NS(I915_GVT);
-static const struct intel_gvt_ops *intel_gvt_ops;
-
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
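These helpers pack a region index into the top bits of the 64-bit file offset, exactly as vfio-pci does: the low 40 bits address into the region, everything above selects it. A stand-alone demonstration of the arithmetic, including the two companion macros vfio-pci also defines:

	#include <stdio.h>
	#include <stdint.h>

	#define VFIO_PCI_OFFSET_SHIFT   40
	#define VFIO_PCI_OFFSET_TO_INDEX(off)   ((off) >> VFIO_PCI_OFFSET_SHIFT)
	#define VFIO_PCI_INDEX_TO_OFFSET(index) ((uint64_t)(index) << VFIO_PCI_OFFSET_SHIFT)
	#define VFIO_PCI_OFFSET_MASK    (VFIO_PCI_INDEX_TO_OFFSET(1) - 1)

	int main(void)
	{
		/* region 2 (BAR2), byte offset 0x100 within it */
		uint64_t off = VFIO_PCI_INDEX_TO_OFFSET(2) | 0x100;

		printf("index=%llu offset=0x%llx\n",
		       (unsigned long long)VFIO_PCI_OFFSET_TO_INDEX(off),
		       (unsigned long long)(off & VFIO_PCI_OFFSET_MASK));
		return 0;
	}

This prints index=2 offset=0x100; a 40-bit payload leaves 24 bits of index, far more regions than any device exposes.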
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
- intel_gvt_ops->emulate_hotplug(vgpu, true);
+ intel_vgpu_emulate_hotplug(vgpu, true);
} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
- intel_gvt_ops->emulate_hotplug(vgpu, false);
+ intel_vgpu_emulate_hotplug(vgpu, false);
else {
gvt_vgpu_err("invalid EDID link state %d\n",
regs->link_state);
goto out;
}
- vgpu = intel_gvt_ops->vgpu_create(gvt, type);
+ vgpu = intel_gvt_create_vgpu(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret);
if (handle_valid(vgpu->handle))
return -EBUSY;
- intel_gvt_ops->vgpu_destroy(vgpu);
+ intel_gvt_destroy_vgpu(vgpu);
return 0;
}
if (ret)
goto undo_group;
- intel_gvt_ops->vgpu_activate(vgpu);
+ intel_gvt_activate_vgpu(vgpu);
atomic_set(&vdev->released, 0);
return ret;
if (atomic_cmpxchg(&vdev->released, 0, 1))
return;
- intel_gvt_ops->vgpu_release(vgpu);
+ intel_gvt_release_vgpu(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
&vdev->iommu_notifier);
int ret;
if (is_write)
- ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ ret = intel_vgpu_emulate_mmio_write(vgpu,
bar_start + off, buf, count);
else
- ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ ret = intel_vgpu_emulate_mmio_read(vgpu,
bar_start + off, buf, count);
return ret;
}
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
if (is_write)
- ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
buf, count);
else
- ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
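This switch is the landing point for trapped guest accesses: a VFIO client such as QEMU drives it with plain pread()/pwrite() at the encoded region offset. A hedged userspace sketch of the config-space read side (device_fd is assumed to be an already-open vfio device fd; real clients discover region offsets via VFIO_DEVICE_GET_REGION_INFO rather than hardcoding the shift):

	#include <stdio.h>
	#include <stdint.h>
	#include <unistd.h>
	#include <linux/vfio.h>

	static int read_vendor_id(int device_fd)
	{
		uint16_t vendor;
		/* vfio-pci convention: region offset = index << 40 */
		off_t cfg = (off_t)VFIO_PCI_CONFIG_REGION_INDEX << 40;

		if (pread(device_fd, &vendor, sizeof(vendor), cfg) != sizeof(vendor))
			return -1;
		printf("vendor id: 0x%04x\n", vendor); /* 0x8086 for an Intel vGPU */
		return 0;
	}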
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- intel_gvt_ops->vgpu_reset(vgpu);
+ intel_gvt_reset_vgpu(vgpu);
return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
if (dmabuf.argsz < minsz)
return -EINVAL;
- ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ ret = intel_vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
- __s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
-
- dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
- return dmabuf_fd;
-
+ return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
}
return -ENOTTY;
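The temporary could go because intel_vgpu_get_dmabuf() already returns either a freshly installed dma-buf fd or a negative errno, which is exactly what the ioctl wants to hand back. From the other side of the ABI, a hedged sketch of the consumer (device_fd assumed open, dmabuf_id obtained from a prior VFIO_DEVICE_QUERY_GFX_PLANE):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/vfio.h>

	static int get_plane_dmabuf(int device_fd, __u32 dmabuf_id)
	{
		/* Returns a new dma-buf fd on success, -1 with errno set on failure. */
		int fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &dmabuf_id);

		if (fd < 0)
			perror("VFIO_DEVICE_GET_GFX_DMABUF");
		return fd;
	}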
.ioctl = intel_vgpu_ioctl,
};
-static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+static int kvmgt_host_init(struct device *dev, void *gvt)
{
int ret;
if (ret)
return ret;
- intel_gvt_ops = ops;
intel_vgpu_mdev_ops.supported_type_groups = gvt_vgpu_type_groups;
ret = mdev_register_device(dev, &intel_vgpu_mdev_ops);
struct kvmgt_guest_info, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ intel_vgpu_page_track_handler(info->vgpu, gpa,
(void *)val, len);
}