                                        &res_iovs,
                                        &num_iovs);
     if (res_iovs != NULL && num_iovs != 0) {
-        virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
+        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
     }
     virgl_renderer_resource_unref(unref.resource_id);
 }
@@ ... @@
     VIRTIO_GPU_FILL_CMD(att_rb);
     trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
 
-    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, NULL, &res_iovs);
+    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
@@ ... @@
                                              res_iovs, att_rb.nr_entries);
 
     if (ret != 0)
-        virtio_gpu_cleanup_mapping_iov(res_iovs, att_rb.nr_entries);
+        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
 }
 
 static void virgl_resource_detach_backing(VirtIOGPU *g,
@@ ... @@
     if (res_iovs == NULL || num_iovs == 0) {
         return;
     }
-    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
+    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
 }
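/*
 * A minimal sketch (not part of the patch) of the flow the three
 * virtio-gpu-3d.c hunks above share: virglrenderer hands back the iovec
 * array it has been holding, and the cleanup helper now needs the
 * VirtIOGPU pointer to release those mappings.  my_virgl_unref() is a
 * hypothetical stand-in for virgl_cmd_resource_unref(), shown assembled
 * only from the calls visible in the hunks.
 */
#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "hw/virtio/virtio-gpu.h"

static void my_virgl_unref(VirtIOGPU *g, uint32_t resource_id)
{
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    /* ask virglrenderer for the backing pages it still references */
    virgl_renderer_resource_detach_iov(resource_id, &res_iovs, &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        /* updated signature: pass the device so it can release the mapping */
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(resource_id);
}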
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ ... @@
 static struct virtio_gpu_simple_resource*
 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
-static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);
+static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
+                                       struct virtio_gpu_simple_resource *res);
 
 static void
 virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
@@ ... @@
     }
 
     pixman_image_unref(res->image);
-    virtio_gpu_cleanup_mapping(res);
+    virtio_gpu_cleanup_mapping(g, res);
     QTAILQ_REMOVE(&g->reslist, res, next);
     g->hostmem -= res->hostmem;
     g_free(res);
@@ ... @@
     scanout->height = ss.r.height;
 }
 
-int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
+int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
+                                  struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
                                   uint64_t **addr, struct iovec **iov)
 {
@@ ... @@
             qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                           " resource %d element %d\n",
                           __func__, ab->resource_id, i);
-            virtio_gpu_cleanup_mapping_iov(*iov, i);
+            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
             g_free(ents);
             *iov = NULL;
             if (addr) {
@@ ... @@
     return 0;
 }
 
-void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
+void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
+                                    struct iovec *iov, uint32_t count)
 {
     int i;
@@ ... @@
     g_free(iov);
 }
 
-static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
+static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
+                                       struct virtio_gpu_simple_resource *res)
 {
-    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
+    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
     res->iov = NULL;
     res->iov_cnt = 0;
     g_free(res->addrs);
@@ ... @@
         return;
     }
 
-    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
+    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
@@ ... @@
         cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
         return;
     }
-    virtio_gpu_cleanup_mapping(res);
+    virtio_gpu_cleanup_mapping(g, res);
 }
 
 static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
@@ ... @@
             hwaddr len = res->iov[i].iov_len;
             res->iov[i].iov_base =
                 cpu_physical_memory_map(res->addrs[i], &len, 1);
+
             if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                 /* Clean up the half-a-mapping we just created... */
                 if (res->iov[i].iov_base) {
                     cpu_physical_memory_unmap(res->iov[i].iov_base,
                                               res->iov[i].iov_len, 0, 0);
                 }
                 /* ...and the mappings for previous loop iterations */
                 res->iov_cnt = i;
-                virtio_gpu_cleanup_mapping(res);
+                virtio_gpu_cleanup_mapping(g, res);
                 pixman_image_unref(res->image);
                 g_free(res);
                 return -EINVAL;
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ ... @@
                                      enum virtio_gpu_ctrl_type type);
 void virtio_gpu_get_display_info(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd);
-int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
+int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
+                                  struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
                                   uint64_t **addr, struct iovec **iov);
-void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count);
+void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
+                                    struct iovec *iov, uint32_t count);
 void virtio_gpu_process_cmdq(VirtIOGPU *g);
 
 /* virtio-gpu-3d.c */
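/*
 * A minimal sketch (not part of the patch) of a caller written against the
 * updated prototypes above: map first, record iov_cnt, and on any later
 * failure hand the same VirtIOGPU pointer back to the cleanup helper, the
 * same shape virtio_gpu_resource_attach_backing() has in virtio-gpu.c.
 * my_attach_backing() and renderer_attach_failed() are hypothetical names
 * introduced only for this illustration.
 */
#include "qemu/osdep.h"
#include "hw/virtio/virtio-gpu.h"

static bool renderer_attach_failed(void)
{
    return false; /* placeholder for e.g. a renderer attach step */
}

static int my_attach_backing(VirtIOGPU *g,
                             struct virtio_gpu_resource_attach_backing *ab,
                             struct virtio_gpu_ctrl_command *cmd,
                             struct virtio_gpu_simple_resource *res)
{
    int ret;

    ret = virtio_gpu_create_mapping_iov(g, ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        return ret;     /* nothing was mapped, nothing to undo */
    }
    res->iov_cnt = ab->nr_entries;

    if (renderer_attach_failed()) {
        /* the cleanup helper also takes g now */
        virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
        res->iov = NULL;
        res->iov_cnt = 0;
        return -1;
    }
    return 0;
}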