return false;
}
memset(q->queue, 0, bytes);
- r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova);
+ r = qemu_vfio_dma_map(s->vfio, q->queue, bytes, false, &q->iova, errp);
if (r) {
- error_setg(errp, "Cannot map queue");
- return false;
+ error_prepend(errp, "Cannot map queue: ");
}
- return true;
+ return r == 0;
}
static void nvme_free_queue_pair(NVMeQueuePair *q)
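For reference, a minimal sketch of the error convention this hunk adopts, using only QEMU's qapi/error.h API; my_map_buffer() and my_init_queue() are made-up stand-ins, not code from this patch. The callee now fills *errp itself, so callers only add context with error_prepend() rather than constructing a second, less precise error:

#include "qemu/osdep.h"
#include "qapi/error.h"

/* Callee: creates the error and returns a negative errno on failure. */
static int my_map_buffer(void *host, size_t size, Error **errp)
{
    if (!host || !size) {
        error_setg(errp, "No buffer to map");
        return -EINVAL;
    }
    return 0;
}

/* Caller: only prepends context, keeping the callee's message intact. */
static bool my_init_queue(void *buf, size_t bytes, Error **errp)
{
    ERRP_GUARD(); /* so error_prepend() also works for NULL or &error_fatal */
    int r = my_map_buffer(buf, bytes, errp);

    if (r) {
        error_prepend(errp, "Cannot map queue: ");
    }
    return r == 0;
}

The next hunk applies the same pattern when setting up the queue pair: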
qemu_co_queue_init(&q->free_req_queue);
q->completion_bh = aio_bh_new(aio_context, nvme_process_completion_bh, q);
r = qemu_vfio_dma_map(s->vfio, q->prp_list_pages, bytes,
- false, &prp_list_iova);
+ false, &prp_list_iova, errp);
if (r) {
- error_setg_errno(errp, -r, "Cannot map buffer for DMA");
+ error_prepend(errp, "Cannot map buffer for DMA: ");
goto fail;
}
q->free_req_head = -1;
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
}
- r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova);
+ r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova, errp);
if (r) {
- error_setg(errp, "Cannot map buffer for DMA");
+ error_prepend(errp, "Cannot map buffer for DMA: ");
goto out;
}

try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
- len, true, &iova);
+ len, true, &iova, NULL);
if (r == -ENOSPC) {
/*
* In addition to the -ENOMEM error, the VFIO_IOMMU_MAP_DMA
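Here, by contrast, the error object is deliberately discarded (errp == NULL) because the caller has a recovery path: as the comment above (truncated here) explains, the VFIO_IOMMU_MAP_DMA ioctl can fail with -ENOSPC when the container's DMA-mapping quota is exhausted, and the code falls back as it would for -ENOMEM. A sketch of that shape, additionally assuming #include "qemu/vfio-helpers.h"; map_with_retry() is invented and the real retry logic is more involved:

/* Retry once after reclaiming the IOVA space held by temporary mappings. */
static int map_with_retry(QEMUVFIOState *s, void *host, size_t len,
                          uint64_t *iova)
{
    bool retried = false;
    int r;

try_map:
    /* No errp: only the return code matters on this path. */
    r = qemu_vfio_dma_map(s, host, len, true, iova, NULL);
    if (r == -ENOSPC) {
        r = -ENOMEM; /* treat quota exhaustion like out-of-memory */
    }
    if (r == -ENOMEM && !retried) {
        retried = true;
        if (qemu_vfio_dma_reset_temporary(s) == 0) {
            goto try_map;
        }
    }
    return r;
}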
static void nvme_register_buf(BlockDriverState *bs, void *host, size_t size)
{
int ret;
+ Error *local_err = NULL;
BDRVNVMeState *s = bs->opaque;
- ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL);
+ ret = qemu_vfio_dma_map(s->vfio, host, size, false, NULL, &local_err);
if (ret) {
/* FIXME: we may run out of IOVA addresses after repeated
* bdrv_register_buf/bdrv_unregister_buf, because nvme_vfio_dma_unmap
* doesn't reclaim addresses for fixed mappings. */
- error_report("nvme_register_buf failed: %s", strerror(-ret));
+ error_reportf_err(local_err, "nvme_register_buf failed: ");
}
}
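nvme_register_buf() has no Error ** parameter of its own, so it uses the other common consumer pattern: collect the error in a local variable, then hand it to error_reportf_err(), which prints it behind the given prefix and frees it. A minimal sketch; register_host_buf() is hypothetical:

static void register_host_buf(QEMUVFIOState *s, void *host, size_t size)
{
    Error *local_err = NULL;

    if (qemu_vfio_dma_map(s, host, size, false, NULL, &local_err)) {
        /* error_reportf_err() consumes local_err; don't touch it after. */
        error_reportf_err(local_err, "registering buffer failed: ");
    }
}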
QEMUVFIOState *qemu_vfio_open_pci(const char *device, Error **errp);
void qemu_vfio_close(QEMUVFIOState *s);
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova_list);
+ bool temporary, uint64_t *iova_list, Error **errp);
int qemu_vfio_dma_reset_temporary(QEMUVFIOState *s);
void qemu_vfio_dma_unmap(QEMUVFIOState *s, void *host);
void *qemu_vfio_pci_map_bar(QEMUVFIOState *s, int index,

static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
                                      size_t size, size_t max_size)
{
QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
+ Error *local_err = NULL;
int ret;
trace_qemu_vfio_ram_block_added(s, host, max_size);
- ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
+ ret = qemu_vfio_dma_map(s, host, max_size, false, NULL, &local_err);
if (ret) {
- error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
- strerror(-ret));
+ error_reportf_err(local_err,
+ "qemu_vfio_dma_map(%p, %zu) failed: ",
+ host, max_size);
}
}

* mapping status within this area is not allowed).
*/
int qemu_vfio_dma_map(QEMUVFIOState *s, void *host, size_t size,
- bool temporary, uint64_t *iova)
+ bool temporary, uint64_t *iova, Error **errp)
{
int ret = 0;
int index;
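Taken together, the extra Error ** parameter gives callers three idioms, summarized in this hypothetical caller (demo_map(), buf, and len are placeholders; same headers as the sketches above):

static int demo_map(QEMUVFIOState *s, void *buf, size_t len, Error **errp)
{
    ERRP_GUARD();
    uint64_t iova;
    int ret;

    /* 1. Pass errp through and let the callee's message propagate. */
    ret = qemu_vfio_dma_map(s, buf, len, false, &iova, errp);
    if (ret) {
        /* 2. ...optionally prepending context, as the hunks above do. */
        error_prepend(errp, "Cannot map buffer for DMA: ");
        return ret;
    }

    /* 3. Pass NULL and decide from the return code alone. */
    return qemu_vfio_dma_map(s, buf, len, true, &iova, NULL);
}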