/*
 * Send the IOTLB batch-begin message exactly once per batch.
 *
 * If the backend advertises VHOST_BACKEND_F_IOTLB_BATCH and a begin has
 * not yet been sent for the current batch, emit it via
 * vhost_vdpa_listener_begin_batch().
 *
 * The per-batch state lives in v->shared (VhostVDPAShared) so that all
 * vhost_vdpa instances sharing one device agree on whether a begin was
 * already sent.
 *
 * NOTE(review): the flag is set unconditionally, even when the backend
 * lacks batching support; the matching commit path also checks this flag,
 * so this appears intentional — confirm against vhost_vdpa_listener_commit.
 */
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->shared->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->shared->iotlb_batch_begin_sent = true;
}
static void vhost_vdpa_listener_commit(MemoryListener *listener)
return;
}
- if (!v->iotlb_batch_begin_sent) {
+ if (!v->shared->iotlb_batch_begin_sent) {
return;
}
fd, errno, strerror(errno));
}
- v->iotlb_batch_begin_sent = false;
+ v->shared->iotlb_batch_begin_sent = false;
}
static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
/* IOVA mapping used by the Shadow Virtqueue */
VhostIOVATree *iova_tree;
+ bool iotlb_batch_begin_sent;
+
/* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
bool shadow_data;
} VhostVDPAShared;
typedef struct vhost_vdpa {
int index;
uint32_t msg_type;
- bool iotlb_batch_begin_sent;
uint32_t address_space_id;
MemoryListener listener;
uint64_t acked_features;