Add support for multiple IOMMU indexes to the IOMMU notifier APIs.
When initializing a notifier with iommu_notifier_init(), the caller
must pass the IOMMU index that it is interested in. When a change
happens, the IOMMU implementation must pass
memory_region_notify_iommu() the IOMMU index that has changed and
that notifiers must be called for.
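For example (a minimal sketch, assuming a single-index IOMMU; the
iommu_mr, iova and page_mask names are placeholders), an IOMMU model
now reports an invalidation on index 0 like this:

    /* iommu_mr, iova and page_mask are placeholders for this sketch */
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = iova,
        .translated_addr = 0,
        .addr_mask = page_mask,
        .perm = IOMMU_NONE,
    };
    memory_region_notify_iommu(iommu_mr, 0, entry);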
IOMMUs which support only a single index don't need to change.
Callers which only support working with a single-index IOMMU can use
the index returned by passing MEMTXATTRS_UNSPECIFIED to
memory_region_iommu_attrs_to_index(), as sketched below.
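A minimal sketch of that pattern, mirroring the vfio hunk further
down (the giommu, section and llend variables come from that hunk and
are placeholders here):

    /* section, llend and giommu are placeholders for this sketch */
    int iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                       MEMTXATTRS_UNSPECIFIED);
    iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
                        IOMMU_NOTIFIER_ALL,
                        section->offset_within_region,
                        int128_get64(llend),
                        iommu_idx);
    memory_region_register_iommu_notifier(section->mr, &giommu->n);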
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180604152941.20374-3-peter.maydell@linaro.org
static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry,
void *private)
{
- memory_region_notify_iommu((IOMMUMemoryRegion *)private, *entry);
+ memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry);
return 0;
}
.addr_mask = size - 1,
.perm = IOMMU_NONE,
};
- memory_region_notify_iommu(&vtd_as->iommu, entry);
+ memory_region_notify_iommu(&vtd_as->iommu, 0, entry);
}
}
}
entry.iova = addr;
entry.perm = IOMMU_NONE;
entry.translated_addr = 0;
- memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
+ memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry);
done:
return true;
entry.translated_addr = tce & page_mask;
entry.addr_mask = ~page_mask;
entry.perm = spapr_tce_iommu_access_flags(tce);
- memory_region_notify_iommu(&tcet->iommu, entry);
+ memory_region_notify_iommu(&tcet->iommu, 0, entry);
return H_SUCCESS;
}
}
notify.perm = IOMMU_NONE;
- memory_region_notify_iommu(&iommu->iommu_mr, notify);
+ memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
notify.perm = entry->perm;
}
g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
}
- memory_region_notify_iommu(&iommu->iommu_mr, notify);
+ memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
}
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
if (memory_region_is_iommu(section->mr)) {
VFIOGuestIOMMU *giommu;
IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
+ int iommu_idx;
trace_vfio_listener_region_add_iommu(iova, end);
/*
llend = int128_add(int128_make64(section->offset_within_region),
section->size);
llend = int128_sub(llend, int128_one());
+ iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
iommu_notifier_init(&giommu->n, vfio_iommu_map_notify,
IOMMU_NOTIFIER_ALL,
section->offset_within_region,
- int128_get64(llend));
+ int128_get64(llend),
+ iommu_idx);
QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
memory_region_register_iommu_notifier(section->mr, &giommu->n);
iommu_listener);
struct vhost_iommu *iommu;
Int128 end;
+ int iommu_idx;
+ IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
if (!memory_region_is_iommu(section->mr)) {
return;
end = int128_add(int128_make64(section->offset_within_region),
section->size);
end = int128_sub(end, int128_one());
+ iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ MEMTXATTRS_UNSPECIFIED);
iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
IOMMU_NOTIFIER_UNMAP,
section->offset_within_region,
- int128_get64(end));
+ int128_get64(end),
+ iommu_idx);
iommu->mr = section->mr;
iommu->iommu_offset = section->offset_within_address_space -
section->offset_within_region;
/* Notify for address space range start <= addr <= end */
hwaddr start;
hwaddr end;
+ int iommu_idx;
QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
- hwaddr start, hwaddr end)
+ hwaddr start, hwaddr end,
+ int iommu_idx)
{
n->notify = fn;
n->notifier_flags = flags;
n->start = start;
n->end = end;
+ n->iommu_idx = iommu_idx;
}
/*
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
* @entry: the new entry in the IOMMU translation table. The entry
* replaces all old entries for the same virtual I/O address range.
* Deleted entries have .@perm == 0.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry);
/**
iommu_mr = IOMMU_MEMORY_REGION(mr);
assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
assert(n->start <= n->end);
+ assert(n->iommu_idx >= 0 &&
+ n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
+
QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
memory_region_update_iommu_notify_flags(iommu_mr);
}
}
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry)
{
IOMMUNotifier *iommu_notifier;
assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
- memory_region_notify_one(iommu_notifier, &entry);
+ if (iommu_notifier->iommu_idx == iommu_idx) {
+ memory_region_notify_one(iommu_notifier, &entry);
+ }
}
}