MemoryRegion *area = section->mr;
bool writeable = !area->readonly && !area->rom_device;
hv_memory_flags_t flags;
- uint64_t page_size = qemu_real_host_page_size;
+ uint64_t page_size = qemu_real_host_page_size();
if (!memory_region_is_ram(area)) {
if (writeable) {
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
-#define PAGE_SIZE qemu_real_host_page_size
+#define PAGE_SIZE qemu_real_host_page_size()
#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
with sub-page size and unaligned start address. Pad the start
address to next and truncate size to previous page boundary. */
aligned = ROUND_UP(section->offset_within_address_space,
- qemu_real_host_page_size);
+ qemu_real_host_page_size());
delta = aligned - section->offset_within_address_space;
*start = aligned;
if (delta > size) {
return 0;
}
- return (size - delta) & qemu_real_host_page_mask;
+ return (size - delta) & qemu_real_host_page_mask();
}
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
ram_addr_t start = slot->ram_start_offset;
- ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
+ ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
* And mem->memory_size is aligned to it (otherwise this mem can't
* be registered to KVM).
*/
- hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
+ hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
/*HOST_LONG_BITS*/ 64) / 8;
mem->dirty_bmap = g_malloc0(bitmap_size);
mem->dirty_bmap_size = bitmap_size;
mem = &kml->slots[slot_id];
if (!mem->memory_size || offset >=
- (mem->memory_size / qemu_real_host_page_size)) {
+ (mem->memory_size / qemu_real_host_page_size())) {
return;
}
/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT 6
-#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
+#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
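As a worked example of the two macros above (a sketch only, not part of the patch; the 4 KiB figure and the helper name are assumptions): with a 4 KiB host page, KVM_CLEAR_LOG_ALIGN is 4096 << 6 = 256 KiB, and masking with KVM_CLEAR_LOG_MASK aligns a start address down to that 64-page boundary.

#include <stdint.h>

/* Illustration only: align a clear-dirty-log start address down to the
 * 64-page granularity required by KVM_CLEAR_DIRTY_LOG.  In QEMU proper,
 * page_size would come from qemu_real_host_page_size(). */
static uint64_t clear_log_align_down(uint64_t start, uint64_t page_size)
{
    uint64_t align = page_size << 6;   /* e.g. 4096 << 6 = 256 KiB */

    return start & -align;             /* same effect as KVM_CLEAR_LOG_MASK */
}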
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
KVMState *s = kvm_state;
uint64_t end, bmap_start, start_delta, bmap_npages;
struct kvm_clear_dirty_log d;
- unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
+ unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
int ret;
/*
void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
g_assert(
- ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+ ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
);
kvm_max_slot_size = max_slot_size;
}
* even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
* page size for the system though.
*/
- assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
+ assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
s->sigmask_len = 8;
#else
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
}
#endif
{
if (!bs || !bs->drv) {
/* page size or 4k (hdd sector size) should be on the safe side */
- return MAX(4096, qemu_real_host_page_size);
+ return MAX(4096, qemu_real_host_page_size());
}
IO_CODE();
{
if (!bs || !bs->drv) {
/* page size or 4k (hdd sector size) should be on the safe side */
- return MAX(4096, qemu_real_host_page_size);
+ return MAX(4096, qemu_real_host_page_size());
}
IO_CODE();
{
BDRVRawState *s = bs->opaque;
char *buf;
- size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
+ size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
size_t alignments[] = {1, 512, 1024, 2048, 4096};
/* For SCSI generic devices the alignment is not really used.
raw_probe_alignment(bs, s->fd, errp);
bs->bl.min_mem_alignment = s->buf_align;
- bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size);
+ bs->bl.opt_mem_alignment = MAX(s->buf_align, qemu_real_host_page_size());
/*
* Maximum transfers are best effort, so it is okay to ignore any
size_t write_size = (max_size < MAX_BLOCKSIZE)
? BDRV_SECTOR_SIZE
: MAX_BLOCKSIZE;
- size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size);
+ size_t max_align = MAX(MAX_BLOCKSIZE, qemu_real_host_page_size());
void *buf;
ssize_t n;
int ret;
if (!have_limits) {
bs->bl.min_mem_alignment = 512;
- bs->bl.opt_mem_alignment = qemu_real_host_page_size;
+ bs->bl.opt_mem_alignment = qemu_real_host_page_size();
/* Safe default since most protocols use readv()/writev()/etc */
bs->bl.max_iov = IOV_MAX;
size_t bytes;
int r;
- bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size);
+ bytes = ROUND_UP(nentries * entry_bytes, qemu_real_host_page_size());
q->head = q->tail = 0;
- q->queue = qemu_try_memalign(qemu_real_host_page_size, bytes);
+ q->queue = qemu_try_memalign(qemu_real_host_page_size(), bytes);
if (!q->queue) {
error_setg(errp, "Cannot allocate queue");
return false;
trace_nvme_create_queue_pair(idx, q, size, aio_context,
event_notifier_get_fd(s->irq_notifier));
bytes = QEMU_ALIGN_UP(s->page_size * NVME_NUM_REQS,
- qemu_real_host_page_size);
- q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size, bytes);
+ qemu_real_host_page_size());
+ q->prp_list_pages = qemu_try_memalign(qemu_real_host_page_size(), bytes);
if (!q->prp_list_pages) {
error_setg(errp, "Cannot allocate PRP page list");
goto fail;
.opcode = NVME_ADM_CMD_IDENTIFY,
.cdw10 = cpu_to_le32(0x1),
};
- size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size);
+ size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size());
- id = qemu_try_memalign(qemu_real_host_page_size, id_size);
+ id = qemu_try_memalign(qemu_real_host_page_size(), id_size);
if (!id) {
error_setg(errp, "Cannot allocate buffer for identify response");
goto out;
bool retry = true;
uint64_t iova;
size_t len = QEMU_ALIGN_UP(qiov->iov[i].iov_len,
- qemu_real_host_page_size);
+ qemu_real_host_page_size());
try_map:
r = qemu_vfio_dma_map(s->vfio,
qiov->iov[i].iov_base,
for (i = 0; i < qiov->niov; ++i) {
if (!QEMU_PTR_IS_ALIGNED(qiov->iov[i].iov_base,
- qemu_real_host_page_size) ||
- !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size)) {
+ qemu_real_host_page_size()) ||
+ !QEMU_IS_ALIGNED(qiov->iov[i].iov_len, qemu_real_host_page_size())) {
trace_nvme_qiov_unaligned(qiov, i, qiov->iov[i].iov_base,
qiov->iov[i].iov_len, s->page_size);
return false;
int r;
QEMU_AUTO_VFREE uint8_t *buf = NULL;
QEMUIOVector local_qiov;
- size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size);
+ size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size());
assert(QEMU_IS_ALIGNED(offset, s->page_size));
assert(QEMU_IS_ALIGNED(bytes, s->page_size));
assert(bytes <= s->max_transfer);
}
s->stats.unaligned_accesses++;
trace_nvme_prw_buffered(s, offset, bytes, qiov->niov, is_write);
- buf = qemu_try_memalign(qemu_real_host_page_size, len);
+ buf = qemu_try_memalign(qemu_real_host_page_size(), len);
if (!buf) {
return -ENOMEM;
}
}
- s->bat_dirty_block = 4 * qemu_real_host_page_size;
+ s->bat_dirty_block = 4 * qemu_real_host_page_size();
s->bat_dirty_bmap =
bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));
/* Using MADV_DONTNEED to discard memory is a Linux-specific feature */
#ifdef CONFIG_LINUX
void *t = qcow2_cache_get_table_addr(c, i);
- int align = qemu_real_host_page_size;
+ int align = qemu_real_host_page_size();
size_t mem_size = (size_t) c->table_size * num_tables;
size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
* patch target_mmap(), but it is more complicated as the file
* size must be known.
*/
- if (qemu_real_host_page_size < qemu_host_page_size) {
+ if (qemu_real_host_page_size() < qemu_host_page_size) {
abi_ulong end_addr, end_addr1;
end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
end_addr = HOST_PAGE_ALIGN(elf_bss);
* up to the targets page boundary.
*/
- if ((qemu_real_host_page_size < qemu_host_page_size) && fd != -1) {
+ if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
struct stat sb;
if (fstat(fd, &sb) == -1) {
static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
- return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
+ return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size());
}
static bool
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
if (qemu_host_page_size == 0) {
- qemu_host_page_size = qemu_real_host_page_size;
+ qemu_host_page_size = qemu_real_host_page_size();
}
if (qemu_host_page_size < TARGET_PAGE_SIZE) {
qemu_host_page_size = TARGET_PAGE_SIZE;
#define QXL_ROM_SZ 8192
QEMU_BUILD_BUG_ON(QXL_REQUIRED_SZ > QXL_ROM_SZ);
- return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size);
+ return QEMU_ALIGN_UP(QXL_REQUIRED_SZ, qemu_real_host_page_size());
}
static void init_qxl_rom(PCIQXLDevice *d)
#include "trace.h"
#include "qom/object.h"
-#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size
+#define FLIC_SAVE_INITIAL_SIZE qemu_real_host_page_size()
#define FLIC_FAILED (-1UL)
#define FLIC_SAVEVM_VERSION 1
FWCfgState *s = opaque;
bool mr_aligned;
- mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size) &&
- QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size) &&
- QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size);
+ mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
+ QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
+ QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
return s->acpi_mr_restore && !mr_aligned;
}
}
/* The NewWorld NVRAM is not located in the MacIO device */
- if (kvm_enabled() && qemu_real_host_page_size > 4096) {
+ if (kvm_enabled() && qemu_real_host_page_size() > 4096) {
/* We can't combine read-write and read-only in a single page, so
move the NVRAM out of ROM again for KVM */
nvram_addr = 0xFFE00000;
* our memory slot is of page size granularity.
*/
if (kvm_enabled()) {
- msi_window_size = qemu_real_host_page_size;
+ msi_window_size = qemu_real_host_page_size();
}
memory_region_init_io(&sphb->msiwindow, OBJECT(sphb), &spapr_msi_ops, spapr,
rdma_info_report("Initializing device %s %x.%x", pdev->name,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- if (TARGET_PAGE_SIZE != qemu_real_host_page_size) {
+ if (TARGET_PAGE_SIZE != qemu_real_host_page_size()) {
error_setg(errp, "Target page size must be the same as host page size");
return;
}
uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);
assert(max_transfer);
- max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size)
+ max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size())
/ s->blocksize;
stl_be_p(&r->buf[8], max_transfer);
/* Also take care of the opt xfer len. */
void tpm_ppi_init(TPMPPI *tpmppi, MemoryRegion *m,
hwaddr addr, Object *obj)
{
- tpmppi->buf = qemu_memalign(qemu_real_host_page_size,
+ tpmppi->buf = qemu_memalign(qemu_real_host_page_size(),
HOST_PAGE_ALIGN(TPM_PPI_ADDR_SIZE));
memory_region_init_ram_device_ptr(&tpmppi->ram, obj, "tpm-ppi",
TPM_PPI_ADDR_SIZE, tpmppi->buf);
{
struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap;
- uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size;
+ uint64_t pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
int ret;
unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
* to qemu_real_host_page_size.
*/
- bitmap->pgsize = qemu_real_host_page_size;
+ bitmap->pgsize = qemu_real_host_page_size();
bitmap->size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
BITS_PER_BYTE;
}
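The sizing above allocates one dirty bit per host page and pads the bitmap to whole 64-bit words (the same ROUND_UP formula appears again below for range->bitmap.size); a minimal standalone sketch of that arithmetic, with an illustrative helper name:

#include <stddef.h>
#include <stdint.h>

/* Sketch of the dirty-bitmap sizing used above: one bit per host page,
 * rounded up to a whole number of __u64 words, then converted to bytes.
 * E.g. 1 MiB with 4 KiB pages -> 256 pages -> 256 bits -> 32 bytes. */
static size_t dirty_bitmap_bytes(uint64_t size, uint64_t page_size)
{
    uint64_t pages = (size + page_size - 1) / page_size; /* pages covered */
    uint64_t bits = (pages + 63) & ~UINT64_C(63);        /* ROUND_UP(pages, 64) */

    return bits / 8;                                     /* bits -> bytes */
}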
if (unlikely((section->offset_within_address_space &
- ~qemu_real_host_page_mask) !=
- (section->offset_within_region & ~qemu_real_host_page_mask))) {
+ ~qemu_real_host_page_mask()) !=
+ (section->offset_within_region & ~qemu_real_host_page_mask()))) {
error_report("%s received unaligned region", __func__);
return;
}
iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
- llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+ llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
if (int128_ge(int128_make64(iova), llend)) {
if (memory_region_is_ram_device(section->mr)) {
memory_region_name(section->mr),
section->offset_within_address_space,
int128_getlo(section->size),
- qemu_real_host_page_size);
+ qemu_real_host_page_size());
}
return;
}
}
if (unlikely((section->offset_within_address_space &
- ~qemu_real_host_page_mask) !=
- (section->offset_within_region & ~qemu_real_host_page_mask))) {
+ ~qemu_real_host_page_mask()) !=
+ (section->offset_within_region & ~qemu_real_host_page_mask()))) {
error_report("%s received unaligned region", __func__);
return;
}
iova = REAL_HOST_PAGE_ALIGN(section->offset_within_address_space);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
- llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask));
+ llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask()));
if (int128_ge(int128_make64(iova), llend)) {
return;
* qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
* to qemu_real_host_page_size.
*/
- range->bitmap.pgsize = qemu_real_host_page_size;
+ range->bitmap.pgsize = qemu_real_host_page_size();
- pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size;
+ pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size();
range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) /
BITS_PER_BYTE;
range->bitmap.data = g_try_malloc0(range->bitmap.size);
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty.
*/
- if (cap_mig->pgsize_bitmap & qemu_real_host_page_size) {
+ if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
container->dirty_pages_supported = true;
container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
container->dirty_pgsizes = cap_mig->pgsize_bitmap;
/* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
if (bar_addr != PCI_BAR_UNMAPPED &&
- !(bar_addr & ~qemu_real_host_page_mask)) {
- size = qemu_real_host_page_size;
+ !(bar_addr & ~qemu_real_host_page_mask())) {
+ size = qemu_real_host_page_size();
}
memory_region_transaction_begin();
for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
if (old_addr[bar] != pdev->io_regions[bar].addr &&
vdev->bars[bar].region.size > 0 &&
- vdev->bars[bar].region.size < qemu_real_host_page_size) {
+ vdev->bars[bar].region.size < qemu_real_host_page_size()) {
vfio_sub_page_bar_update_mapping(pdev, bar);
}
}
}
/* MSI-X table start and end aligned to host page size */
- start = vdev->msix->table_offset & qemu_real_host_page_mask;
+ start = vdev->msix->table_offset & qemu_real_host_page_mask();
end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
(vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
*/
if (old_addr[bar] != pdev->io_regions[bar].addr &&
vdev->bars[bar].region.size > 0 &&
- vdev->bars[bar].region.size < qemu_real_host_page_size) {
+ vdev->bars[bar].region.size < qemu_real_host_page_size()) {
vfio_sub_page_bar_update_mapping(pdev, bar);
}
}
const hwaddr gpa = section->offset_within_address_space;
hwaddr end;
int ret;
- hwaddr page_mask = qemu_real_host_page_mask;
+ hwaddr page_mask = qemu_real_host_page_mask();
struct vfio_iommu_spapr_register_memory reg = {
.argsz = sizeof(reg),
.flags = 0,
const hwaddr gpa = section->offset_within_address_space;
hwaddr end;
int ret;
- hwaddr page_mask = qemu_real_host_page_mask;
+ hwaddr page_mask = qemu_real_host_page_mask();
struct vfio_iommu_spapr_register_memory reg = {
.argsz = sizeof(reg),
.flags = 0,
* Below we look at qemu_real_host_page_size as TCEs are allocated from
* system pages.
*/
- bits_per_level = ctz64(qemu_real_host_page_size) + 8;
+ bits_per_level = ctz64(qemu_real_host_page_size()) + 8;
create.levels = bits_total / bits_per_level;
if (bits_total % bits_per_level) {
++create.levels;
}
- max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size);
+ max_levels = (64 - create.page_shift) / ctz64(qemu_real_host_page_size());
for ( ; create.levels <= max_levels; ++create.levels) {
ret = ioctl(container->fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
if (!ret) {
#include "qemu/iova-tree.h"
#include "vhost-iova-tree.h"
-#define iova_min_addr qemu_real_host_page_size
+#define iova_min_addr qemu_real_host_page_size()
/**
* VhostIOVATree, able to:
int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map)
{
/* Some vhost devices do not like addr 0. Skip first page */
- hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size;
+ hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size();
if (map->translated_addr + map->size < map->translated_addr ||
map->perm == IOMMU_NONE) {
size_t avail_size = offsetof(vring_avail_t, ring) +
sizeof(uint16_t) * svq->vring.num;
- return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size);
+ return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size());
}
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
{
size_t used_size = offsetof(vring_used_t, ring) +
sizeof(vring_used_elem_t) * svq->vring.num;
- return ROUND_UP(used_size, qemu_real_host_page_size);
+ return ROUND_UP(used_size, qemu_real_host_page_size());
}
/**
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
driver_size = vhost_svq_driver_area_size(svq);
device_size = vhost_svq_device_area_size(svq);
- svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size);
+ svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
desc_size = sizeof(vring_desc_t) * svq->vring.num;
svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
memset(svq->vring.desc, 0, driver_size);
- svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size);
+ svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
memset(svq->vring.used, 0, device_size);
svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
assert(n && n->unmap_addr);
- munmap(n->unmap_addr, qemu_real_host_page_size);
+ munmap(n->unmap_addr, qemu_real_host_page_size());
n->unmap_addr = NULL;
}
int fd)
{
int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
- size_t page_size = qemu_real_host_page_size;
+ size_t page_size = qemu_real_host_page_size();
struct vhost_user *u = dev->opaque;
VhostUserState *user = u->user;
VirtIODevice *vdev = dev->vdev;
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
int queue_index)
{
- size_t page_size = qemu_real_host_page_size;
+ size_t page_size = qemu_real_host_page_size();
struct vhost_vdpa *v = dev->opaque;
VirtIODevice *vdev = dev->vdev;
VhostVDPAHostNotifier *n;
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
- size_t page_size = qemu_real_host_page_size;
+ size_t page_size = qemu_real_host_page_size();
struct vhost_vdpa *v = dev->opaque;
VirtIODevice *vdev = dev->vdev;
VhostVDPAHostNotifier *n;
return false;
}
- size = ROUND_UP(result->size, qemu_real_host_page_size);
+ size = ROUND_UP(result->size, qemu_real_host_page_size());
r = vhost_vdpa_dma_unmap(v, result->iova, size);
return r == 0;
}
#if defined(__x86_64__) || defined(__arm__) || defined(__powerpc64__)
default_thp_size = 2 * MiB;
#elif defined(__aarch64__)
- if (qemu_real_host_page_size == 4 * KiB) {
+ if (qemu_real_host_page_size() == 4 * KiB) {
default_thp_size = 2 * MiB;
- } else if (qemu_real_host_page_size == 16 * KiB) {
+ } else if (qemu_real_host_page_size() == 16 * KiB) {
default_thp_size = 32 * MiB;
- } else if (qemu_real_host_page_size == 64 * KiB) {
+ } else if (qemu_real_host_page_size() == 64 * KiB) {
default_thp_size = 512 * MiB;
}
#endif
const uint64_t page_size = qemu_ram_pagesize(rb);
/* We can have hugetlbfs with a page size smaller than the THP size. */
- if (page_size == qemu_real_host_page_size) {
+ if (page_size == qemu_real_host_page_size()) {
return MAX(page_size, virtio_mem_thp_size());
}
return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
* fresh page, consuming actual memory.
*/
return !qemu_ram_is_shared(rb) && rb->fd < 0 &&
- qemu_ram_pagesize(rb) == qemu_real_host_page_size;
+ qemu_ram_pagesize(rb) == qemu_real_host_page_size();
}
#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
extern intptr_t qemu_host_page_mask;
#define HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_host_page_size)
-#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size)
+#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
void qemu_init_cpu_list(void);
hwaddr addr;
ram_addr_t ram_addr;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
+ unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
# define QEMU_VMALLOC_ALIGN (256 * 4096)
#elif defined(__linux__) && defined(__sparc__)
-# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size, SHMLBA)
+# define QEMU_VMALLOC_ALIGN MAX(qemu_real_host_page_size(), SHMLBA)
#else
-# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size
+# define QEMU_VMALLOC_ALIGN qemu_real_host_page_size()
#endif
#ifdef CONFIG_POSIX
/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
* when intptr_t is 32-bit and we are aligning a long long.
*/
-extern uintptr_t qemu_real_host_page_size;
-extern intptr_t qemu_real_host_page_mask;
+static inline uintptr_t qemu_real_host_page_size(void)
+{
+ return getpagesize();
+}
+
+static inline intptr_t qemu_real_host_page_mask(void)
+{
+ return -(intptr_t)qemu_real_host_page_size();
+}
/*
* After using getopt or getopt_long, if you need to parse another set
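A short usage sketch for the accessors introduced above (illustration only; the helper names are made up). Because qemu_real_host_page_mask() returns a sign-extended intptr_t, it can be ANDed directly against a 64-bit value even on a 32-bit host, which is what REAL_HOST_PAGE_ALIGN() relies on:

#include "qemu/osdep.h"

/* Not part of the patch: align a 64-bit value down/up to the host page.
 * The mask is negative, so the implicit widening in the AND keeps the
 * high bits set even when intptr_t is only 32 bits wide. */
static uint64_t host_page_align_down(uint64_t addr)
{
    return addr & qemu_real_host_page_mask();
}

static uint64_t host_page_align_up(uint64_t addr)
{
    /* equivalent to REAL_HOST_PAGE_ALIGN(addr) */
    return (addr + qemu_real_host_page_size() - 1) & qemu_real_host_page_mask();
}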
size = STACK_LOWER_LIMIT;
}
guard = TARGET_PAGE_SIZE;
- if (guard < qemu_real_host_page_size) {
- guard = qemu_real_host_page_size;
+ if (guard < qemu_real_host_page_size()) {
+ guard = qemu_real_host_page_size();
}
error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
may need to truncate file maps at EOF and add extra anonymous pages
up to the targets page boundary. */
- if ((qemu_real_host_page_size < qemu_host_page_size) &&
+ if ((qemu_real_host_page_size() < qemu_host_page_size) &&
!(flags & MAP_ANONYMOUS)) {
struct stat sb;
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
ram_addr_t start, size_t len)
{
- long our_host_ps = qemu_real_host_page_size;
+ long our_host_ps = qemu_real_host_page_size();
trace_migrate_handle_rp_req_pages(rbname, start, len);
return false;
}
- if (qemu_real_host_page_size != ram_pagesize_summary()) {
+ if (qemu_real_host_page_size() != ram_pagesize_summary()) {
bool have_hp = false;
/* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
*/
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
- long pagesize = qemu_real_host_page_size;
+ long pagesize = qemu_real_host_page_size();
int ufd = -1;
bool ret = false; /* Error unless we change it */
void *testarea = NULL;
uint64_t pinfo;
uint64_t ret = -1;
uintptr_t addr = (uintptr_t) ptr;
- uintptr_t pagesize = qemu_real_host_page_size;
+ uintptr_t pagesize = qemu_real_host_page_size();
off_t offset = addr / pagesize * sizeof(pinfo);
int fd;
ERROR("use memset() instead of bzero()\n" . $herecurr);
}
if ($line =~ /\bgetpagesize\(\)/) {
- ERROR("use qemu_real_host_page_size instead of getpagesize()\n" . $herecurr);
+ ERROR("use qemu_real_host_page_size() instead of getpagesize()\n" . $herecurr);
}
if ($line =~ /\bsysconf\(_SC_PAGESIZE\)/) {
- ERROR("use qemu_real_host_page_size instead of sysconf(_SC_PAGESIZE)\n" . $herecurr);
+ ERROR("use qemu_real_host_page_size() instead of sysconf(_SC_PAGESIZE)\n" . $herecurr);
}
my $non_exit_glib_asserts = qr{g_assert_cmpstr|
g_assert_cmpint|
#else
long qemu_minrampagesize(void)
{
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
}
long qemu_maxrampagesize(void)
{
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
}
#endif
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
- new_block->page_size = qemu_real_host_page_size;
+ new_block->page_size = qemu_real_host_page_size();
new_block->host = host;
new_block->flags = ram_flags;
ram_block_add(new_block, &local_err);
/* Adjust start_pa and size so that they are page-aligned. (Cf
* kvm_set_phys_mem() in kvm-all.c).
*/
- delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
- delta &= ~qemu_real_host_page_mask;
+ delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+ delta &= ~qemu_real_host_page_mask();
if (delta > size) {
return;
}
start_pa += delta;
size -= delta;
- size &= qemu_real_host_page_mask;
- if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ size &= qemu_real_host_page_mask();
+ if (!size || (start_pa & ~qemu_real_host_page_mask())) {
return;
}
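The delta arithmetic above rounds start_pa up to the next host page while quietly collapsing to zero when start_pa is already aligned; a standalone sketch of the same trick (illustrative name, 4 KiB pages used only for the example numbers):

#include <stdint.h>

/* Not part of the patch: the page-rounding idiom used above.  With
 * 4 KiB pages, 0x1234 -> delta 0xdcc -> 0x2000, while an already
 * aligned 0x2000 gives delta 0x1000, which the second mask turns
 * back into 0, leaving the address unchanged. */
static uint64_t round_start_up(uint64_t start_pa, uint64_t page_size)
{
    uint64_t page_mask = -page_size;
    uint64_t delta = page_size - (start_pa & ~page_mask);

    delta &= ~page_mask;   /* already aligned -> delta becomes 0 */
    return start_pa + delta;
}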
* call into the kernel. Instead, we split the mapping into smaller ones,
* and call hax_update_mapping() on each.
*/
- max_mapping_size = UINT32_MAX & qemu_real_host_page_mask;
+ max_mapping_size = UINT32_MAX & qemu_real_host_page_mask();
while (size > max_mapping_size) {
hax_update_mapping(start_pa, max_mapping_size, host_va, flags);
start_pa += max_mapping_size;
}
/* Adjust start_pa and size so that they are page-aligned. */
- delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
- delta &= ~qemu_real_host_page_mask;
+ delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+ delta &= ~qemu_real_host_page_mask();
if (delta > size) {
return;
}
start_pa += delta;
size -= delta;
- size &= qemu_real_host_page_mask;
- if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ size &= qemu_real_host_page_mask();
+ if (!size || (start_pa & ~qemu_real_host_page_mask())) {
return;
}
return;
}
- delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
- delta &= ~qemu_real_host_page_mask;
+ delta = qemu_real_host_page_size() - (start_pa & ~qemu_real_host_page_mask());
+ delta &= ~qemu_real_host_page_mask();
if (delta > size) {
return;
}
start_pa += delta;
size -= delta;
- size &= qemu_real_host_page_mask;
- if (!size || (start_pa & ~qemu_real_host_page_mask)) {
+ size &= qemu_real_host_page_mask();
+ if (!size || (start_pa & ~qemu_real_host_page_mask())) {
return;
}
* will be a normal mapping, not a special hugepage one used
* for RAM.
*/
- if (qemu_real_host_page_size < 0x10000) {
+ if (qemu_real_host_page_size() < 0x10000) {
error_setg(errp,
"KVM can't supply 64kiB CI pages, which guest expects");
}
/* page-align the beginning and end of the buffer */
buf = static_code_gen_buffer;
end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
- buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
- end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
+ buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size());
+ end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size());
size = end - buf;
/* Honor a command-line option limiting the size of the buffer. */
if (size > tb_size) {
- size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
+ size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size());
}
region.start_aligned = buf;
*/
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
- const size_t page_size = qemu_real_host_page_size;
+ const size_t page_size = qemu_real_host_page_size();
size_t region_size;
int have_prot, need_prot;
if (started && vubr->notifier.fd >= 0) {
vu_set_queue_host_notifier(dev, vq, vubr->notifier.fd,
- qemu_real_host_page_size,
- qidx * qemu_real_host_page_size);
+ qemu_real_host_page_size(),
+ qidx * qemu_real_host_page_size());
}
if (qidx % 2 == 1) {
{
VuDev *dev = (VuDev *)arg;
VubrDev *vubr = container_of(dev, VubrDev, vudev);
- int pagesize = qemu_real_host_page_size;
+ int pagesize = qemu_real_host_page_size();
int qidx;
while (true) {
void *addr;
int fd;
- length = qemu_real_host_page_size * VHOST_USER_BRIDGE_MAX_QUEUES;
+ length = qemu_real_host_page_size() * VHOST_USER_BRIDGE_MAX_QUEUES;
fd = mkstemp(template);
if (fd < 0) {
int qemu_msync(void *addr, size_t length, int fd)
{
#ifdef CONFIG_POSIX
- size_t align_mask = ~(qemu_real_host_page_size - 1);
+ size_t align_mask = ~(qemu_real_host_page_size() - 1);
/**
* There are no strict reqs as per the length of mapping
* alignment changes. Additionally - round the size to the multiple
* of PAGE_SIZE
*/
- length += ((uintptr_t)addr & (qemu_real_host_page_size - 1));
+ length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1));
length = (length + ~align_mask) & align_mask;
addr = (void *)((uintptr_t)addr & align_mask);
util_ss.add(files('sys_membarrier.c'))
endif
util_ss.add(files('log.c'))
-util_ss.add(files('pagesize.c'))
util_ss.add(files('qdist.c'))
util_ss.add(files('qht.c'))
util_ss.add(files('qsp.c'))
#endif
#endif
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
}
size_t qemu_mempath_getpagesize(const char *mem_path)
#endif
#endif
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
}
#define OVERCOMMIT_MEMORY_PATH "/proc/sys/vm/overcommit_memory"
* MAP_NORESERVE.
* b) MAP_NORESERVE is not affected by /proc/sys/vm/overcommit_memory.
*/
- if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size) {
+ if (qemu_fd_getpagesize(fd) != qemu_real_host_page_size()) {
return true;
}
* We do this unless we are using the system page size, in which case
* anonymous memory is OK.
*/
- if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) {
+ if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size()) {
fd = -1;
flags |= MAP_ANONYMOUS;
} else {
/* Mappings in the same segment must share the same page size */
return qemu_fd_getpagesize(fd);
#else
- return qemu_real_host_page_size;
+ return qemu_real_host_page_size();
#endif
}
static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
{
- g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask));
- g_assert(!(size & ~qemu_real_host_page_mask));
+ g_assert(!((uintptr_t)addr & ~qemu_real_host_page_mask()));
+ g_assert(!(size & ~qemu_real_host_page_mask()));
#ifdef _WIN32
DWORD old_protect;
#ifdef CONFIG_DEBUG_STACK_USAGE
void *ptr2;
#endif
- size_t pagesz = qemu_real_host_page_size;
+ size_t pagesz = qemu_real_host_page_size();
#ifdef _SC_THREAD_STACK_MIN
/* avoid stacks smaller than _SC_THREAD_STACK_MIN */
long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
unsigned int usage;
void *ptr;
- for (ptr = stack + qemu_real_host_page_size; ptr < stack + sz;
+ for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz;
ptr += sizeof(uint32_t)) {
if (*(uint32_t *)ptr != 0xdeadbeaf) {
break;
#ifdef _SC_PHYS_PAGES
long pages = sysconf(_SC_PHYS_PAGES);
if (pages > 0) {
- if (pages > SIZE_MAX / qemu_real_host_page_size) {
+ if (pages > SIZE_MAX / qemu_real_host_page_size()) {
return SIZE_MAX;
} else {
- return pages * qemu_real_host_page_size;
+ return pages * qemu_real_host_page_size();
}
}
#endif
Error **errp)
{
int i;
- size_t pagesize = qemu_real_host_page_size;
+ size_t pagesize = qemu_real_host_page_size();
memory = (memory + pagesize - 1) & -pagesize;
for (i = 0; i < memory / pagesize; i++) {
+++ /dev/null
-/*
- * pagesize.c - query the host about its page size
- *
- * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
- * License: GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
-
-#include "qemu/osdep.h"
-
-uintptr_t qemu_real_host_page_size;
-intptr_t qemu_real_host_page_mask;
-
-static void __attribute__((constructor)) init_real_host_page_size(void)
-{
- qemu_real_host_page_size = getpagesize();
- qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
-}
Error **errp)
{
void *p;
- assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size));
+ assert(QEMU_IS_ALIGNED(offset, qemu_real_host_page_size()));
assert_bar_index_valid(s, index);
p = mmap(NULL, MIN(size, s->bar_region_info[index].size - offset),
prot, MAP_SHARED,
IOVAMapping m = {.host = host, .size = size, .iova = iova};
IOVAMapping *insert;
- assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
- assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size));
- assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size));
+ assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size()));
+ assert(QEMU_IS_ALIGNED(s->low_water_mark, qemu_real_host_page_size()));
+ assert(QEMU_IS_ALIGNED(s->high_water_mark, qemu_real_host_page_size()));
trace_qemu_vfio_new_mapping(s, host, size, index, iova);
assert(index >= 0);
index = mapping - s->mappings;
assert(mapping->size > 0);
- assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size));
+ assert(QEMU_IS_ALIGNED(mapping->size, qemu_real_host_page_size()));
assert(index >= 0 && index < s->nr_mappings);
if (ioctl(s->container, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
error_setg_errno(errp, errno, "VFIO_UNMAP_DMA failed");
IOVAMapping *mapping;
uint64_t iova0;
- assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size));
- assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size));
+ assert(QEMU_PTR_IS_ALIGNED(host, qemu_real_host_page_size()));
+ assert(QEMU_IS_ALIGNED(size, qemu_real_host_page_size()));
trace_qemu_vfio_dma_map(s, host, size, temporary, iova);
QEMU_LOCK_GUARD(&s->lock);
mapping = qemu_vfio_find_mapping(s, host, &index);