bl = io_buffer_get_list(ctx, req->buf_index);
if (likely(bl)) {
- if (bl->is_mapped)
+ if (bl->is_buf_ring)
ret = io_ring_buffer_select(req, len, bl, issue_flags);
else
ret = io_provided_buffer_select(req, len, bl);
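
This hunk is the selection dispatch: groups backed by a mapped buffer ring go through io_ring_buffer_select(), classic provided buffers through io_provided_buffer_select(). The kernel only gets here when the submission asked for a picked buffer; a minimal userspace sketch of that request, not part of this patch (liburing, with the socket fd and bgid as placeholder values):

#include <errno.h>
#include <stddef.h>
#include <liburing.h>

/* Ask the kernel to pick a buffer from group `bgid` for this recv; this
 * is what routes the request through the ring/classic dispatch above. */
static int queue_recv_with_buffer_select(struct io_uring *ring, int sockfd,
					 unsigned short bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -ENOMEM;
	/* addr/len are NULL/0: the selected buffer supplies them */
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = bgid;
	return io_uring_submit(ring);
}

On completion, the id of the buffer the kernel picked is reported back in cqe->flags when IORING_CQE_F_BUFFER is set.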
if (!nbufs)
return 0;
- if (bl->is_mapped) {
+ if (bl->is_buf_ring) {
i = bl->buf_ring->tail - bl->head;
if (bl->is_mmap) {
		/* ... */
}
/* make sure it's seen as empty */
INIT_LIST_HEAD(&bl->buf_list);
- bl->is_mapped = 0;
+ bl->is_buf_ring = 0;
return i;
}
if (bl) {
ret = -EINVAL;
/* can't use provide/remove buffers command on mapped buffers */
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
ret = __io_remove_buffers(ctx, bl, p->nbufs);
}
io_ring_submit_unlock(ctx, issue_flags);
}
}
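
The classic IORING_OP_PROVIDE_BUFFERS / IORING_OP_REMOVE_BUFFERS opcodes only manage list-based groups, so the remove path above (and the provide path in the next hunk) bails out with -EINVAL once is_buf_ring is set. A sketch of the userspace call that would trip that check, assuming bgid is already registered as a buffer ring (liburing):

#include <errno.h>
#include <liburing.h>

/* A classic "remove buffers" against a ring-mapped group: the CQE result
 * is -EINVAL because of the is_buf_ring check above. */
static int remove_classic_buffers(struct io_uring *ring, int nr, int bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -ENOMEM;
	io_uring_prep_remove_buffers(sqe, nr, bgid);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;		/* -EINVAL for a buffer-ring group */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}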
/* can't add buffers via this command for a mapped buffer ring */
- if (bl->is_mapped) {
+ if (bl->is_buf_ring) {
ret = -EINVAL;
goto err;
}
bl->buf_pages = pages;
bl->buf_nr_pages = nr_pages;
bl->buf_ring = br;
- bl->is_mapped = 1;
+ bl->is_buf_ring = 1;
bl->is_mmap = 0;
return 0;
error_unpin:
}
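
This is the pinned variant: userspace hands in memory it allocated itself, the kernel pins the pages into buf_pages/buf_nr_pages and marks the group is_buf_ring = 1, is_mmap = 0. A minimal userspace sketch of that registration (liburing's io_uring_register_buf_ring(); ring_entries must be a power of two, and the sizes and bgid here are arbitrary):

#include <liburing.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Register a user-allocated buffer ring with `entries` slots for group
 * `bgid`; the kernel pins these pages in the hunk above. */
static struct io_uring_buf_ring *register_user_ring(struct io_uring *ring,
						    unsigned int entries,
						    unsigned short bgid)
{
	struct io_uring_buf_reg reg = { };
	struct io_uring_buf_ring *br;
	size_t size = entries * sizeof(struct io_uring_buf);

	if (posix_memalign((void **)&br, sysconf(_SC_PAGESIZE), size))
		return NULL;
	memset(br, 0, size);

	reg.ring_addr = (unsigned long)br;
	reg.ring_entries = entries;
	reg.bgid = bgid;
	if (io_uring_register_buf_ring(ring, &reg, 0)) {
		free(br);
		return NULL;
	}
	io_uring_buf_ring_init(br);
	return br;
}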
ibf->inuse = 1;
bl->buf_ring = ibf->mem;
- bl->is_mapped = 1;
+ bl->is_buf_ring = 1;
bl->is_mmap = 1;
return 0;
}
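
The second variant lets the kernel allocate the ring memory instead (is_mmap = 1): userspace leaves ring_addr at 0, sets IOU_PBUF_RING_MMAP, and then mmap()s the ring through the IORING_OFF_PBUF_RING offset convention. A sketch under those assumptions (64-bit build; newer liburing wraps this whole sequence in io_uring_setup_buf_ring()):

#include <liburing.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Have the kernel allocate the buffer ring (the is_mmap = 1 path above),
 * then map it into the application via the pbuf-ring mmap offset. */
static struct io_uring_buf_ring *register_kernel_ring(struct io_uring *ring,
						      unsigned int entries,
						      unsigned short bgid)
{
	struct io_uring_buf_reg reg = { };
	size_t size = entries * sizeof(struct io_uring_buf);
	off_t off;
	void *p;

	reg.ring_entries = entries;
	reg.bgid = bgid;
	reg.flags = IOU_PBUF_RING_MMAP;	/* ring_addr stays 0 */
	if (io_uring_register_buf_ring(ring, &reg, 0))
		return NULL;

	off = IORING_OFF_PBUF_RING | (off_t)bgid << IORING_OFF_PBUF_SHIFT;
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 ring->ring_fd, off);
	return p == MAP_FAILED ? NULL : p;
}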
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
- if (bl->is_mapped || !list_empty(&bl->buf_list))
+ if (bl->is_buf_ring || !list_empty(&bl->buf_list))
return -EEXIST;
} else {
free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
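
The -EEXIST check above makes group ids a single namespace: a bgid that already has a buffer ring, or that still holds classic provided buffers, cannot be registered again. A rough illustration, with error handling trimmed and assuming the provide-buffers op completes before the register call (liburing; buf/len/bgid are placeholders):

#include <errno.h>
#include <liburing.h>

/* With classic buffers already provided to `bgid`, registering a buffer
 * ring on the same bgid is refused with -EEXIST by the check above
 * (and the reverse direction likewise). */
static int try_conflicting_ring(struct io_uring *ring, void *buf,
				unsigned int len, unsigned short bgid)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	struct io_uring_buf_reg reg = { .ring_entries = 8, .bgid = bgid,
					.flags = IOU_PBUF_RING_MMAP };

	if (!sqe)
		return -ENOMEM;
	io_uring_prep_provide_buffers(sqe, buf, len, 1, bgid, 0);
	io_uring_submit(ring);
	if (!io_uring_wait_cqe(ring, &cqe))
		io_uring_cqe_seen(ring, cqe);
	/* expected to fail with -EEXIST */
	return io_uring_register_buf_ring(ring, &reg, 0);
}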
bl = io_buffer_get_list(ctx, reg.bgid);
if (!bl)
return -ENOENT;
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
return -EINVAL;
__io_remove_buffers(ctx, bl, -1U);
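
Unregistering is only valid for ring-backed groups (-EINVAL otherwise) and drops every remaining buffer via __io_remove_buffers(ctx, bl, -1U). The matching userspace teardown, sketched with liburing (for a pinned ring the application would free() its own allocation instead of munmap()ing):

#include <liburing.h>
#include <sys/mman.h>

/* Tear down a ring-backed group; the kernel side is the
 * __io_remove_buffers(ctx, bl, -1U) call above. */
static int teardown_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
			 size_t ring_size, unsigned short bgid,
			 int kernel_allocated)
{
	int ret = io_uring_unregister_buf_ring(ring, bgid);

	if (kernel_allocated)
		munmap(br, ring_size);	/* IOU_PBUF_RING_MMAP case */
	return ret;
}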
bl = io_buffer_get_list(ctx, buf_status.buf_group);
if (!bl)
return -ENOENT;
- if (!bl->is_mapped)
+ if (!bl->is_buf_ring)
return -EINVAL;
buf_status.head = bl->head;
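
IORING_REGISTER_PBUF_STATUS reports the kernel's consumer head for a ring group, so userspace can tell how many of the buffers it published are still outstanding. A sketch using the raw register syscall, assuming the uapi header bundled with liburing provides struct io_uring_buf_status and the opcode (newer liburing also carries a helper for this):

#include <liburing.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Read back the kernel's consumer head for group `bgid`; the tail the
 * application last published minus this head is the number of buffers
 * the kernel has not picked yet. */
static int query_ring_head(int ring_fd, unsigned int bgid, unsigned int *head)
{
	struct io_uring_buf_status status = { .buf_group = bgid };
	int ret;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_PBUF_STATUS, &status, 1);
	if (ret)
		return ret;
	*head = status.head;
	return 0;
}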