for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
- vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
- if (!vdpasim->buffer)
- goto err_iommu;
-
for (i = 0; i < dev_attr->nvqs; i++)
vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
&vdpasim->iommu_lock);
vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
}
-	kvfree(vdpasim->buffer);
+	if (vdpasim->dev_attr.free)
+		vdpasim->dev_attr.free(vdpasim);
+
for (i = 0; i < vdpasim->dev_attr.nas; i++)
vhost_iotlb_reset(&vdpasim->iommu[i]);
kfree(vdpasim->iommu);
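
These core hunks assume a matching change to struct vdpasim_dev_attr in vdpa_sim.h that is not shown here: the old buffer_size field goes away and a per-device free callback takes its place. A minimal sketch of that implied header change, with the unrelated fields elided:

struct vdpasim_dev_attr {
	/* ... existing fields, minus the removed size_t buffer_size ... */
	/* invoked from vdpasim_free() so each device can release its own state */
	void (*free)(struct vdpasim *vdpasim);
};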
#define VDPASIM_BLK_AS_NUM 1
#define VDPASIM_BLK_GROUP_NUM 1
+struct vdpasim_blk {
+ struct vdpasim vdpasim;
+ void *buffer;
+};
+
+static struct vdpasim_blk *sim_to_blk(struct vdpasim *vdpasim)
+{
+ return container_of(vdpasim, struct vdpasim_blk, vdpasim);
+}
+
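
This mirrors the existing sim_to_net() helper on the net side: the device-specific state embeds struct vdpasim and container_of() recovers the outer struct. The embedding is only safe because dev_attr.alloc_size is set to sizeof(struct vdpasim_blk) below, so vdpasim_create() allocates the whole container rather than a bare struct vdpasim.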
static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";
static bool vdpasim_blk_check_range(struct vdpasim *vdpasim, u64 start_sector,
static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
struct vdpasim_virtqueue *vq)
{
+ struct vdpasim_blk *blk = sim_to_blk(vdpasim);
size_t pushed = 0, to_pull, to_push;
struct virtio_blk_outhdr hdr;
bool handled = false;
}
bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
- vdpasim->buffer + offset,
- to_push);
+ blk->buffer + offset, to_push);
if (bytes < 0) {
dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
}
bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
- vdpasim->buffer + offset,
- to_pull);
+ blk->buffer + offset, to_pull);
if (bytes < 0) {
dev_dbg(&vdpasim->vdpa.dev,
"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
}
if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
- memset(vdpasim->buffer + offset, 0,
+ memset(blk->buffer + offset, 0,
num_sectors << SECTOR_SHIFT);
}
}
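
Throughout the request handler, blk->buffer simply replaces the old shared vdpasim->buffer as the in-memory backing store for the simulated disk: reads push data out of it, writes pull data into it, and WRITE_ZEROES clears the affected sector range in place.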
+static void vdpasim_blk_free(struct vdpasim *vdpasim)
+{
+ struct vdpasim_blk *blk = sim_to_blk(vdpasim);
+
+ kvfree(blk->buffer);
+}
+
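
Note that kvfree() is a no-op on a NULL pointer, so this callback is safe to run even if dev_add fails before blk->buffer has been allocated.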
static void vdpasim_blk_mgmtdev_release(struct device *dev)
{
}
const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
+ struct vdpasim_blk *blk;
struct vdpasim *simdev;
int ret;
dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
dev_attr.ngroups = VDPASIM_BLK_GROUP_NUM;
dev_attr.nas = VDPASIM_BLK_AS_NUM;
- dev_attr.alloc_size = sizeof(struct vdpasim);
+ dev_attr.alloc_size = sizeof(struct vdpasim_blk);
dev_attr.config_size = sizeof(struct virtio_blk_config);
dev_attr.get_config = vdpasim_blk_get_config;
dev_attr.work_fn = vdpasim_blk_work;
- dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;
+ dev_attr.free = vdpasim_blk_free;
simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
return PTR_ERR(simdev);
+ blk = sim_to_blk(simdev);
+
+ blk->buffer = kvmalloc(VDPASIM_BLK_CAPACITY << SECTOR_SHIFT,
+ GFP_KERNEL);
+ if (!blk->buffer) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+
ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
if (ret)
goto put_dev;
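
The put_dev label sits outside these hunks; presumably it is the usual tail of the function, something like:

put_dev:
	put_device(&simdev->vdpa.dev);
	return ret;

Dropping the last reference releases the device through vdpasim_free(), which now reaches the new dev_attr.free callback, so both failure paths above clean up the partially initialized device.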
struct vdpasim_dataq_stats tx_stats;
struct vdpasim_dataq_stats rx_stats;
struct vdpasim_cq_stats cq_stats;
+ void *buffer;
};
static struct vdpasim_net *sim_to_net(struct vdpasim *vdpasim)
size_t hdr_len = modern ? sizeof(struct virtio_net_hdr_v1) :
sizeof(struct virtio_net_hdr);
struct virtio_net_config *vio_config = vdpasim->config;
+ struct vdpasim_net *net = sim_to_net(vdpasim);
if (len < ETH_ALEN + hdr_len)
return false;
- if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
- is_multicast_ether_addr(vdpasim->buffer + hdr_len))
+ if (is_broadcast_ether_addr(net->buffer + hdr_len) ||
+ is_multicast_ether_addr(net->buffer + hdr_len))
return true;
- if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
+ if (!strncmp(net->buffer + hdr_len, vio_config->mac, ETH_ALEN))
return true;
return false;
++tx_pkts;
read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
- vdpasim->buffer,
- PAGE_SIZE);
+ net->buffer, PAGE_SIZE);
tx_bytes += read;
}
write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
- vdpasim->buffer, read);
+ net->buffer, read);
if (write <= 0) {
++rx_errors;
break;
vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
}
+static void vdpasim_net_free(struct vdpasim *vdpasim)
+{
+ struct vdpasim_net *net = sim_to_net(vdpasim);
+
+ kvfree(net->buffer);
+}
+
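
The net counterpart of vdpasim_blk_free(), with the same NULL-safety property. It matters here because net->buffer is allocated in dev_add below only after vdpasim_create(), and, as the existing comment there notes, it must exist before _vdpa_register_device() connects the device to the bus and requests can start arriving.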
static void vdpasim_net_mgmtdev_release(struct device *dev)
{
}
dev_attr.get_config = vdpasim_net_get_config;
dev_attr.work_fn = vdpasim_net_work;
dev_attr.get_stats = vdpasim_net_get_stats;
- dev_attr.buffer_size = PAGE_SIZE;
+ dev_attr.free = vdpasim_net_free;
simdev = vdpasim_create(&dev_attr, config);
if (IS_ERR(simdev))
u64_stats_init(&net->rx_stats.syncp);
u64_stats_init(&net->cq_stats.syncp);
+ net->buffer = kvmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!net->buffer) {
+ ret = -ENOMEM;
+ goto reg_err;
+ }
+
/*
* Initialization must be completed before this call, since it can
* connect the device to the vDPA bus, so requests can arrive after