}
iov[iovcnt].iov_len = frag_len;
iov[iovcnt].iov_base = g_malloc(frag_len);
- if (!iov[iovcnt].iov_base) {
- err = -ROCKER_ENOMEM;
- goto err_no_mem;
- }
pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
iov[iovcnt].iov_len);
err = fp_port_eg(r->fp_port[port], iov, iovcnt);
err_too_many_frags:
-err_no_mem:
err_bad_attr:
for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
g_free(iov[i].iov_base);
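
The checks being removed in these hunks are dead code: GLib's g_malloc() and
the g_new*()/g_renew() family abort the process on allocation failure rather
than returning NULL, so none of these -ROCKER_ENOMEM paths were reachable. A
minimal standalone sketch of that contract (plain GLib, not rocker code):

    #include <glib.h>

    int main(void)
    {
        /* g_malloc() either returns a valid pointer or aborts the
         * process; a NULL check after it can never fire. */
        char *p = g_malloc(64);
        g_assert(p != NULL);              /* always true */

        /* When failure must be survivable, g_try_malloc() is the
         * variant that returns NULL instead of aborting. */
        char *q = g_try_malloc(64);
        if (q == NULL) {
            return 1;                     /* reachable only here */
        }

        g_free(q);
        g_free(p);
        return 0;
    }
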
*/
data = g_malloc(data_size);
- if (!data) {
- err = -ROCKER_ENOMEM;
- goto out;
- }
+
iov_to_buf(iov, iovcnt, 0, data, data_size);
pci_dma_write(dev, frag_addr, data, data_size);
g_free(data);
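
For context on the surviving lines above: the payload arrives as an iovec,
and iov_to_buf() flattens it into the freshly allocated contiguous buffer
before the single pci_dma_write(). A sketch of the same gather-then-copy
pattern using QEMU's iov helpers; the linearize() wrapper is invented for
illustration, not a rocker function:

    #include "qemu/osdep.h"
    #include "qemu/iov.h"

    /* Gather a scattered payload into one linear buffer, e.g. before
     * handing it to a single DMA write. */
    static void *linearize(const struct iovec *iov, unsigned iovcnt,
                           size_t *len)
    {
        *len = iov_size(iov, iovcnt);          /* total bytes, all frags */
        void *buf = g_malloc(*len);            /* aborts on OOM, no check */

        iov_to_buf(iov, iovcnt, 0, buf, *len); /* copy from offset 0 */
        return buf;
    }
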
buf = g_malloc(r->test_dma_size);
- if (!buf) {
- DPRINTF("test dma buffer alloc failed");
- return;
- }
-
switch (val) {
case ROCKER_TEST_DMA_CTRL_CLEAR:
memset(buf, 0, r->test_dma_size);
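
One review caveat on the hunk above: dropping the check is fine only because
aborting on OOM is an acceptable outcome here. If r->test_dma_size can be
driven arbitrarily high by the guest, the defensive variant would cap it and
use g_try_malloc(), which does return NULL. Hypothetical rewrite of the same
block; ROCKER_TEST_DMA_SIZE_MAX is an invented cap, not a real rocker
constant:

    /* hypothetical defensive variant, not part of this patch */
    if (r->test_dma_size > ROCKER_TEST_DMA_SIZE_MAX) {
        return;                   /* reject absurd guest-supplied sizes */
    }
    buf = g_try_malloc(r->test_dma_size);
    if (!buf) {
        DPRINTF("test dma buffer alloc failed");
        return;                   /* survive OOM instead of aborting */
    }
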
r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r);
- for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
- if (!r->worlds[i]) {
- err = -ENOMEM;
- goto err_world_alloc;
- }
- }
-
if (!r->world_name) {
r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA]));
}
}
r->rings = g_new(DescRing *, rocker_pci_ring_count(r));
- if (!r->rings) {
- goto err_rings_alloc;
- }
/* Rings are ordered like this:
* - command ring
* .....
*/
- err = -ENOMEM;
for (i = 0; i < rocker_pci_ring_count(r); i++) {
DescRing *ring = desc_ring_alloc(r, i);
- if (!ring) {
- goto err_ring_alloc;
- }
-
if (i == ROCKER_RING_CMD) {
desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD);
} else if (i == ROCKER_RING_EVENT) {
FpPort *port = fp_port_alloc(r, r->name, &r->fp_start_macaddr,
                             i, &r->fp_ports_peers[i]);
- if (!port) {
- goto err_port_alloc;
- }
-
r->fp_port[i] = port;
fp_port_set_world(port, r->world_dflt);
}
return 0;
-err_port_alloc:
- for (--i; i >= 0; i--) {
- FpPort *port = r->fp_port[i];
- fp_port_free(port);
- }
- i = rocker_pci_ring_count(r);
-err_ring_alloc:
- for (--i; i >= 0; i--) {
- desc_ring_free(r->rings[i]);
- }
- g_free(r->rings);
-err_rings_alloc:
err_duplicate:
rocker_msix_uninit(r);
err_msix_init:
object_unparent(OBJECT(&r->msix_bar));
object_unparent(OBJECT(&r->mmio));
err_world_type_by_name:
-err_world_alloc:
for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
if (r->worlds[i]) {
world_free(r->worlds[i]);
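
Net effect on the realize() error path: err_no_mem, err_world_alloc,
err_rings_alloc, err_ring_alloc, and err_port_alloc disappear together with
their partial-unwind loops, leaving labels only for failures that can still
occur. A self-contained sketch of the surviving goto-unwind idiom; every
name in it is invented for illustration:

    #include <glib.h>

    struct dev { int *state; };

    static int irq_init(struct dev *d)    { (void)d; return 0; }
    static void irq_uninit(struct dev *d) { (void)d; }
    static int ring_init(struct dev *d)   { (void)d; return 0; }

    static int dev_realize(struct dev *d)
    {
        int err;

        d->state = g_new0(int, 1);  /* g_new0() aborts on OOM: no check */

        err = irq_init(d);          /* genuine failure mode: keeps a label */
        if (err) {
            goto err_irq_init;
        }
        err = ring_init(d);
        if (err) {
            goto err_ring_init;
        }
        return 0;

    err_ring_init:                  /* unwind strictly in reverse order */
        irq_uninit(d);
    err_irq_init:
        g_free(d->state);
        return err;
    }
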
info->buf_size = size;
}
- if (!info->buf) {
- return NULL;
- }
-
pci_dma_read(dev, le64_to_cpu(info->desc.buf_addr), info->buf, size);
return info->buf;
ring->head = ring->tail = 0;
ring->info = g_renew(DescInfo, ring->info, size);
- if (!ring->info) {
- return false;
- }
memset(ring->info, 0, size * sizeof(DescInfo));
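
Worth noting for the hunk above: g_renew() has realloc() semantics, so it
preserves old contents and leaves any grown region uninitialized; that is
why the explicit memset() has to stay even though the NULL check goes.
Standalone illustration:

    #include <glib.h>
    #include <string.h>

    typedef struct { int x; } Info;        /* stand-in for DescInfo */

    int main(void)
    {
        size_t size = 16;
        Info *info = NULL;

        /* Like realloc(): keeps old bytes, new space is garbage,
         * and it aborts on OOM instead of returning NULL. */
        info = g_renew(Info, info, size);
        memset(info, 0, size * sizeof(Info));  /* so zero it by hand */

        g_free(info);
        return 0;
    }
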
DescRing *ring;
ring = g_new0(DescRing, 1);
- if (!ring) {
- return NULL;
- }
ring->r = r;
ring->index = index;
int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
flow = g_new0(OfDpaFlow, 1);
- if (!flow) {
- return NULL;
- }
flow->cookie = cookie;
flow->mask.tbl_id = 0xffffffff;
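
One more GLib guarantee does the work here: g_new0() returns zero-filled
memory (and likewise aborts rather than returning NULL), so once the check
goes, only the fields with non-zero defaults, cookie and the all-ones
mask.tbl_id, need explicit stores. Simplified standalone version of the
pattern; the Flow struct is a stand-in, not the real OfDpaFlow:

    #include <glib.h>

    typedef struct {
        guint64 cookie;
        struct { guint32 tbl_id; } mask;
    } Flow;

    static Flow *flow_alloc(guint64 cookie)
    {
        Flow *flow = g_new0(Flow, 1);   /* zero-filled, never NULL */

        flow->cookie = cookie;          /* non-zero defaults only */
        flow->mask.tbl_id = 0xffffffff;
        return flow;
    }
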
{
OfDpaGroup *group = g_new0(OfDpaGroup, 1);
- if (!group) {
- return NULL;
- }
-
group->id = id;
return group;
}
flow = of_dpa_flow_alloc(cookie);
- if (!flow) {
- return -ROCKER_ENOMEM;
- }
err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
if (err) {
group->l2_flood.group_count =
    rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);
tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);
- if (!tlvs) {
- return -ROCKER_ENOMEM;
- }
g_free(group->l2_flood.group_ids);
group->l2_flood.group_ids =
g_new0(uint32_t, group->l2_flood.group_count);
- if (!group->l2_flood.group_ids) {
- err = -ROCKER_ENOMEM;
- goto err_out;
- }
rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);
}
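
On the group_count + 1 sizing above: rocker's TLV tables follow the netlink
convention of indexing by type/index starting at 1, so slot 0 is never
written and the array needs one extra element. The parser itself is not
shown in this excerpt, so read this as a hedged toy model of the convention,
with all names invented:

    #include <glib.h>

    /* Netlink-style fill: entries land at tb[type], types run 1..maxtype,
     * tb[0] stays NULL, hence the "+ 1" when sizing the table. */
    static void parse(const int *types, const char **vals, int n,
                      const char **tb, int maxtype)
    {
        for (int i = 0; i < n; i++) {
            if (types[i] > 0 && types[i] <= maxtype) {
                tb[types[i]] = vals[i];
            }
        }
    }

    int main(void)
    {
        const int maxtype = 2;
        const char **tb = g_new0(const char *, maxtype + 1); /* [0..maxtype] */
        const int types[] = { 1, 2 };
        const char *vals[] = { "a", "b" };

        parse(types, vals, 2, tb, maxtype);
        g_assert(tb[0] == NULL);        /* slot 0 is never used */
        g_free(tb);
        return 0;
    }
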
group = of_dpa_group_alloc(group_id);
- if (!group) {
- return -ROCKER_ENOMEM;
- }
err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
if (err) {