     qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
     for (i = 0; i < E1000E_NUM_QUEUES; i++) {
-        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
-                        E1000E_MAX_TX_FRAGS, core->has_vnet);
+        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
     }
     net_rx_pkt_init(&core->rx_pkt, core->has_vnet);
     PCIDevice *pci_dev;
     struct virtio_net_hdr virt_hdr;
-    bool has_virt_hdr;
     struct iovec *raw;
     uint32_t raw_frags;
 };
 void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags, bool has_virt_hdr)
+    uint32_t max_frags)
 {
     struct NetTxPkt *p = g_malloc0(sizeof *p);
     p->max_payload_frags = max_frags;
     p->max_raw_frags = max_frags;
-    p->has_virt_hdr = has_virt_hdr;
     p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
-    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
-        p->has_virt_hdr ? sizeof p->virt_hdr : 0;
+    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
     p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
     p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;
 bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
 {
+    bool using_vnet_hdr = qemu_get_using_vnet_hdr(nc->peer);
+
     assert(pkt);
-    if (!pkt->has_virt_hdr &&
+    if (!using_vnet_hdr &&
         pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
         net_tx_pkt_do_sw_csum(pkt);
     }
         }
     }
-    if (pkt->has_virt_hdr ||
+    if (using_vnet_hdr ||
         pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
+        int index = using_vnet_hdr ?
+            NET_TX_PKT_VHDR_FRAG : NET_TX_PKT_L2HDR_FRAG;
         net_tx_pkt_fix_ip6_payload_len(pkt);
-        net_tx_pkt_sendv(pkt, nc, pkt->vec,
-            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
+        net_tx_pkt_sendv(pkt, nc, pkt->vec + index,
+            pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - index);
         return true;
     }
  * @pkt: packet pointer
  * @pci_dev: PCI device processing this packet
  * @max_frags: max tx ip fragments
- * @has_virt_hdr: device uses virtio header.
  */
 void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags, bool has_virt_hdr);
+    uint32_t max_frags);
 /**
  * Clean all tx packet resources.
     /* Preallocate TX packet wrapper */
     VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
-                    s->max_tx_frags, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
     /* Read rings memory locations for RX queues */
 {
     VMXNET3State *s = opaque;
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
-                    s->max_tx_frags, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
     if (s->msix_used) {
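
For reference, a minimal caller-side sketch of the reworked API (not part of the patch): the helpers my_nic_setup_tx()/my_nic_xmit() and the MY_MAX_TX_FRAGS limit are hypothetical, and the includes assume the file lives under hw/net/ in the QEMU tree. It only illustrates that net_tx_pkt_init() no longer takes a vnet-header flag, while net_tx_pkt_send() now queries the peer itself via qemu_get_using_vnet_hdr().

/* Hypothetical caller sketch, not part of the patch above. */
#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net_tx_pkt.h"

#define MY_MAX_TX_FRAGS 64 /* hypothetical per-device fragment limit */

static void my_nic_setup_tx(struct NetTxPkt **tx_pkt, PCIDevice *pci_dev)
{
    /* The has_virt_hdr argument is gone; only the fragment limit remains. */
    net_tx_pkt_init(tx_pkt, pci_dev, MY_MAX_TX_FRAGS);
}

static bool my_nic_xmit(struct NetTxPkt *tx_pkt, NetClientState *nc)
{
    /*
     * net_tx_pkt_send() now checks qemu_get_using_vnet_hdr(nc->peer) itself:
     * if the peer consumes a virtio-net header it is sent as the first iovec,
     * otherwise that fragment is skipped and the checksum is computed in
     * software when the header requests it.
     */
    return net_tx_pkt_send(tx_pkt, nc);
}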