addr = le64_to_cpu(dp->buffer_addr);
if (!tx->skip_cp) {
- if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
+ if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner,
+ addr, split_size)) {
tx->skip_cp = true;
}
}
}
tx->skip_cp = false;
- net_tx_pkt_reset(tx->tx_pkt, core->owner);
+ net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner);
tx->sum_needed = 0;
tx->cptse = 0;
qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
- net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
+ net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
}
net_rx_pkt_init(&core->rx_pkt);
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
+ net_tx_pkt_reset(core->tx[i].tx_pkt,
+ net_tx_pkt_unmap_frag_pci, core->owner);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
+ net_tx_pkt_reset(core->tx[i].tx_pkt,
+ net_tx_pkt_unmap_frag_pci, core->owner);
memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
core->tx[i].skip_cp = false;
}
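Taken together, the e1000e hunks apply one pattern: the packet object no longer caches a PCIDevice, so mapping goes through the new net_tx_pkt_add_raw_fragment_pci() helper and unmapping through the callback handed to net_tx_pkt_reset(). A minimal sketch of the resulting TX path, assuming a hypothetical MyNICState with a standard NIC queue (names are illustrative, not the actual e1000e code):

/* Sketch only: descriptor fetch and EOP detection elided. */
static void my_nic_tx_one_desc(MyNICState *s, struct NetTxPkt *pkt,
                               dma_addr_t addr, size_t len, bool eop)
{
    /* Map the guest buffer through the owning PCI function. */
    if (!net_tx_pkt_add_raw_fragment_pci(pkt, PCI_DEVICE(s), addr, len)) {
        return; /* mapping failed; real devices set a skip flag here */
    }

    if (eop) {
        net_tx_pkt_send(pkt, qemu_get_queue(s->nic));
        /* Fragments are unmapped by the callback; pkt holds no device. */
        net_tx_pkt_reset(pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s));
    }
}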
length = cmd_type_len & 0xFFFF;
if (!tx->skip_cp) {
- if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, buffer_addr, length)) {
+ if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
+ buffer_addr, length)) {
tx->skip_cp = true;
}
}
tx->first = true;
tx->skip_cp = false;
- net_tx_pkt_reset(tx->tx_pkt, dev);
+ net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
}
}
d = core->owner;
}
- net_tx_pkt_reset(txr->tx->tx_pkt, d);
-
while (!igb_ring_empty(core, txi)) {
base = igb_ring_head_descr(core, txi);
core->mac[EICR] |= eic;
igb_set_interrupt_cause(core, E1000_ICR_TXDW);
}
+
+ net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
}
static uint32_t
core->vmstate = qemu_add_vm_change_state_handler(igb_vm_state_change, core);
for (i = 0; i < IGB_NUM_QUEUES; i++) {
- net_tx_pkt_init(&core->tx[i].tx_pkt, NULL, E1000E_MAX_TX_FRAGS);
+ net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
}
net_rx_pkt_init(&core->rx_pkt);
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < IGB_NUM_QUEUES; i++) {
- net_tx_pkt_reset(core->tx[i].tx_pkt, NULL);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
tx = &core->tx[i];
- net_tx_pkt_reset(tx->tx_pkt, NULL);
memset(tx->ctx, 0, sizeof(tx->ctx));
tx->first = true;
tx->skip_cp = false;
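The igb hunks likewise drop the NULL device that was previously threaded through init and reset, which is the point of the decoupling: a caller that owns no PCIDevice can now pair net_tx_pkt_add_raw_fragment() with its own release callback. A hypothetical non-PCI user, feeding heap copies and freeing them through a custom NetTxPktFreeFrag (free_heap_frag and queue_copy_fragment are invented for illustration):

/* Fragments come from the heap, so the free callback is just g_free. */
static void free_heap_frag(void *context, void *base, size_t len)
{
    g_free(base);
}

static void queue_copy_fragment(struct NetTxPkt *pkt,
                                const void *data, size_t len)
{
    void *copy = g_memdup2(data, len);

    if (!net_tx_pkt_add_raw_fragment(pkt, copy, len)) {
        g_free(copy);
        return;
    }
    /* ... after sending: net_tx_pkt_reset(pkt, free_heap_frag, NULL); */
}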
*/
#include "qemu/osdep.h"
-#include "net_tx_pkt.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "net/net.h"
#include "hw/pci/pci_device.h"
+#include "net_tx_pkt.h"
enum {
NET_TX_PKT_VHDR_FRAG = 0,
/* TX packet private context */
struct NetTxPkt {
- PCIDevice *pci_dev;
-
struct virtio_net_hdr virt_hdr;
struct iovec *raw;
uint8_t l4proto;
};
-void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
- uint32_t max_frags)
+void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags)
{
struct NetTxPkt *p = g_malloc0(sizeof *p);
- p->pci_dev = pci_dev;
-
p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
p->raw = g_new(struct iovec, max_frags);
}
}
-static bool net_tx_pkt_add_raw_fragment_common(struct NetTxPkt *pkt,
- void *base, size_t len)
+bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len)
{
struct iovec *ventry;
assert(pkt);
#endif
}
-void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *pci_dev)
+void net_tx_pkt_reset(struct NetTxPkt *pkt,
+ NetTxPktFreeFrag callback, void *context)
{
int i;
assert(pkt->raw);
for (i = 0; i < pkt->raw_frags; i++) {
assert(pkt->raw[i].iov_base);
- net_tx_pkt_unmap_frag_pci(pkt->pci_dev,
- pkt->raw[i].iov_base,
- pkt->raw[i].iov_len);
+ callback(context, pkt->raw[i].iov_base, pkt->raw[i].iov_len);
}
}
- pkt->pci_dev = pci_dev;
pkt->raw_frags = 0;
pkt->hdr_len = 0;
pci_dma_unmap(context, base, len, DMA_DIRECTION_TO_DEVICE, 0);
}
-bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
- size_t len)
+bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev,
+ dma_addr_t pa, size_t len)
{
dma_addr_t mapped_len = len;
- void *base = pci_dma_map(pkt->pci_dev, pa, &mapped_len,
- DMA_DIRECTION_TO_DEVICE);
+ void *base = pci_dma_map(pci_dev, pa, &mapped_len, DMA_DIRECTION_TO_DEVICE);
if (!base) {
return false;
}
- if (mapped_len != len ||
- !net_tx_pkt_add_raw_fragment_common(pkt, base, len)) {
- net_tx_pkt_unmap_frag_pci(pkt->pci_dev, base, mapped_len);
+ if (mapped_len != len || !net_tx_pkt_add_raw_fragment(pkt, base, len)) {
+ net_tx_pkt_unmap_frag_pci(pci_dev, base, mapped_len);
return false;
}
}
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
- NetTxPktCallback callback,
+ NetTxPktSend callback,
void *context)
{
uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
}
bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
- NetTxPktCallback callback, void *context)
+ NetTxPktSend callback, void *context)
{
assert(pkt);
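For net_tx_pkt_send_custom(), the renamed NetTxPktSend callback receives two views of the packet: the first iovec pair excludes the virtio-net header, the second includes it. A toy callback in the spirit of this interface, taking the plain (no vnet header) path (my_tx_send is illustrative):

static void my_tx_send(void *context,
                       const struct iovec *iov, int iov_cnt,
                       const struct iovec *virt_iov, int virt_iov_cnt)
{
    NetClientState *nc = context;

    /* A peer that negotiated a vnet header would send virt_iov instead. */
    qemu_sendv_packet(nc, iov, iov_cnt);
}

/* usage: net_tx_pkt_send_custom(pkt, false, my_tx_send, nc); */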
struct NetTxPkt;
-typedef void (* NetTxPktCallback)(void *, const struct iovec *, int, const struct iovec *, int);
+typedef void (*NetTxPktFreeFrag)(void *, void *, size_t);
+typedef void (*NetTxPktSend)(void *, const struct iovec *, int, const struct iovec *, int);
/**
* Init function for tx packet functionality
*
* @pkt: packet pointer
- * @pci_dev: PCI device processing this packet
* @max_frags: max tx ip fragments
*/
-void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
- uint32_t max_frags);
+void net_tx_pkt_init(struct NetTxPkt **pkt, uint32_t max_frags);
/**
* Clean all tx packet resources.
* populate data fragment into pkt context.
*
* @pkt: packet
- * @pa: physical address of fragment
+ * @base: pointer to fragment
* @len: length of fragment
*
*/
-bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, hwaddr pa,
- size_t len);
+bool net_tx_pkt_add_raw_fragment(struct NetTxPkt *pkt, void *base, size_t len);
/**
* Fix ip header fields and calculate IP header and pseudo header checksums.
* reset tx packet private context (needs to be called between packets)
*
* @pkt: packet
- * @dev: PCI device processing the next packet
- *
+ * @callback: function to free the fragments
+ * @context: pointer to be passed to the callback
*/
-void net_tx_pkt_reset(struct NetTxPkt *pkt, PCIDevice *dev);
+void net_tx_pkt_reset(struct NetTxPkt *pkt,
+ NetTxPktFreeFrag callback, void *context);
/**
* Unmap a fragment mapped from a PCI device.
*/
void net_tx_pkt_unmap_frag_pci(void *context, void *base, size_t len);
+/**
+ * map data fragment from PCI device and populate it into pkt context.
+ *
+ * @pkt: packet
+ * @pci_dev: PCI device owning fragment
+ * @pa: physical address of fragment
+ * @len: length of fragment
+ */
+bool net_tx_pkt_add_raw_fragment_pci(struct NetTxPkt *pkt, PCIDevice *pci_dev,
+ dma_addr_t pa, size_t len);
+
/**
* Send packet to qemu. Handles sw offloads if vhdr is not supported.
*
* @ret: operation result
*/
bool net_tx_pkt_send_custom(struct NetTxPkt *pkt, bool offload,
- NetTxPktCallback callback, void *context);
+ NetTxPktSend callback, void *context);
/**
* parse raw packet data and analyze offload requirements.
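Read end to end, the header changes give the packet object the following lifecycle; a sketch under assumed caller state (pci_dev, nc, max_frags, desc_addr, desc_len are placeholders):

struct NetTxPkt *pkt;

net_tx_pkt_init(&pkt, max_frags);          /* no PCIDevice at init time */

if (net_tx_pkt_add_raw_fragment_pci(pkt, pci_dev, desc_addr, desc_len) &&
    net_tx_pkt_parse(pkt)) {
    net_tx_pkt_send(pkt, nc);
}

net_tx_pkt_reset(pkt, net_tx_pkt_unmap_frag_pci, pci_dev);
net_tx_pkt_uninit(pkt);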
data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE;
data_pa = txd.addr;
- if (!net_tx_pkt_add_raw_fragment(s->tx_pkt,
- data_pa,
- data_len)) {
+ if (!net_tx_pkt_add_raw_fragment_pci(s->tx_pkt, PCI_DEVICE(s),
+ data_pa, data_len)) {
s->skip_current_tx_pkt = true;
}
}
vmxnet3_complete_packet(s, qidx, txd_idx);
s->tx_sop = true;
s->skip_current_tx_pkt = false;
- net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
+ net_tx_pkt_reset(s->tx_pkt,
+ net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s));
}
}
}
{
if (s->device_active) {
VMW_CBPRN("Deactivating vmxnet3...");
- net_tx_pkt_reset(s->tx_pkt, PCI_DEVICE(s));
+ net_tx_pkt_reset(s->tx_pkt, net_tx_pkt_unmap_frag_pci, PCI_DEVICE(s));
net_tx_pkt_uninit(s->tx_pkt);
net_rx_pkt_uninit(s->rx_pkt);
s->device_active = false;
/* Preallocate TX packet wrapper */
VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
- net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+ net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags);
net_rx_pkt_init(&s->rx_pkt);
/* Read rings memory locations for RX queues */
{
VMXNET3State *s = opaque;
- net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
+ net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags);
net_rx_pkt_init(&s->rx_pkt);
if (s->msix_used) {
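The final hunk is the migration path: the wrapper holds host iovecs that cannot be migrated, so vmxnet3 rebuilds it on post-load with the same device-free init. A condensed sketch of that pattern (handler name is illustrative):

static int my_vmxnet3_post_load(void *opaque, int version_id)
{
    VMXNET3State *s = opaque;

    /* Recreate the TX/RX packet wrappers after the state is restored. */
    net_tx_pkt_init(&s->tx_pkt, s->max_tx_frags);
    net_rx_pkt_init(&s->rx_pkt);
    return 0;
}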