"Error reading flash.\n");
goto exit;
}
-
}
	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}
}
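/*
 * For context, a minimal sketch of what a validation helper like
 * ql_validate_flash() does: check a short signature string, then sum the
 * image as 16-bit little-endian words and require the result to be zero.
 * The signature length and return conventions here are assumptions.
 */
static int ql_validate_flash(struct ql_adapter *qdev, u32 size,
			     const char *str)
{
	int i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	/* The image must begin with the expected signature. */
	if (strncmp((char *)&qdev->flash, str, 4))
		return -EINVAL;

	/* A valid image checksums to zero over 'size' words. */
	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	return csum;	/* non-zero indicates a corrupt image */
}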
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
		/* Stash each mapping so the completion path can unmap it later. */
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
}
/* Save the number of segments we've mapped. */
tx_ring_desc->map_cnt = map_idx;
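/*
 * The saved address/length pairs are consumed by the unmap path.  A
 * simplified sketch of the driver's ql_unmap_send() counterpart (the real
 * version also distinguishes dma_unmap_single() for the linear part of the
 * skb from dma_unmap_page() for page fragments):
 */
static void ql_unmap_send_sketch(struct ql_adapter *qdev,
				 struct tx_ring_desc *tx_ring_desc,
				 int mapped)
{
	int i;

	for (i = 0; i < mapped; i++)
		dma_unmap_page(&qdev->pdev->dev,
			       dma_unmap_addr(&tx_ring_desc->map[i], mapaddr),
			       dma_unmap_len(&tx_ring_desc->map[i], maplen),
			       DMA_TO_DEVICE);
}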
	struct tx_ring *tx_ring;	/* TX queue to wake once entries are reclaimed */
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"cq_id = %d, prod = %d, cnsmr = %d\n",
rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();	/* read the response contents only after seeing the new index */
switch (net_rsp->opcode) {
case OPCODE_OB_MAC_TSO_IOCB:
case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		}
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
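/*
 * Each loop iteration ends by advancing the consumer index past the entry
 * just handled; ql_update_cq() above does that.  A minimal sketch of the
 * ring arithmetic, where cq_len and cq_base are assumed field names for
 * the queue length and base entry:
 */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	/* Wrap both the index and the entry pointer at the end of the queue. */
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}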
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
if (skb_is_gso(skb)) {
int err;
__be16 l3_proto = vlan_get_protocol(skb);
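		/*
		 * A hedged sketch of how the GSO branch typically continues:
		 * make the headers writable, then describe the segmentation
		 * job (opcode, total length, header length, MSS) in the TSO
		 * IOCB.  The field names below are assumptions drawn from
		 * the request structure, not verified driver code.
		 */
		err = skb_cow_head(skb, 0);	/* headers must be writable */
		if (err < 0)
			return err;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		/* l3_proto (ETH_P_IP vs ETH_P_IPV6) then selects checksum flags. */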
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
struct rx_ring *rx_ring)
{
/*
* Allocate the completion queue for this rx_ring.
*/
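	/*
	 * A minimal sketch of that allocation, assuming rx_ring fields
	 * cq_base, cq_size, and cq_base_dma: the completion queue lives in
	 * coherent DMA memory shared with the chip.
	 */
	rx_ring->cq_base =
		dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
				   &rx_ring->cq_base_dma, GFP_ATOMIC);
	if (!rx_ring->cq_base) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}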
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
/* Don't kill the reset worker thread if we
* are in the process of recovery.
*/
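	/*
	 * A sketch of the guarded cancel that comment describes; the flag
	 * and work-item names (QL_ADAPTER_UP, asic_recovery_work, mpi_*)
	 * are assumptions patterned on the rest of the driver.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_recovery_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);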