| author | Sathya Perla <sathyap@serverengines.com> | 2010-03-22 20:41:12 +0000 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-03-23 13:22:39 -0700 |
| commit | 7101e111217581a36e2eeae7c4a3815d60673cbc (patch) | |
| tree | 360d8de508a994b90b6d27297c9bc32ab3869239 /drivers/net/benet | |
| parent | 7316ae88c43d47f6503f4c29b4973204e33c3411 (diff) | |
be2net: handle dma mapping errors in Tx path
Signed-off-by: Sathya Perla <sathyap@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/benet')
-rw-r--r-- | drivers/net/benet/be_main.c | 41
1 files changed, 38 insertions, 3 deletions
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 43e8032f9236..0800c6363908 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -386,26 +386,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+		bool unmap_single)
+{
+	dma_addr_t dma;
+
+	be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+	if (dma != 0) {
+		if (unmap_single)
+			pci_unmap_single(pdev, dma, wrb->frag_len,
+					PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pdev, dma, wrb->frag_len,
+					PCI_DMA_TODEVICE);
+	}
+}
+
 static int make_tx_wrbs(struct be_adapter *adapter,
 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
 {
-	u64 busaddr;
-	u32 i, copied = 0;
+	dma_addr_t busaddr;
+	int i, copied = 0;
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *first_skb = skb;
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
 	struct be_eth_hdr_wrb *hdr;
+	bool map_single = false;
+	u16 map_head;
 
 	hdr = queue_head_node(txq);
-	atomic_add(wrb_cnt, &txq->used);
 	queue_head_inc(txq);
+	map_head = txq->head;
 
 	if (skb->len > skb->data_len) {
 		int len = skb->len - skb->data_len;
 		busaddr = pci_map_single(pdev, skb->data, len,
 				PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, busaddr))
+			goto dma_err;
+		map_single = true;
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, len);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +441,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 		busaddr = pci_map_page(pdev, frag->page,
 				frag->page_offset,
 				frag->size, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, busaddr))
+			goto dma_err;
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, frag->size);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +462,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
 
 	return copied;
+dma_err:
+	txq->head = map_head;
+	while (copied) {
+		wrb = queue_head_node(txq);
+		unmap_tx_frag(pdev, wrb, map_single);
+		map_single = false;
+		copied -= wrb->frag_len;
+		queue_head_inc(txq);
+	}
+	return 0;
 }
 
 static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +496,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 	 * *BEFORE* ringing the tx doorbell, so that we serialze the
 	 * tx compls of the current transmit which'll wake up the queue
 	 */
+	atomic_add(wrb_cnt, &txq->used);
 	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
 		netif_stop_queue(netdev);
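
The fix follows the usual map/check/unwind pattern for streaming DMA on a Tx path: map the linear part and each paged fragment, test every mapping with pci_dma_mapping_error(), and on failure release everything mapped so far before reporting the error. The sketch below is a hypothetical, simplified illustration of that pattern, not the driver's actual code; the names map_skb_for_tx() and struct tx_frag_ctx are invented for illustration, and only the legacy PCI DMA calls (pci_map_single(), pci_map_page(), pci_dma_mapping_error(), pci_unmap_single(), pci_unmap_page()) are the ones the patch itself uses.

```c
/*
 * Hypothetical sketch of the map/check/unwind pattern (2.6.3x-era API).
 * struct tx_frag_ctx and map_skb_for_tx() are illustrative names only.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

struct tx_frag_ctx {
	dma_addr_t addr;
	u32 len;
	bool is_single;		/* mapped with pci_map_single()? */
};

static int map_skb_for_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  struct tx_frag_ctx *ctx)
{
	int i, mapped = 0;

	/* linear (header) part of the skb, if any */
	if (skb_headlen(skb)) {
		ctx[mapped].len = skb_headlen(skb);
		ctx[mapped].addr = pci_map_single(pdev, skb->data,
						  ctx[mapped].len,
						  PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, ctx[mapped].addr))
			goto unwind;
		ctx[mapped].is_single = true;
		mapped++;
	}

	/* paged fragments */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		ctx[mapped].len = frag->size;
		ctx[mapped].addr = pci_map_page(pdev, frag->page,
						frag->page_offset, frag->size,
						PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, ctx[mapped].addr))
			goto unwind;
		ctx[mapped].is_single = false;
		mapped++;
	}
	return mapped;

unwind:
	/* a mapping failed: release every fragment mapped so far */
	while (mapped--) {
		if (ctx[mapped].is_single)
			pci_unmap_single(pdev, ctx[mapped].addr,
					 ctx[mapped].len, PCI_DMA_TODEVICE);
		else
			pci_unmap_page(pdev, ctx[mapped].addr,
				       ctx[mapped].len, PCI_DMA_TODEVICE);
	}
	return -ENOMEM;
}
```

The new dma_err: label in make_tx_wrbs() implements the same unwind idea, but without a separate bookkeeping array: it rewinds the WRB queue head to map_head and reads each fragment's DMA address and length back out of the already-filled WRBs, using unmap_tx_frag() and the map_single flag to pick the matching unmap call. The atomic_add() of wrb_cnt is also moved from make_tx_wrbs() into be_xmit(), so the used count is only bumped once the mappings are known to have succeeded.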