Commit 3e71f6dd authored by Ron Mercer's avatar Ron Mercer Committed by Jeff Garzik

qla3xxx: bugfix: Fixed jumbo frame handling for 3032 chip.

The scatter/gather lists were not being built correctly.  When
large frames spanned several buffers, the chip would panic.
Signed-off-by: default avatarRon Mercer <ron.mercer@qlogic.com>
Signed-off-by: default avatarJeff Garzik <jeff@garzik.org>
parent 97916330
...@@ -2122,11 +2122,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb, ...@@ -2122,11 +2122,13 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
if (ip) { if (ip) {
if (ip->protocol == IPPROTO_TCP) { if (ip->protocol == IPPROTO_TCP) {
mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC; mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
OB_3032MAC_IOCB_REQ_IC;
mac_iocb_ptr->ip_hdr_off = offset; mac_iocb_ptr->ip_hdr_off = offset;
mac_iocb_ptr->ip_hdr_len = ip->ihl; mac_iocb_ptr->ip_hdr_len = ip->ihl;
} else if (ip->protocol == IPPROTO_UDP) { } else if (ip->protocol == IPPROTO_UDP) {
mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC; mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
OB_3032MAC_IOCB_REQ_IC;
mac_iocb_ptr->ip_hdr_off = offset; mac_iocb_ptr->ip_hdr_off = offset;
mac_iocb_ptr->ip_hdr_len = ip->ihl; mac_iocb_ptr->ip_hdr_len = ip->ihl;
} }
...@@ -2134,51 +2136,29 @@ static void ql_hw_csum_setup(struct sk_buff *skb, ...@@ -2134,51 +2136,29 @@ static void ql_hw_csum_setup(struct sk_buff *skb,
} }
/* /*
* The difference between 3022 and 3032 sends: * Map the buffers for this transmit. This will return
* 3022 only supports a simple single segment transmission. * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
* 3032 supports checksumming and scatter/gather lists (fragments).
* The 3032 supports sglists by using the 3 addr/len pairs (ALP)
* in the IOCB plus a chain of outbound address lists (OAL) that
* each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
* will used to point to an OAL when more ALP entries are required.
* The IOCB is always the top of the chain followed by one or more
* OALs (when necessary).
*/ */
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) static int ql_send_map(struct ql3_adapter *qdev,
struct ob_mac_iocb_req *mac_iocb_ptr,
struct ql_tx_buf_cb *tx_cb,
struct sk_buff *skb)
{ {
struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
struct ql_tx_buf_cb *tx_cb;
u32 tot_len = skb->len;
struct oal *oal; struct oal *oal;
struct oal_entry *oal_entry; struct oal_entry *oal_entry;
int len; int len = skb_headlen(skb);
struct ob_mac_iocb_req *mac_iocb_ptr;
u64 map; u64 map;
int seg_cnt, seg = 0; int seg_cnt, seg = 0;
int frag_cnt = (int)skb_shinfo(skb)->nr_frags; int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags)); seg_cnt = tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags));
if(seg_cnt == -1) { if(seg_cnt == -1) {
printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__); printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
return NETDEV_TX_OK; return NETDEV_TX_BUSY;
} }
mac_iocb_ptr = tx_cb->queue_entry; /*
mac_iocb_ptr->opcode = qdev->mac_ob_opcode; * Map the skb buffer first.
mac_iocb_ptr->flags |= qdev->mb_bit_mask; */
mac_iocb_ptr->transaction_id = qdev->req_producer_index;
mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
tx_cb->skb = skb;
if (skb->ip_summed == CHECKSUM_PARTIAL)
ql_hw_csum_setup(skb, mac_iocb_ptr);
len = skb_headlen(skb);
map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
...@@ -2235,6 +2215,55 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) ...@@ -2235,6 +2215,55 @@ static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
oal_entry->len = oal_entry->len =
cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY); cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
} }
return NETDEV_TX_OK;
}
/*
* The difference between 3022 and 3032 sends:
* 3022 only supports a simple single segment transmission.
* 3032 supports checksumming and scatter/gather lists (fragments).
* The 3032 supports sglists by using the 3 addr/len pairs (ALP)
* in the IOCB plus a chain of outbound address lists (OAL) that
* each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
* The IOCB is always the top of the chain followed by one or more
* OALs (when necessary).
*/
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
struct ql_tx_buf_cb *tx_cb;
u32 tot_len = skb->len;
struct ob_mac_iocb_req *mac_iocb_ptr;
if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
if((tx_cb->seg_count = ql_get_seg_count((skb_shinfo(skb)->nr_frags))) == -1) {
printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
return NETDEV_TX_OK;
}
mac_iocb_ptr = tx_cb->queue_entry;
mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
mac_iocb_ptr->flags |= qdev->mb_bit_mask;
mac_iocb_ptr->transaction_id = qdev->req_producer_index;
mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
tx_cb->skb = skb;
if (skb->ip_summed == CHECKSUM_PARTIAL)
ql_hw_csum_setup(skb, mac_iocb_ptr);
if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
return NETDEV_TX_BUSY;
}
wmb(); wmb();
qdev->req_producer_index++; qdev->req_producer_index++;
if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
......
...@@ -1094,7 +1094,6 @@ struct oal_entry { ...@@ -1094,7 +1094,6 @@ struct oal_entry {
u32 len; u32 len;
#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */ #define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */ #define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
u32 reserved;
}; };
struct oal { struct oal {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment