Commit 7286384b authored by Julian Wiedmann, committed by David S. Miller

s390/qeth: split L2 xmit paths

l2_hard_start_xmit() actually doesn't contain much shared code,
and having device-specific paths makes isolated changes a lot easier.
So split it into three routines for IQD, OSN and OSD/OSM/OSX.

No functional change.
Signed-off-by: Julian Wiedmann <jwi@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ae2b27b8
...@@ -676,143 +676,164 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) ...@@ -676,143 +676,164 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
qeth_promisc_to_bridge(card); qeth_promisc_to_bridge(card);
} }
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb,
struct net_device *dev) struct qeth_qdio_out_q *queue, int cast_type)
{ {
unsigned int data_offset = ETH_HLEN;
struct qeth_hdr *hdr;
int rc; int rc;
struct qeth_hdr *hdr = NULL;
int elements = 0;
struct qeth_card *card = dev->ml_priv;
struct sk_buff *new_skb = skb;
int cast_type = qeth_l2_get_cast_type(card, skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int data_offset = -1;
int elements_needed = 0;
int hd_len = 0;
unsigned int nr_frags;
if (card->qdio.do_prio_queueing || (cast_type && hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
card->info.is_multicast_different)) if (!hdr)
queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, return -ENOMEM;
qeth_get_ip_version(skb), cast_type)]; qeth_l2_fill_header(card, hdr, skb, cast_type);
else hdr->hdr.l2.pkt_length = skb->len;
queue = card->qdio.out_qs[card->qdio.default_out_queue]; skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr),
data_offset);
if ((card->state != CARD_STATE_UP) || !card->lan_online) { if (!qeth_get_elements_no(card, skb, 1, data_offset)) {
card->stats.tx_carrier_errors++; rc = -E2BIG;
goto tx_drop; goto out;
} }
rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset,
data_offset);
out:
if (rc)
kmem_cache_free(qeth_core_header_cache, hdr);
return rc;
}
if ((card->info.type == QETH_CARD_TYPE_OSN) && static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb,
(skb->protocol == htons(ETH_P_IPV6))) struct qeth_qdio_out_q *queue, int cast_type)
goto tx_drop; {
unsigned int elements, nr_frags;
if (card->options.performance_stats) { struct sk_buff *skb_copy;
card->perf_stats.outbound_cnt++; struct qeth_hdr *hdr;
card->perf_stats.outbound_start_time = qeth_get_micros(); int rc;
}
netif_stop_queue(dev);
/* fix hardware limitation: as long as we do not have sbal /* fix hardware limitation: as long as we do not have sbal
* chaining we can not send long frag lists * chaining we can not send long frag lists
*/ */
if ((card->info.type != QETH_CARD_TYPE_IQD) && if (!qeth_get_elements_no(card, skb, 0, 0)) {
!qeth_get_elements_no(card, new_skb, 0, 0)) { rc = skb_linearize(skb);
int lin_rc = skb_linearize(new_skb);
if (card->options.performance_stats) { if (card->options.performance_stats) {
if (lin_rc) if (rc)
card->perf_stats.tx_linfail++; card->perf_stats.tx_linfail++;
else else
card->perf_stats.tx_lin++; card->perf_stats.tx_lin++;
} }
if (lin_rc) if (rc)
goto tx_drop; return rc;
} }
nr_frags = skb_shinfo(new_skb)->nr_frags; nr_frags = skb_shinfo(skb)->nr_frags;
if (card->info.type == QETH_CARD_TYPE_OSN) /* create a copy with writeable headroom */
hdr = (struct qeth_hdr *)skb->data; skb_copy = skb_realloc_headroom(skb, sizeof(struct qeth_hdr));
else { if (!skb_copy)
if (card->info.type == QETH_CARD_TYPE_IQD) { return -ENOMEM;
new_skb = skb; hdr = skb_push(skb_copy, sizeof(struct qeth_hdr));
data_offset = ETH_HLEN; qeth_l2_fill_header(card, hdr, skb_copy, cast_type);
hd_len = ETH_HLEN; if (skb_copy->ip_summed == CHECKSUM_PARTIAL)
hdr = kmem_cache_alloc(qeth_core_header_cache, qeth_l2_hdr_csum(card, hdr, skb_copy);
GFP_ATOMIC);
if (!hdr)
goto tx_drop;
elements_needed++;
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
hdr->hdr.l2.pkt_length = new_skb->len;
skb_copy_from_linear_data(new_skb,
((char *)hdr) + sizeof(*hdr),
ETH_HLEN);
} else {
/* create a clone with writeable headroom */
new_skb = skb_realloc_headroom(skb,
sizeof(struct qeth_hdr));
if (!new_skb)
goto tx_drop;
hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
qeth_l2_fill_header(card, hdr, new_skb, cast_type);
if (new_skb->ip_summed == CHECKSUM_PARTIAL)
qeth_l2_hdr_csum(card, hdr, new_skb);
}
}
elements = qeth_get_elements_no(card, new_skb, elements_needed, elements = qeth_get_elements_no(card, skb_copy, 0, 0);
(data_offset > 0) ? data_offset : 0);
if (!elements) { if (!elements) {
if (data_offset >= 0) rc = -E2BIG;
kmem_cache_free(qeth_core_header_cache, hdr); goto out;
goto tx_drop;
} }
if (qeth_hdr_chk_and_bounce(skb_copy, &hdr, sizeof(*hdr))) {
if (card->info.type != QETH_CARD_TYPE_IQD) { rc = -EINVAL;
if (qeth_hdr_chk_and_bounce(new_skb, &hdr, goto out;
sizeof(struct qeth_hdr_layer2))) }
goto tx_drop; rc = qeth_do_send_packet(card, queue, skb_copy, hdr, elements);
rc = qeth_do_send_packet(card, queue, new_skb, hdr, out:
elements);
} else
rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
data_offset, hd_len);
if (!rc) { if (!rc) {
card->stats.tx_packets++; /* tx success, free dangling original */
card->stats.tx_bytes += tx_bytes; dev_kfree_skb_any(skb);
if (card->options.performance_stats && nr_frags) { if (card->options.performance_stats && nr_frags) {
card->perf_stats.sg_skbs_sent++; card->perf_stats.sg_skbs_sent++;
/* nr_frags + skb->data */ /* nr_frags + skb->data */
card->perf_stats.sg_frags_sent += nr_frags + 1; card->perf_stats.sg_frags_sent += nr_frags + 1;
} }
if (new_skb != skb)
dev_kfree_skb_any(skb);
rc = NETDEV_TX_OK;
} else { } else {
if (data_offset >= 0) /* tx fail, free copy */
kmem_cache_free(qeth_core_header_cache, hdr); dev_kfree_skb_any(skb_copy);
}
return rc;
}
if (rc == -EBUSY) { static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
if (new_skb != skb) struct qeth_qdio_out_q *queue)
dev_kfree_skb_any(new_skb); {
return NETDEV_TX_BUSY; unsigned int elements;
} else struct qeth_hdr *hdr;
if (skb->protocol == htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
hdr = (struct qeth_hdr *)skb->data;
elements = qeth_get_elements_no(card, skb, 0, 0);
if (!elements)
return -E2BIG;
if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr)))
return -EINVAL;
return qeth_do_send_packet(card, queue, skb, hdr, elements);
}
static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
int cast_type = qeth_l2_get_cast_type(card, skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
if (card->qdio.do_prio_queueing || (cast_type &&
card->info.is_multicast_different))
queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb,
qeth_get_ip_version(skb), cast_type)];
else
queue = card->qdio.out_qs[card->qdio.default_out_queue];
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
card->stats.tx_carrier_errors++;
goto tx_drop; goto tx_drop;
} }
netif_wake_queue(dev); if (card->options.performance_stats) {
card->perf_stats.outbound_cnt++;
card->perf_stats.outbound_start_time = qeth_get_micros();
}
netif_stop_queue(dev);
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
rc = qeth_l2_xmit_osn(card, skb, queue);
break;
case QETH_CARD_TYPE_IQD:
rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type);
break;
default:
rc = qeth_l2_xmit_osa(card, skb, queue, cast_type);
}
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
if (card->options.performance_stats) if (card->options.performance_stats)
card->perf_stats.outbound_time += qeth_get_micros() - card->perf_stats.outbound_time += qeth_get_micros() -
card->perf_stats.outbound_start_time; card->perf_stats.outbound_start_time;
return rc; netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
return NETDEV_TX_BUSY;
} /* else fall through */
tx_drop: tx_drop:
card->stats.tx_dropped++; card->stats.tx_dropped++;
card->stats.tx_errors++; card->stats.tx_errors++;
if ((new_skb != skb) && new_skb)
dev_kfree_skb_any(new_skb);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
netif_wake_queue(dev); netif_wake_queue(dev);
return NETDEV_TX_OK; return NETDEV_TX_OK;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment