Commit 51aa165c authored by Frank Blaschka, committed by David S. Miller

qeth: fix page breaks in hw headers

Turning on memory debugging showed that hardware headers can contain page
breaks. OSA does not allow this, so we add code that bounces the header
into the skb headroom whenever it would cross a page boundary. This patch
also fixes a problem where the skb->data part of a fragmented skb spans
multiple pages. (A minimal sketch of the check-and-bounce arithmetic
follows the commit header.)
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 43a65303
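
Before the diff, a minimal sketch of the page-break check and headroom bounce that the patch adds as qeth_hdr_chk_and_bounce(). This is plain userspace C with hypothetical names (crosses_page, chk_and_bounce) and a flat buffer standing in for the skb, so the arithmetic can be exercised on its own; it is not the driver code itself.

#include <stdio.h>
#include <string.h>

/* Local stand-ins for the kernel's page constants. */
#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Nonzero when the 'len' bytes starting at 'data' cross a page boundary. */
static int crosses_page(unsigned long data, int len)
{
	return (data & PAGE_MASK) != ((data + len - 1) & PAGE_MASK);
}

/*
 * If the hardware header would cross a page, shift the linear data
 * backwards into the headroom so the header fits in one page.
 * Returns 1 when there is not enough headroom, 0 on success.
 */
static int chk_and_bounce(unsigned char **data, int headroom, int linear_len,
			  int hdr_len)
{
	unsigned long addr = (unsigned long)*data;
	int inpage, rest;

	if (!crosses_page(addr, hdr_len))
		return 0;
	inpage = PAGE_SIZE - (addr % PAGE_SIZE);  /* bytes left in this page */
	rest = hdr_len - inpage;                  /* header bytes that spill over */
	if (rest > headroom)
		return 1;                         /* cannot bounce */
	memmove(*data - rest, *data, linear_len);
	*data -= rest;
	return 0;
}

int main(void)
{
	static unsigned char buf[2 * 4096] __attribute__((aligned(4096)));
	/* Place a 32-byte "header" 20 bytes before a page boundary. */
	unsigned char *data = buf + PAGE_SIZE - 20;
	int rc = chk_and_bounce(&data, 64, 100, 32);

	printf("rc=%d, %lu bytes left in the page at the shifted data pointer\n",
	       rc, PAGE_SIZE - ((unsigned long)data % PAGE_SIZE));
	return 0;
}

With 64 bytes of headroom the 12 spill-over bytes can be absorbed, so the call succeeds and the header region no longer crosses the boundary.
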
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -869,6 +869,7 @@ void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -57,48 +57,6 @@ static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
 
-static inline void __qeth_fill_buffer_frag(struct sk_buff *skb,
-		struct qdio_buffer *buffer, int is_tso,
-		int *next_element_to_fill)
-{
-	struct skb_frag_struct *frag;
-	int fragno;
-	unsigned long addr;
-	int element, cnt, dlen;
-
-	fragno = skb_shinfo(skb)->nr_frags;
-	element = *next_element_to_fill;
-	dlen = 0;
-
-	if (is_tso)
-		buffer->element[element].flags =
-			SBAL_FLAGS_MIDDLE_FRAG;
-	else
-		buffer->element[element].flags =
-			SBAL_FLAGS_FIRST_FRAG;
-	dlen = skb->len - skb->data_len;
-	if (dlen) {
-		buffer->element[element].addr = skb->data;
-		buffer->element[element].length = dlen;
-		element++;
-	}
-	for (cnt = 0; cnt < fragno; cnt++) {
-		frag = &skb_shinfo(skb)->frags[cnt];
-		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
-			frag->page_offset;
-		buffer->element[element].addr = (char *)addr;
-		buffer->element[element].length = frag->size;
-		if (cnt < (fragno - 1))
-			buffer->element[element].flags =
-				SBAL_FLAGS_MIDDLE_FRAG;
-		else
-			buffer->element[element].flags =
-				SBAL_FLAGS_LAST_FRAG;
-		element++;
-	}
-	*next_element_to_fill = element;
-}
-
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
 	if (card->info.guestlan) {
@@ -3020,13 +2978,11 @@ EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
		     struct sk_buff *skb, int elems)
 {
-	int elements_needed = 0;
+	int dlen = skb->len - skb->data_len;
+	int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
+		PFN_DOWN((unsigned long)skb->data);
 
-	if (skb_shinfo(skb)->nr_frags > 0)
-		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
-	if (elements_needed == 0)
-		elements_needed = 1 + (((((unsigned long) skb->data) %
-				PAGE_SIZE) + skb->len) >> PAGE_SHIFT);
+	elements_needed += skb_shinfo(skb)->nr_frags;
 	if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
 			"(Number=%d / Length=%d). Discarded.\n",
@@ -3037,15 +2993,35 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
 }
 EXPORT_SYMBOL_GPL(qeth_get_elements_no);
 
+int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
+{
+	int hroom, inpage, rest;
+
+	if (((unsigned long)skb->data & PAGE_MASK) !=
+	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
+		hroom = skb_headroom(skb);
+		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
+		rest = len - inpage;
+		if (rest > hroom)
+			return 1;
+		memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
+		skb->data -= rest;
+		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
+
 static inline void __qeth_fill_buffer(struct sk_buff *skb,
	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
	int offset)
 {
-	int length = skb->len;
+	int length = skb->len - skb->data_len;
 	int length_here;
 	int element;
 	char *data;
-	int first_lap ;
+	int first_lap, cnt;
+	struct skb_frag_struct *frag;
 
 	element = *next_element_to_fill;
 	data = skb->data;
@@ -3068,10 +3044,14 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 		length -= length_here;
 		if (!length) {
 			if (first_lap)
+				if (skb_shinfo(skb)->nr_frags)
+					buffer->element[element].flags =
+						SBAL_FLAGS_FIRST_FRAG;
+				else
 				buffer->element[element].flags = 0;
 			else
 				buffer->element[element].flags =
-					SBAL_FLAGS_LAST_FRAG;
+					SBAL_FLAGS_MIDDLE_FRAG;
 		} else {
 			if (first_lap)
 				buffer->element[element].flags =
@@ -3084,6 +3064,18 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 		element++;
 		first_lap = 0;
 	}
+
+	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		buffer->element[element].addr = (char *)page_to_phys(frag->page)
+			+ frag->page_offset;
+		buffer->element[element].length = frag->size;
+		buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG;
+		element++;
+	}
+
+	if (buffer->element[element - 1].flags)
+		buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG;
 
 	*next_element_to_fill = element;
 }
@@ -3124,12 +3116,8 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 		buf->next_element_to_fill++;
 	}
 
-	if (skb_shinfo(skb)->nr_frags == 0)
 		__qeth_fill_buffer(skb, buffer, large_send,
			(int *)&buf->next_element_to_fill, offset);
-	else
-		__qeth_fill_buffer_frag(skb, buffer, large_send,
-			(int *)&buf->next_element_to_fill);
 
 	if (!queue->do_pack) {
 		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -712,10 +712,13 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
			goto tx_drop;
	}
 
-	if (card->info.type != QETH_CARD_TYPE_IQD)
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		if (qeth_hdr_chk_and_bounce(new_skb,
+		    sizeof(struct qeth_hdr_layer2)))
+			goto tx_drop;
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements);
-	else
+	} else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					      elements, data_offset, hd_len);
	if (!rc) {
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2904,19 +2904,11 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
	unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
		tcp_hdr(skb)->doff * 4;
	int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
-	int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
+	int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
	elements += skb_shinfo(skb)->nr_frags;
	return elements;
 }
 
-static inline int qeth_l3_tso_check(struct sk_buff *skb)
-{
-	int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
-		(unsigned long)skb->data;
-	return (((unsigned long)skb->data & PAGE_MASK) !=
-		(((unsigned long)skb->data + len) & PAGE_MASK));
-}
-
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
	int rc;
@@ -3016,8 +3008,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	    (cast_type == RTN_UNSPEC)) {
		hdr = (struct qeth_hdr *)skb_push(new_skb,
				sizeof(struct qeth_hdr_tso));
-		if (qeth_l3_tso_check(new_skb))
-			QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
		qeth_tso_fill_header(card, hdr, new_skb);
@@ -3048,10 +3038,20 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
	elements_needed += elems;
	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
-	if (card->info.type != QETH_CARD_TYPE_IQD)
+	if (card->info.type != QETH_CARD_TYPE_IQD) {
+		int len;
+		if (large_send == QETH_LARGE_SEND_TSO)
+			len = ((unsigned long)tcp_hdr(new_skb) +
+				tcp_hdr(new_skb)->doff * 4) -
+				(unsigned long)new_skb->data;
+		else
+			len = sizeof(struct qeth_hdr_layer3);
+
+		if (qeth_hdr_chk_and_bounce(new_skb, len))
+			goto tx_drop;
		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements_needed);
-	else
+	} else
		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
					      elements_needed, data_offset, 0);
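
As an aside, a stand-alone sketch of the element-count arithmetic introduced in qeth_get_elements_no() above (local PFN_UP/PFN_DOWN macros and a hypothetical helper name, not the kernel headers), with a worked example of the case the commit message mentions: the linear part of a fragmented skb spanning two pages.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/*
 * Pages touched by 'dlen' bytes of linear data starting at 'data', plus
 * one buffer element per page fragment.  Illustrative helper only; the
 * driver does this inside qeth_get_elements_no().
 */
static unsigned long elements_no(unsigned long data, unsigned long dlen,
				 unsigned int nr_frags)
{
	unsigned long elements = PFN_UP(data + dlen - 1) - PFN_DOWN(data);

	return elements + nr_frags;
}

int main(void)
{
	/*
	 * 300 linear bytes starting 100 bytes before a page boundary, plus
	 * two page fragments: the linear data alone spans two pages, so the
	 * old "nr_frags + 1" estimate (3) would be one element short.
	 */
	unsigned long data = 8 * PAGE_SIZE - 100;

	printf("elements needed: %lu\n", elements_no(data, 300, 2)); /* 4 */
	return 0;
}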