Commit 37b4c9bd authored by Linus Torvalds

Import 1.1.5

parent b12c3ada
VERSION = 1
PATCHLEVEL = 1
SUBLEVEL = 4
SUBLEVEL = 5
all: Version zImage
......
......@@ -50,23 +50,17 @@ loopback_xmit(struct sk_buff *skb, struct device *dev)
cli();
if (dev->tbusy != 0) {
sti();
printk("loopback error: called by %08lx\n",
((unsigned long *)&skb)[-1]);
stats->tx_errors++;
return(1);
}
dev->tbusy = 1;
sti();
start_bh_atomic();
done = dev_rint(skb->data, skb->len, 0, dev);
if (skb->free) kfree_skb(skb, FREE_WRITE);
end_bh_atomic();
while (done != 1) {
start_bh_atomic();
done = dev_rint(NULL, 0, 0, dev);
end_bh_atomic();
}
stats->tx_packets++;
......
This diff is collapsed.
......@@ -319,6 +319,7 @@ int dev_close(struct device *dev)
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
unsigned long flags;
int where = 0; /* used to say if the packet should go */
/* at the front or the back of the */
/* queue. */
......@@ -335,7 +336,6 @@ void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
IS_SKB(skb);
skb->dev = dev;
start_bh_atomic();
/*
* This just eliminates some race conditions, but not all...
......@@ -348,7 +348,6 @@ void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
*/
printk("dev_queue_xmit: worked around a missed interrupt\n");
dev->hard_start_xmit(NULL, dev);
end_bh_atomic();
return;
}
......@@ -376,33 +375,27 @@ void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
*/
if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
end_bh_atomic();
return;
}
/*
* This is vitally important. We _MUST_ keep packets in order. While tcp/ip
* suffers only a slow down some IPX apps, and all the AX.25 code will break
* if it occurs out of order.
*
* This is commented out while I fix a few 'side effects'
*/
save_flags(flags);
cli();
if (!where) {
skb_queue_tail(dev->buffs + pri,skb);
skb = skb_dequeue(dev->buffs + pri);
}
restore_flags(flags);
if ((where==1 || skb_peek(&dev->buffs[pri])==NULL) && dev->hard_start_xmit(skb, dev) == 0)
{
end_bh_atomic();
if (dev->hard_start_xmit(skb, dev) == 0) {
return;
}
/*
* Transmission failed, put skb back into a list.
*/
if(where)
skb_queue_head(&dev->buffs[pri],skb);
else
skb_queue_tail(&dev->buffs[pri],skb);
end_bh_atomic();
cli();
skb_queue_head(dev->buffs + pri,skb);
restore_flags(flags);
}
/*
......
......@@ -28,7 +28,6 @@
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
#include <linux/string.h>
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
......@@ -39,6 +38,7 @@
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include "snmp.h"
#include "ip.h"
#include "route.h"
......@@ -313,8 +313,10 @@ static void icmp_unreach(struct icmphdr *icmph, struct sk_buff *skb)
* Handle ICMP_REDIRECT.
*/
static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, struct device *dev)
static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb,
struct device *dev, unsigned long source)
{
struct rtable *rt;
struct iphdr *iph;
unsigned long ip;
......@@ -340,8 +342,16 @@ static void icmp_redirect(struct icmphdr *icmph, struct sk_buff *skb, struct dev
#endif
case ICMP_REDIR_HOST:
/*
* Add better route to host
* Add better route to host.
* But first check that the redirect
* comes from the old gateway..
*/
rt = ip_rt_route(ip, NULL, NULL);
if (!rt)
break;
if (rt->rt_gateway != source)
break;
printk("redirect from %08lx\n", source);
ip_rt_add((RTF_DYNAMIC | RTF_MODIFIED | RTF_HOST | RTF_GATEWAY),
ip, 0, icmph->un.gateway, dev);
break;
......@@ -657,7 +667,7 @@ int icmp_rcv(struct sk_buff *skb1, struct device *dev, struct options *opt,
return(0);
case ICMP_REDIRECT:
icmp_statistics.IcmpInRedirects++;
icmp_redirect(icmph, skb1, dev);
icmp_redirect(icmph, skb1, dev, saddr);
return(0);
case ICMP_ECHO:
icmp_statistics.IcmpInEchos++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment