Commit d8c94048 authored by David S. Miller

Merge davem@nuts.davemloft.net:/disk1/BK/net-2.6

into kernel.bkbits.net:/home/davem/net-2.6
parents f9affc34 71c4ed54
......@@ -43,8 +43,21 @@ dev->get_stats:
dev->hard_start_xmit:
Synchronization: dev->xmit_lock spinlock.
When the driver sets NETIF_F_LLTX in dev->features this will be
called without holding xmit_lock. In this case the driver
has to lock by itself when needed. It is recommended to use a try lock
for this and return NETDEV_TX_LOCKED (-1) when the spin lock fails.
The locking there should also properly protect against
set_multicast_list.
Context: BHs disabled
Notes: netif_queue_stopped() is guaranteed false
Return codes:
o NETDEV_TX_OK everything ok.
o NETDEV_TX_BUSY Cannot transmit packet, try later
Usually a bug, means queue start/stop flow control is broken in
the driver. Note: the driver must NOT put the skb in its DMA ring.
o NETDEV_TX_LOCKED Locking failed, please retry quickly.
Only valid when NETIF_F_LLTX is set.
dev->tx_timeout:
Synchronization: dev->xmit_lock spinlock.
......
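To make the contract above concrete, here is a minimal sketch of an LLTX-style hard_start_xmit built on the pattern this patch uses in e1000 and tg3. The driver name and the helpers my_priv, tx_ring_full() and my_queue_to_hw() are hypothetical placeholders; only the try-lock pattern and the NETDEV_TX_* return codes come from the patch itself.

/* Sketch only: hypothetical LLTX driver, not part of this patch. */
static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* With NETIF_F_LLTX the core does not hold dev->xmit_lock, so the
	 * driver takes its own lock with a try lock and reports a
	 * collision instead of spinning (compare e1000_xmit_frame below). */
	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;	/* core requeues and retries */
	}

	if (tx_ring_full(priv)) {
		/* Usually a flow-control bug; the skb must NOT be placed
		 * in the DMA ring before returning BUSY. */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	my_queue_to_hw(priv, skb);	/* hand the packet to the hardware */
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}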
......@@ -1778,7 +1778,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(unlikely(skb->len <= 0)) {
dev_kfree_skb_any(skb);
return 0;
return NETDEV_TX_OK;
}
#ifdef NETIF_F_TSO
......@@ -1817,7 +1817,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (!spin_trylock(&adapter->tx_lock)) {
/* Collision - tell upper layer to requeue */
local_irq_restore(flags);
return -1;
return NETDEV_TX_LOCKED;
}
/* need: count + 2 desc gap to keep tail from touching
......@@ -1825,7 +1825,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return 1;
return NETDEV_TX_BUSY;
}
if(unlikely(adapter->hw.mac_type == e1000_82547)) {
......@@ -1833,7 +1833,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return 1;
return NETDEV_TX_BUSY;
}
}
......@@ -1856,7 +1856,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return 0;
return NETDEV_TX_OK;
}
/**
......
......@@ -4956,7 +4956,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
dev->stop = &SkGeClose;
dev->hard_start_xmit = &SkGeXmit;
dev->get_stats = &SkGeStats;
dev->last_stats = &SkGeStats;
dev->set_multicast_list = &SkGeSetRxMode;
dev->set_mac_address = &SkGeSetMacAddr;
dev->do_ioctl = &SkGeIoctl;
......@@ -5037,7 +5036,6 @@ static int __devinit skge_probe_one(struct pci_dev *pdev,
dev->stop = &SkGeClose;
dev->hard_start_xmit = &SkGeXmit;
dev->get_stats = &SkGeStats;
dev->last_stats = &SkGeStats;
dev->set_multicast_list = &SkGeSetRxMode;
dev->set_mac_address = &SkGeSetMacAddr;
dev->do_ioctl = &SkGeIoctl;
......
......@@ -2956,6 +2956,7 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
dma_addr_t mapping, int len, u32 flags,
u32 mss_and_is_end)
{
struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
int is_end = (mss_and_is_end & 0x1);
u32 mss = (mss_and_is_end >> 1);
u32 vlan_tag = 0;
......@@ -2967,35 +2968,11 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
flags &= 0xffff;
}
vlan_tag |= (mss << TXD_MSS_SHIFT);
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
txd->addr_hi = ((u64) mapping >> 32);
txd->addr_lo = ((u64) mapping & 0xffffffff);
txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
} else {
struct tx_ring_info *txr = &tp->tx_buffers[entry];
void __iomem *txd;
txd = (tp->regs +
NIC_SRAM_WIN_BASE +
NIC_SRAM_TX_BUFFER_DESC);
txd += (entry * TXD_SIZE);
/* Save some PIOs */
if (sizeof(dma_addr_t) != sizeof(u32))
writel(((u64) mapping >> 32),
txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
writel(((u64) mapping & 0xffffffff),
txd + TXD_ADDR + TG3_64BIT_REG_LOW);
writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
if (txr->prev_vlan_tag != vlan_tag) {
writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
txr->prev_vlan_tag = vlan_tag;
}
}
}
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
......@@ -3039,7 +3016,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
local_irq_save(flags);
if (!spin_trylock(&tp->tx_lock)) {
local_irq_restore(flags);
return -1;
return NETDEV_TX_LOCKED;
}
/* This is a hard error, log it. */
......@@ -3048,7 +3025,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tp->tx_lock, flags);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
return 1;
return NETDEV_TX_BUSY;
}
entry = tp->tx_prod;
......@@ -3188,19 +3165,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Packets are ready, update Tx producer idx local and on card. */
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 +
TG3_64BIT_REG_LOW), entry);
} else {
/* First, make sure tg3 sees last descriptor fully
* in SRAM.
*/
if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
tw32_tx_mbox((MAILBOX_SNDNIC_PROD_IDX_0 +
TG3_64BIT_REG_LOW), entry);
}
tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
tp->tx_prod = entry;
if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
......@@ -3211,7 +3176,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
return 0;
return NETDEV_TX_OK;
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
......@@ -3339,7 +3304,6 @@ static void tg3_free_rings(struct tg3 *tp)
*/
static void tg3_init_rings(struct tg3 *tp)
{
void __iomem *start, *end;
u32 i;
/* Free up all the SKBs. */
......@@ -3349,21 +3313,7 @@ static void tg3_init_rings(struct tg3 *tp)
memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
} else {
start = (tp->regs +
NIC_SRAM_WIN_BASE +
NIC_SRAM_TX_BUFFER_DESC);
end = start + TG3_TX_RING_BYTES;
while (start < end) {
writel(0, start);
start += 4;
}
for (i = 0; i < TG3_TX_RING_SIZE; i++)
tp->tx_buffers[i].prev_vlan_tag = 0;
}
/* Initialize invariants of the rings, we only set this
* stuff once. This works because the card does not
......@@ -3494,15 +3444,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
if (!tp->rx_rcb)
goto err_out;
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
&tp->tx_desc_mapping);
if (!tp->tx_ring)
goto err_out;
} else {
tp->tx_ring = NULL;
tp->tx_desc_mapping = 0;
}
tp->hw_status = pci_alloc_consistent(tp->pdev,
TG3_HW_STATUS_SIZE,
......@@ -4966,10 +4911,7 @@ static int tg3_reset_hw(struct tg3 *tp)
GRC_MODE_4X_NIC_SEND_RINGS |
GRC_MODE_NO_TX_PHDR_CSUM |
GRC_MODE_NO_RX_PHDR_CSUM);
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
else
tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
......@@ -5122,18 +5064,11 @@ static int tg3_reset_hw(struct tg3 *tp)
tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
tp->tx_desc_mapping,
(TG3_TX_RING_SIZE <<
BDINFO_FLAGS_MAXLEN_SHIFT),
NIC_SRAM_TX_BUFFER_DESC);
} else {
tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
0,
BDINFO_FLAGS_DISABLED,
NIC_SRAM_TX_BUFFER_DESC);
}
/* There is only one receive return ring on 5705/5750, no need
* to explicitly disable the others.
......@@ -5679,8 +5614,8 @@ static int tg3_open(struct net_device *dev)
spin_unlock(&tp->tx_lock);
spin_unlock_irq(&tp->lock);
/* If you move this call, make sure TG3_FLAG_HOST_TXDS in
* tp->tg3_flags is accurate at that new place.
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
err = tg3_alloc_consistent(tp);
if (err)
......@@ -6295,7 +6230,9 @@ static void tg3_set_rx_mode(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
__tg3_set_rx_mode(dev);
spin_unlock(&tp->tx_lock);
spin_unlock_irq(&tp->lock);
}
......@@ -7633,23 +7570,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
udelay(50);
tg3_nvram_init(tp);
/* Always use host TXDs, it performs better in particular
* with multi-frag packets. The tests below are kept here
* as documentation should we change this decision again
* in the future.
*/
tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
#if 0
/* Determine if TX descriptors will reside in
* main memory or in the chip SRAM.
*/
if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
#endif
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
......@@ -8452,11 +8372,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
printk("%2.2x%c", dev->dev_addr[i],
i == 5 ? '\n' : ':');
printk(KERN_INFO "%s: HostTXDS[%d] RXcsums[%d] LinkChgREG[%d] "
printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
"MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
"TSOcap[%d] \n",
dev->name,
(tp->tg3_flags & TG3_FLAG_HOST_TXDS) != 0,
(tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
(tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
(tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
......
......@@ -1549,7 +1549,7 @@
* exist only in the card's on-chip SRAM. All 16 send bds are under
* the same mode; they may not be configured individually.
*
* The mode we use is controlled by TG3_FLAG_HOST_TXDS in tp->tg3_flags.
* This driver always uses host memory TX descriptors.
*
* To use host memory TX descriptors:
* 1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
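As a reading aid, the host-TXD path that replaces the SRAM mode can be condensed from the hunks above into one sketch. Every call here (pci_alloc_consistent, tg3_set_bdinfo, tw32_tx_mbox and the GRC_MODE_HOST_SENDBDS bit) is taken from tg3_alloc_consistent, tg3_reset_hw and tg3_start_xmit in this patch; only the wrapper function and the omitted error handling are illustrative.

/* Illustrative condensation of the host-TXD setup used by this patch. */
static void tg3_host_txds_sketch(struct tg3 *tp)
{
	/* 1) Allocate the send ring in host memory (tg3_alloc_consistent). */
	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);

	/* 2) Tell the chip that send BDs live in host memory; tg3_reset_hw
	 *    later writes tp->grc_mode to the GRC_MODE register. */
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* 3) Point the send ring control block at the host ring
	 *    (tg3_reset_hw). */
	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
		       tp->tx_desc_mapping,
		       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
		       NIC_SRAM_TX_BUFFER_DESC);

	/* 4) At transmit time, tg3_set_txd() fills tp->tx_ring[entry] and
	 *    tg3_start_xmit() kicks the host producer index mailbox:
	 *    tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW),
	 *                 entry);
	 */
}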
......@@ -2005,7 +2005,6 @@ struct tg3 {
spinlock_t tx_lock;
/* TX descs are only used if TG3_FLAG_HOST_TXDS is set. */
struct tg3_tx_buffer_desc *tx_ring;
struct tx_ring_info *tx_buffers;
dma_addr_t tx_desc_mapping;
......@@ -2041,7 +2040,6 @@ struct tg3 {
u32 rx_offset;
u32 tg3_flags;
#define TG3_FLAG_HOST_TXDS 0x00000001
#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
#define TG3_FLAG_RX_CHECKSUMS 0x00000004
#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
......
......@@ -73,6 +73,11 @@ struct ethtool_ops;
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
/* Driver transmit return codes */
#define NETDEV_TX_OK 0 /* driver took care of packet */
#define NETDEV_TX_BUSY 1 /* driver tx path was busy */
#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
/*
* Compute the worst case header length according to the protocols
* used.
......@@ -479,7 +484,6 @@ struct net_device
/* class/net/name entry */
struct class_device class_dev;
struct net_device_stats* (*last_stats)(struct net_device *);
/* how much padding had been added by alloc_netdev() */
int padded;
};
......
......@@ -3,10 +3,7 @@
#define _IP_CONNTRACK_PROTOCOL_H
#include <linux/netfilter_ipv4/ip_conntrack.h>
/* length of buffer to which print_tuple/print_conntrack members are
* writing */
#define IP_CT_PRINT_BUFLEN 100
struct seq_file;
struct ip_conntrack_protocol
{
......@@ -31,13 +28,12 @@ struct ip_conntrack_protocol
int (*invert_tuple)(struct ip_conntrack_tuple *inverse,
const struct ip_conntrack_tuple *orig);
/* Print out the per-protocol part of the tuple. */
unsigned int (*print_tuple)(char *buffer,
/* Print out the per-protocol part of the tuple. Return like seq_* */
int (*print_tuple)(struct seq_file *,
const struct ip_conntrack_tuple *);
/* Print out the private part of the conntrack. */
unsigned int (*print_conntrack)(char *buffer,
const struct ip_conntrack *);
int (*print_conntrack)(struct seq_file *, const struct ip_conntrack *);
/* Returns verdict for packet, or -1 for invalid. */
int (*packet)(struct ip_conntrack *conntrack,
......
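Since the struct above only states the new signature, a minimal sketch of a conforming callback may help. It simply mirrors the udp_print_tuple conversion that appears later in this patch, passing seq_printf's return value straight back as the "Return like seq_*" comment asks; nothing here is new API.

#include <linux/seq_file.h>

/* Sketch of a per-protocol print_tuple under the seq_file interface;
 * modeled on the udp_print_tuple conversion below. */
static int example_print_tuple(struct seq_file *s,
			       const struct ip_conntrack_tuple *tuple)
{
	/* A non-zero return means the seq_file buffer overflowed, and the
	 * caller reports that upward, just like seq_printf itself. */
	return seq_printf(s, "sport=%hu dport=%hu ",
			  ntohs(tuple->src.u.udp.port),
			  ntohs(tuple->dst.u.udp.port));
}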
......@@ -1159,6 +1159,12 @@ static inline void nf_reset(struct sk_buff *skb)
skb->nf_debug = 0;
#endif
}
static inline void nf_reset_debug(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER_DEBUG
skb->nf_debug = 0;
#endif
}
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
......
......@@ -861,18 +861,6 @@ static int default_rebuild_header(struct sk_buff *skb)
}
/*
* Some old buggy device drivers change get_stats after registering
* the device. Try and trap them here.
* This can be eliminated when all devices are known fixed.
*/
static inline int get_stats_changed(struct net_device *dev)
{
int changed = dev->last_stats != dev->get_stats;
dev->last_stats = dev->get_stats;
return changed;
}
/**
* dev_open - prepare an interface for use.
* @dev: device to open
......@@ -896,14 +884,6 @@ int dev_open(struct net_device *dev)
if (dev->flags & IFF_UP)
return 0;
/*
* Check for broken device drivers.
*/
if (get_stats_changed(dev) && net_ratelimit()) {
printk(KERN_ERR "%s: driver changed get_stats after register\n",
dev->name);
}
/*
* Is it even present?
*/
......@@ -920,14 +900,6 @@ int dev_open(struct net_device *dev)
clear_bit(__LINK_STATE_START, &dev->state);
}
/*
* Check for more broken device drivers.
*/
if (get_stats_changed(dev) && net_ratelimit()) {
printk(KERN_ERR "%s: driver changed get_stats in open\n",
dev->name);
}
/*
* If it went open OK then:
*/
......
......@@ -408,7 +408,6 @@ int netdev_register_sysfs(struct net_device *net)
class_dev->class = &net_class;
class_dev->class_data = net;
net->last_stats = net->get_stats;
strlcpy(class_dev->class_id, net->name, BUS_ID_SIZE);
if ((ret = class_device_register(class_dev)))
......
......@@ -265,7 +265,7 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct ifinfomsg *ifm = NLMSG_DATA(nlh);
struct rtattr **ida = arg;
struct net_device *dev;
int err;
int err, send_addr_notify = 0;
dev = dev_get_by_index(ifm->ifi_index);
if (!dev)
......@@ -312,6 +312,7 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
err = dev->set_mac_address(dev, RTA_DATA(ida[IFLA_ADDRESS - 1]));
if (err)
goto out;
send_addr_notify = 1;
}
if (ida[IFLA_BROADCAST - 1]) {
......@@ -319,6 +320,7 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
goto out;
memcpy(dev->broadcast, RTA_DATA(ida[IFLA_BROADCAST - 1]),
dev->addr_len);
send_addr_notify = 1;
}
if (ida[IFLA_MTU - 1]) {
......@@ -365,7 +367,7 @@ static int do_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
err = 0;
out:
if (!err)
if (send_addr_notify)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
dev_put(dev);
......
......@@ -743,13 +743,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
if (skb->nfcache & NFC_IPVS_PROPERTY)
return NF_ACCEPT;
if (skb->ip_summed == CHECKSUM_HW) {
if (skb_checksum_help(pskb, (out == NULL)))
return NF_DROP;
if (skb != *pskb)
skb = *pskb;
}
iph = skb->nh.iph;
if (unlikely(iph->protocol == IPPROTO_ICMP)) {
int related, verdict = ip_vs_out_icmp(pskb, &related);
......@@ -993,13 +986,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff **pskb,
return NF_ACCEPT;
}
if (skb->ip_summed == CHECKSUM_HW) {
if (skb_checksum_help(pskb, (out == NULL)))
return NF_DROP;
if (skb != *pskb)
skb = *pskb;
}
iph = skb->nh.iph;
if (unlikely(iph->protocol == IPPROTO_ICMP)) {
int related, verdict = ip_vs_in_icmp(pskb, &related);
......
......@@ -124,11 +124,11 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
dst_release(old_dst);
}
#define IP_VS_XMIT(skb, rt) \
do { \
nf_reset(skb); \
nf_reset_debug(skb); \
(skb)->nfcache |= NFC_IPVS_PROPERTY; \
(skb)->ip_summed = CHECKSUM_NONE; \
NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \
(rt)->u.dst.dev, dst_output); \
} while (0)
......@@ -408,8 +408,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
ip_select_ident(iph, &rt->u.dst, NULL);
ip_send_check(iph);
skb->ip_summed = CHECKSUM_NONE;
/* Another hack: avoid icmp_send in ip_fragment */
skb->local_df = 1;
......
......@@ -34,14 +34,14 @@ static int generic_invert_tuple(struct ip_conntrack_tuple *tuple,
}
/* Print out the per-protocol part of the tuple. */
static unsigned int generic_print_tuple(char *buffer,
static int generic_print_tuple(struct seq_file *s,
const struct ip_conntrack_tuple *tuple)
{
return 0;
}
/* Print out the private part of the conntrack. */
static unsigned int generic_print_conntrack(char *buffer,
static int generic_print_conntrack(struct seq_file *s,
const struct ip_conntrack *state)
{
return 0;
......
......@@ -12,6 +12,7 @@
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/icmp.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter.h>
......@@ -70,17 +71,17 @@ static int icmp_invert_tuple(struct ip_conntrack_tuple *tuple,
}
/* Print out the per-protocol part of the tuple. */
static unsigned int icmp_print_tuple(char *buffer,
static int icmp_print_tuple(struct seq_file *s,
const struct ip_conntrack_tuple *tuple)
{
return sprintf(buffer, "type=%u code=%u id=%u ",
return seq_printf(s, "type=%u code=%u id=%u ",
tuple->dst.u.icmp.type,
tuple->dst.u.icmp.code,
ntohs(tuple->src.u.icmp.id));
}
/* Print out the private part of the conntrack. */
static unsigned int icmp_print_conntrack(char *buffer,
static int icmp_print_conntrack(struct seq_file *s,
const struct ip_conntrack *conntrack)
{
return 0;
......
......@@ -22,6 +22,7 @@
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
......@@ -178,19 +179,19 @@ static int sctp_invert_tuple(struct ip_conntrack_tuple *tuple,
}
/* Print out the per-protocol part of the tuple. */
static unsigned int sctp_print_tuple(char *buffer,
static int sctp_print_tuple(struct seq_file *s,
const struct ip_conntrack_tuple *tuple)
{
DEBUGP(__FUNCTION__);
DEBUGP("\n");
return sprintf(buffer, "sport=%hu dport=%hu ",
return seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.sctp.port),
ntohs(tuple->dst.u.sctp.port));
}
/* Print out the private part of the conntrack. */
static unsigned int sctp_print_conntrack(char *buffer,
static int sctp_print_conntrack(struct seq_file *s,
const struct ip_conntrack *conntrack)
{
enum sctp_conntrack state;
......@@ -202,7 +203,7 @@ static unsigned int sctp_print_conntrack(char *buffer,
state = conntrack->proto.sctp.state;
READ_UNLOCK(&sctp_lock);
return sprintf(buffer, "%s ", sctp_conntrack_names[state]);
return seq_printf(s, "%s ", sctp_conntrack_names[state]);
}
#define for_each_sctp_chunk(skb, sch, offset, count) \
......
......@@ -316,16 +316,16 @@ static int tcp_invert_tuple(struct ip_conntrack_tuple *tuple,
}
/* Print out the per-protocol part of the tuple. */
static unsigned int tcp_print_tuple(char *buffer,
static int tcp_print_tuple(struct seq_file *s,
const struct ip_conntrack_tuple *tuple)
{
return sprintf(buffer, "sport=%hu dport=%hu ",
return seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.tcp.port),
ntohs(tuple->dst.u.tcp.port));
}
/* Print out the private part of the conntrack. */
static unsigned int tcp_print_conntrack(char *buffer,
static int tcp_print_conntrack(struct seq_file *s,
const struct ip_conntrack *conntrack)
{
enum tcp_conntrack state;
......@@ -334,7 +334,7 @@ static unsigned int tcp_print_conntrack(char *buffer,
state = conntrack->proto.tcp.state;
READ_UNLOCK(&tcp_lock);
return sprintf(buffer, "%s ", tcp_conntrack_names[state]);
return seq_printf(s, "%s ", tcp_conntrack_names[state]);
}
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
......
......@@ -12,6 +12,7 @@
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/seq_file.h>
#include <net/checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
......@@ -46,16 +47,16 @@ static int udp_invert_tuple(struct ip_conntrack_tuple *tuple,
}
/* Print out the per-protocol part of the tuple. */
static unsigned int udp_print_tuple(char *buffer,
static int udp_print_tuple(struct seq_file *s,
const struct ip_conntrack_tuple *tuple)
{
return sprintf(buffer, "sport=%hu dport=%hu ",
return seq_printf(s, "sport=%hu dport=%hu ",
ntohs(tuple->src.u.udp.port),
ntohs(tuple->dst.u.udp.port));
}
/* Print out the private part of the conntrack. */
static unsigned int udp_print_conntrack(char *buffer,
static int udp_print_conntrack(struct seq_file *s,
const struct ip_conntrack *conntrack)
{
return 0;
......
......@@ -57,18 +57,13 @@ static int kill_proto(const struct ip_conntrack *i, void *data)
}
#ifdef CONFIG_PROC_FS
static unsigned int
print_tuple(char *buffer, const struct ip_conntrack_tuple *tuple,
static int
print_tuple(struct seq_file *s, const struct ip_conntrack_tuple *tuple,
struct ip_conntrack_protocol *proto)
{
int len;
len = sprintf(buffer, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
NIPQUAD(tuple->src.ip), NIPQUAD(tuple->dst.ip));
len += proto->print_tuple(buffer + len, tuple);
return len;
return proto->print_tuple(s, tuple);
}
#ifdef CONFIG_IP_NF_CT_ACCT
......@@ -85,39 +80,21 @@ seq_print_counters(struct seq_file *s, struct ip_conntrack_counter *counter)
static void *ct_seq_start(struct seq_file *s, loff_t *pos)
{
unsigned int *bucket;
/* strange seq_file api calls stop even if we fail,
* thus we need to grab lock since stop unlocks */
READ_LOCK(&ip_conntrack_lock);
if (*pos >= ip_conntrack_htable_size)
return NULL;
bucket = kmalloc(sizeof(unsigned int), GFP_KERNEL);
if (!bucket) {
return ERR_PTR(-ENOMEM);
}
*bucket = *pos;
return bucket;
return &ip_conntrack_hash[*pos];
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void ct_seq_stop(struct seq_file *s, void *v)
{
unsigned int *bucket = (unsigned int *) v;
*pos = ++(*bucket);
if (*pos >= ip_conntrack_htable_size) {
kfree(v);
return NULL;
}
return bucket;
}
static void ct_seq_stop(struct seq_file *s, void *v)
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
READ_UNLOCK(&ip_conntrack_lock);
(*pos)++;
if (*pos >= ip_conntrack_htable_size)
return NULL;
return &ip_conntrack_hash[*pos];
}
/* return 0 on success, 1 in case of error */
......@@ -126,7 +103,6 @@ static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
{
struct ip_conntrack *conntrack = hash->ctrack;
struct ip_conntrack_protocol *proto;
char buffer[IP_CT_PRINT_BUFLEN];
MUST_BE_READ_LOCKED(&ip_conntrack_lock);
......@@ -147,12 +123,12 @@ static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
? (conntrack->timeout.expires - jiffies)/HZ : 0) != 0)
return 1;
proto->print_conntrack(buffer, conntrack);
if (seq_puts(s, buffer))
if (proto->print_conntrack(s, conntrack))
return 1;
print_tuple(buffer, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
proto);
if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
proto))
return 1;
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
return 1;
......@@ -161,9 +137,8 @@ static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
if (seq_printf(s, "[UNREPLIED] "))
return 1;
print_tuple(buffer, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto);
if (seq_puts(s, buffer))
if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
proto))
return 1;
if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
......@@ -179,17 +154,18 @@ static int ct_seq_real_show(const struct ip_conntrack_tuple_hash *hash,
return 0;
}
static int ct_seq_show(struct seq_file *s, void *v)
{
unsigned int *bucket = (unsigned int *) v;
struct list_head *list = v;
int ret = 0;
if (LIST_FIND(&ip_conntrack_hash[*bucket], ct_seq_real_show,
struct ip_conntrack_tuple_hash *, s)) {
/* buffer was filled and unable to print that tuple */
return 1;
}
return 0;
/* FIXME: Simply truncates if hash chain too long. */
READ_LOCK(&ip_conntrack_lock);
if (LIST_FIND(list, ct_seq_real_show,
struct ip_conntrack_tuple_hash *, s))
ret = -ENOSPC;
READ_UNLOCK(&ip_conntrack_lock);
return ret;
}
static struct seq_operations ct_seq_ops = {
......@@ -255,7 +231,6 @@ static void exp_seq_stop(struct seq_file *s, void *v)
static int exp_seq_show(struct seq_file *s, void *v)
{
struct ip_conntrack_expect *expect = v;
char buffer[IP_CT_PRINT_BUFLEN];
if (expect->expectant->helper->timeout)
seq_printf(s, "%lu ", timer_pending(&expect->timeout)
......@@ -266,9 +241,8 @@ static int exp_seq_show(struct seq_file *s, void *v)
seq_printf(s, "use=%u proto=%u ", atomic_read(&expect->use),
expect->tuple.dst.protonum);
print_tuple(buffer, &expect->tuple,
return print_tuple(s, &expect->tuple,
__ip_ct_find_proto(expect->tuple.dst.protonum));
return seq_printf(s, "%s\n", buffer);
}
static struct seq_operations exp_seq_ops = {
......
......@@ -1032,7 +1032,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if(!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
fack_count++;
fack_count += TCP_SKB_CB(skb)->tso_factor;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
......
......@@ -495,10 +495,20 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
*/
TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
tcp_dec_pcount(&tp->lost_out, skb);
tcp_dec_pcount(&tp->left_out, skb);
}
/* Fix up tso_factor for both original and new SKB. */
tcp_set_skb_tso_factor(skb, tp->mss_cache_std);
tcp_set_skb_tso_factor(buff, tp->mss_cache_std);
if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
tcp_inc_pcount(&tp->lost_out, skb);
tcp_inc_pcount(&tp->left_out, skb);
}
if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) {
tcp_inc_pcount(&tp->lost_out, buff);
tcp_inc_pcount(&tp->left_out, buff);
......
......@@ -184,7 +184,8 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
if (dev->ifindex == oif)
return sprt;
if (dev->flags & IFF_LOOPBACK) {
if (sprt->rt6i_idev->dev->ifindex != oif) {
if (sprt->rt6i_idev == NULL ||
sprt->rt6i_idev->dev->ifindex != oif) {
if (strict && oif)
continue;
if (local && (!oif ||
......
......@@ -139,13 +139,8 @@ int qdisc_restart(struct net_device *dev)
if (netdev_nit)
dev_queue_xmit_nit(skb, dev);
/* hard_start_xmit returns:
0 device not ready
1 everything ok
-1 didn't get device lock (for LLTX)
*/
ret = dev->hard_start_xmit(skb, dev);
if (ret == 0) {
if (ret == NETDEV_TX_OK) {
if (!nolock) {
dev->xmit_lock_owner = -1;
spin_unlock(&dev->xmit_lock);
......@@ -153,10 +148,11 @@ int qdisc_restart(struct net_device *dev)
spin_lock(&dev->queue_lock);
return -1;
}
if (ret == -1 && nolock)
if (ret == NETDEV_TX_LOCKED && nolock)
goto collision;
}
/* NETDEV_TX_BUSY - we need to requeue */
/* Release the driver */
if (!nolock) {
dev->xmit_lock_owner = -1;
......@@ -176,7 +172,7 @@ int qdisc_restart(struct net_device *dev)
3. device is buggy (ppp)
*/
requeue:
requeue:
q->ops->requeue(skb, q);
netif_schedule(dev);
return 1;
......