Commit 4e700bcd authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

parents 6aba74f2 a84b50ce
@@ -165,7 +165,6 @@ static uint32_t fpga_tx(struct solos_card *);
static irqreturn_t solos_irq(int irq, void *dev_id);
static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
static int list_vccs(int vci);
static void release_vccs(struct atm_dev *dev);
static int atm_init(struct solos_card *, struct device *);
static void atm_remove(struct solos_card *);
static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
@@ -384,7 +383,6 @@ static int process_status(struct solos_card *card, int port, struct sk_buff *skb
/* Anything but 'Showtime' is down */
if (strcmp(state_str, "Showtime")) {
atm_dev_signal_change(card->atmdev[port], ATM_PHY_SIG_LOST);
release_vccs(card->atmdev[port]);
dev_info(&card->dev->dev, "Port %d: %s\n", port, state_str);
return 0;
}
@@ -697,7 +695,7 @@ void solos_bh(unsigned long card_arg)
size);
}
if (atmdebug) {
dev_info(&card->dev->dev, "Received: device %d\n", port);
dev_info(&card->dev->dev, "Received: port %d\n", port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
@@ -710,8 +708,8 @@ void solos_bh(unsigned long card_arg)
le16_to_cpu(header->vci));
if (!vcc) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Received packet for unknown VCI.VPI %d.%d on port %d\n",
le16_to_cpu(header->vci), le16_to_cpu(header->vpi),
dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
port);
continue;
}
@@ -830,28 +828,6 @@ static int list_vccs(int vci)
return num_found;
}
static void release_vccs(struct atm_dev *dev)
{
int i;
write_lock_irq(&vcc_sklist_lock);
for (i = 0; i < VCC_HTABLE_SIZE; i++) {
struct hlist_head *head = &vcc_hash[i];
struct hlist_node *node, *tmp;
struct sock *s;
struct atm_vcc *vcc;
sk_for_each_safe(s, node, tmp, head) {
vcc = atm_sk(s);
if (vcc->dev == dev) {
vcc_release_async(vcc, -EPIPE);
sk_del_node_init(s);
}
}
}
write_unlock_irq(&vcc_sklist_lock);
}
static int popen(struct atm_vcc *vcc)
{
@@ -1018,8 +994,15 @@ static uint32_t fpga_tx(struct solos_card *card)
/* Clean up and free oldskb now it's gone */
if (atmdebug) {
struct pkt_hdr *header = (void *)oldskb->data;
int size = le16_to_cpu(header->size);
skb_pull(oldskb, sizeof(*header));
dev_info(&card->dev->dev, "Transmitted: port %d\n",
port);
dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n",
size, le16_to_cpu(header->vpi),
le16_to_cpu(header->vci));
print_buffer(oldskb);
}
@@ -1262,7 +1245,7 @@ static int atm_init(struct solos_card *card, struct device *parent)
card->atmdev[i]->ci_range.vci_bits = 16;
card->atmdev[i]->dev_data = card;
card->atmdev[i]->phy_data = (void *)(unsigned long)i;
atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_UNKNOWN);
atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
if (!skb) {
@@ -31,24 +31,9 @@
#include <linux/connector.h>
#include <linux/delay.h>
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
container_of(work, struct cn_callback_entry, work);
struct cn_callback_data *d = &cbq->data;
struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
d->callback(msg, nsp);
kfree_skb(d->skb);
d->skb = NULL;
kfree(d->free);
}
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq;
@@ -59,17 +44,23 @@ cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
return NULL;
}
atomic_set(&cbq->refcnt, 1);
atomic_inc(&dev->refcnt);
cbq->pdev = dev;
snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
memcpy(&cbq->id.id, id, sizeof(struct cb_id));
cbq->data.callback = callback;
INIT_WORK(&cbq->work, &cn_queue_wrapper);
cbq->callback = callback;
return cbq;
}
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
flush_workqueue(cbq->pdev->cn_queue);
if (!atomic_dec_and_test(&cbq->refcnt))
return;
atomic_dec(&cbq->pdev->refcnt);
kfree(cbq);
}
@@ -85,13 +76,10 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
cbq = cn_queue_alloc_callback_entry(name, id, callback);
cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
if (!cbq)
return -ENOMEM;
atomic_inc(&dev->refcnt);
cbq->pdev = dev;
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, id)) {
@@ -104,8 +92,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
spin_unlock_bh(&dev->queue_lock);
if (found) {
cn_queue_free_callback(cbq);
atomic_dec(&dev->refcnt);
cn_queue_release_callback(cbq);
return -EINVAL;
}
@@ -130,10 +117,8 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
}
spin_unlock_bh(&dev->queue_lock);
if (found) {
cn_queue_free_callback(cbq);
atomic_dec(&dev->refcnt);
}
if (found)
cn_queue_release_callback(cbq);
}
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
@@ -151,12 +136,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
dev->nls = nls;
dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
if (!dev->cn_queue) {
kfree(dev);
return NULL;
}
return dev;
}
@@ -164,9 +143,6 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
flush_workqueue(dev->cn_queue);
destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
list_del(&cbq->callback_entry);
@@ -122,51 +122,28 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
*/
static int cn_call_callback(struct sk_buff *skb)
{
struct cn_callback_entry *__cbq, *__new_cbq;
struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
spin_lock_bh(&dev->cbdev->queue_lock);
list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
if (likely(!work_pending(&__cbq->work) &&
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
if (queue_work(dev->cbdev->cn_queue,
&__cbq->work))
err = 0;
else
err = -EINVAL;
} else {
struct cn_callback_data *d;
err = -ENOMEM;
__new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
if (__new_cbq) {
d = &__new_cbq->data;
d->skb = skb;
d->callback = __cbq->data.callback;
d->free = __new_cbq;
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
if (queue_work(dev->cbdev->cn_queue,
&__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);
err = -EINVAL;
}
}
}
list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
if (cn_cb_equal(&i->id.id, &msg->id)) {
atomic_inc(&i->refcnt);
cbq = i;
break;
}
}
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
}
return err;
}
@@ -1996,13 +1996,15 @@ static int atl2_set_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
ptr = (u32 *)eeprom_buff;
ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0])))
return -EIO;
if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0]))) {
ret_val = -EIO;
goto out;
}
ptr++;
}
if (((eeprom->offset + eeprom->len) & 3)) {
@@ -2011,18 +2013,22 @@ static int atl2_set_eeprom(struct net_device *netdev,
* only the first byte of the word is being modified
*/
if (!atl2_read_eeprom(hw, last_dword * 4,
&(eeprom_buff[last_dword - first_dword])))
return -EIO;
&(eeprom_buff[last_dword - first_dword]))) {
ret_val = -EIO;
goto out;
}
}
/* Device's eeprom is always little-endian, word addressable */
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_dword - first_dword + 1; i++) {
if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i]))
return -EIO;
if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i])) {
ret_val = -EIO;
goto out;
}
}
out:
kfree(eeprom_buff);
return ret_val;
}
@@ -75,15 +75,9 @@ static int dongle_id = 0; /* default: probe */
/* We can't guess the type of connected dongle, user *must* supply it. */
module_param(dongle_id, int, 0);
/* FIXME : we should not need this, because instances should be automatically
* managed by the PCI layer. Especially that we seem to only be using the
* first entry. Jean II */
/* Max 4 instances for now */
static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
/* Some prototypes */
static int via_ircc_open(int i, chipio_t * info, unsigned int id);
static int via_ircc_close(struct via_ircc_cb *self);
static int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
unsigned int id);
static int via_ircc_dma_receive(struct via_ircc_cb *self);
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
int iobase);
@@ -215,7 +209,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
if (via_ircc_open(0, &info,0x3076) == 0)
if (via_ircc_open(pcidev, &info, 0x3076) == 0)
rc=0;
} else
rc = -ENODEV; //IR not turn on
@@ -254,7 +248,7 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
if (via_ircc_open(0, &info,0x3096) == 0)
if (via_ircc_open(pcidev, &info, 0x3096) == 0)
rc=0;
} else
rc = -ENODEV; //IR not turn on !!!!!
@@ -264,48 +258,10 @@ static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_devi
return rc;
}
/*
* Function via_ircc_clean ()
*
* Close all configured chips
*
*/
static void via_ircc_clean(void)
{
int i;
IRDA_DEBUG(3, "%s()\n", __func__);
for (i=0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
via_ircc_close(dev_self[i]);
}
}
static void __devexit via_remove_one (struct pci_dev *pdev)
{
IRDA_DEBUG(3, "%s()\n", __func__);
/* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
* to get our driver instance and call directly via_ircc_close().
* See vlsi_ir for details...
* Jean II */
via_ircc_clean();
/* FIXME : This should be in via_ircc_close(), because here we may
* theoritically disable still configured devices :-( - Jean II */
pci_disable_device(pdev);
}
static void __exit via_ircc_cleanup(void)
{
IRDA_DEBUG(3, "%s()\n", __func__);
/* FIXME : This should be redundant, as pci_unregister_driver()
* should call via_remove_one() on each device.
* Jean II */
via_ircc_clean();
/* Cleanup all instances of the driver */
pci_unregister_driver (&via_driver);
}
@@ -324,12 +280,13 @@ static const struct net_device_ops via_ircc_fir_ops = {
};
/*
* Function via_ircc_open (iobase, irq)
* Function via_ircc_open(pdev, iobase, irq)
*
* Open driver instance
*
*/
static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
unsigned int id)
{
struct net_device *dev;
struct via_ircc_cb *self;
@@ -337,9 +294,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
IRDA_DEBUG(3, "%s()\n", __func__);
if (i >= ARRAY_SIZE(dev_self))
return -ENOMEM;
/* Allocate new instance of the driver */
dev = alloc_irdadev(sizeof(struct via_ircc_cb));
if (dev == NULL)
@@ -349,13 +303,8 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
self->netdev = dev;
spin_lock_init(&self->lock);
/* FIXME : We should store our driver instance in the PCI layer,
* using pci_set_drvdata(), not in this array.
* See vlsi_ir for details... - Jean II */
/* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
/* Need to store self somewhere */
dev_self[i] = self;
self->index = i;
pci_set_drvdata(pdev, self);
/* Initialize Resource */
self->io.cfg_base = info->cfg_base;
self->io.fir_base = info->fir_base;
@@ -414,7 +363,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
@@ -423,7 +372,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
@@ -455,33 +404,32 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
via_hw_init(self);
return 0;
err_out4:
dma_free_coherent(NULL, self->tx_buff.truesize,
dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
err_out3:
dma_free_coherent(NULL, self->rx_buff.truesize,
dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
err_out2:
release_region(self->io.fir_base, self->io.fir_ext);
err_out1:
pci_set_drvdata(pdev, NULL);
free_netdev(dev);
dev_self[i] = NULL;
return err;
}
/*
* Function via_ircc_close (self)
* Function via_remove_one(pdev)
*
* Close driver instance
*
*/
static int via_ircc_close(struct via_ircc_cb *self)
static void __devexit via_remove_one(struct pci_dev *pdev)
{
struct via_ircc_cb *self = pci_get_drvdata(pdev);
int iobase;
IRDA_DEBUG(3, "%s()\n", __func__);
IRDA_ASSERT(self != NULL, return -1;);
iobase = self->io.fir_base;
ResetChip(iobase, 5); //hardware reset.
@@ -493,16 +441,16 @@ static int via_ircc_close(struct via_ircc_cb *self)
__func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
dma_free_coherent(NULL, self->tx_buff.truesize,
dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
self->tx_buff.head, self->tx_buff_dma);
if (self->rx_buff.head)
dma_free_coherent(NULL, self->rx_buff.truesize,
dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
self->rx_buff.head, self->rx_buff_dma);
dev_self[self->index] = NULL;
pci_set_drvdata(pdev, NULL);
free_netdev(self->netdev);
return 0;
pci_disable_device(pdev);
}
/*
@@ -442,11 +442,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
int err;
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
if (NULL == d->driver) {
int err;
d->driver = &genphy_driver.driver;
err = d->driver->probe(d);
@@ -474,7 +474,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
return phy_init_hw(phydev);
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
return err;
}
/**
@@ -433,4 +433,19 @@ config USB_SIERRA_NET
To compile this driver as a module, choose M here: the
module will be called sierra_net.
config USB_VL600
tristate "LG VL600 modem dongle"
depends on USB_NET_CDCETHER
select USB_ACM
help
Select this if you want to use an LG Electronics 4G/LTE usb modem
called VL600. This driver only handles the ethernet
interface exposed by the modem firmware. To establish a connection
you will first need a userspace program that sends the right
command to the modem through its CDC ACM port, and most
likely also a DHCP client. See this thread about using the
4G modem from Verizon:
http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
endmenu
@@ -27,4 +27,5 @@ obj-$(CONFIG_USB_IPHETH) += ipheth.o
obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
@@ -378,7 +378,7 @@ static void dumpspeed(struct usbnet *dev, __le32 *speeds)
__le32_to_cpu(speeds[1]) / 1000);
}
static void cdc_status(struct usbnet *dev, struct urb *urb)
void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
@@ -418,8 +418,9 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
break;
}
}
EXPORT_SYMBOL_GPL(usbnet_cdc_status);
static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status;
struct cdc_state *info = (void *) &dev->data;
@@ -441,6 +442,7 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
*/
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
static int cdc_manage_power(struct usbnet *dev, int on)
{
@@ -452,18 +454,18 @@ static const struct driver_info cdc_info = {
.description = "CDC Ethernet Device",
.flags = FLAG_ETHER,
// .check_connect = cdc_check_connect,
.bind = cdc_bind,
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
.status = usbnet_cdc_status,
.manage_power = cdc_manage_power,
};
static const struct driver_info mbm_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_WWAN,
.bind = cdc_bind,
.bind = usbnet_cdc_bind,
.unbind = usbnet_cdc_unbind,
.status = cdc_status,
.status = usbnet_cdc_status,
.manage_power = cdc_manage_power,
};
@@ -560,6 +562,13 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
/* LG Electronics VL600 wants additional headers on every frame */
{
USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/*
* WHITELIST!!!
*
@@ -387,8 +387,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
!dev->driver_info->rx_fixup (dev, skb))
goto error;
!dev->driver_info->rx_fixup (dev, skb)) {
/* With RX_ASSEMBLE, rx_fixup() must update counters */
if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++;
goto done;
}
// else network stack removes extra byte if we forced a short packet
if (skb->len) {
@@ -401,8 +405,8 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
netif_dbg(dev, rx_err, dev->net, "drop\n");
error:
dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -443,6 +443,7 @@ void atm_dev_signal_change(struct atm_dev *dev, char signal);
void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
/*
* This is approximately the algorithm used by alloc_skb.
@@ -88,8 +88,6 @@ struct cn_queue_dev {
atomic_t refcnt;
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -101,20 +99,13 @@ struct cn_callback_id {
struct cb_id id;
};
struct cn_callback_data {
struct sk_buff *skb;
void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
void *free;
};
struct cn_callback_entry {
struct list_head callback_entry;
struct work_struct work;
atomic_t refcnt;
struct cn_queue_dev *pdev;
struct cn_callback_id id;
struct cn_callback_data data;
void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
u32 seq, group;
};
@@ -138,13 +129,12 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
void cn_queue_release_callback(struct cn_callback_entry *);
struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);
int cn_cb_equal(struct cb_id *, struct cb_id *);
void cn_queue_wrapper(struct work_struct *work);
#endif /* __KERNEL__ */
#endif /* __CONNECTOR_H */
@@ -126,7 +126,7 @@ struct sk_buff;
* GRO uses frags we allocate at least 16 regardless of page size.
*/
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif
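The arithmetic behind that #if is easy to check outside the kernel. A minimal user-space sketch follows (illustrative only, not kernel code; the UL suffix on 16 is presumably there so both branches evaluate to unsigned long and later comparisons do not mix signedness):
/* Standalone illustration of how MAX_SKB_FRAGS is chosen above. */
#include <stdio.h>
#define FRAGS_FOR(page_size) \
	((65536UL / (page_size) + 2) < 16 ? 16UL : (65536UL / (page_size) + 2))
int main(void)
{
	/* 4 KiB pages: 65536/4096 + 2 = 18, so the computed value is used */
	printf("4 KiB pages:  %lu\n", FRAGS_FOR(4096UL));
	/* 64 KiB pages: 65536/65536 + 2 = 3, below the GRO minimum, so 16 is used */
	printf("64 KiB pages: %lu\n", FRAGS_FOR(65536UL));
	return 0;
}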
@@ -102,6 +102,7 @@ struct driver_info {
* Affects statistic (counters) and short packet handling.
*/
#define FLAG_MULTI_PACKET 0x1000
#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
@@ -172,7 +173,9 @@ struct cdc_state {
};
extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
extern void usbnet_cdc_status(struct usbnet *, struct urb *);
/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
@@ -286,5 +286,21 @@ static inline void ipv6_ib_mc_map(const struct in6_addr *addr,
buf[9] = broadcast[9];
memcpy(buf + 10, addr->s6_addr + 6, 10);
}
static inline int ipv6_ipgre_mc_map(const struct in6_addr *addr,
const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0) {
memcpy(buf, broadcast, 4);
} else {
/* v4mapped? */
if ((addr->s6_addr32[0] | addr->s6_addr32[1] |
(addr->s6_addr32[2] ^ htonl(0x0000ffff))) != 0)
return -EINVAL;
memcpy(buf, &addr->s6_addr32[3], 4);
}
return 0;
}
#endif
#endif
@@ -339,6 +339,14 @@ static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, ch
buf[16] = addr & 0x0f;
}
static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
memcpy(buf, broadcast, 4);
else
memcpy(buf, &naddr, sizeof(naddr));
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
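As a rough user-space illustration of the new GRE multicast mapping (a local copy of the ip_ipgre_mc_map() logic with made-up addresses, not kernel code): a device whose broadcast/NBMA address is non-zero keeps using that peer address, while an all-zero broadcast falls back to the multicast group address itself; the IPv6 variant above additionally insists on a v4-mapped destination in that fallback case.
/* User-space sketch of the ip_ipgre_mc_map() logic (not kernel code). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
static void ipgre_mc_map(uint32_t naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);		/* point-to-point: use the tunnel peer */
	else
		memcpy(buf, &naddr, sizeof(naddr));	/* multipoint: use the group address */
}
int main(void)
{
	unsigned char peer[4] = { 192, 0, 2, 1 };		/* hypothetical tunnel remote */
	unsigned char zero[4] = { 0, 0, 0, 0 };
	unsigned char group_bytes[4] = { 224, 0, 0, 251 };	/* 224.0.0.251 */
	unsigned char out[4];
	uint32_t group;
	memcpy(&group, group_bytes, sizeof(group));
	ipgre_mc_map(group, peer, (char *)out);
	printf("non-zero broadcast: %u.%u.%u.%u\n", out[0], out[1], out[2], out[3]);
	ipgre_mc_map(group, zero, (char *)out);
	printf("zero broadcast:     %u.%u.%u.%u\n", out[0], out[1], out[2], out[3]);
	return 0;
}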
@@ -252,6 +252,7 @@ void atm_dev_release_vccs(struct atm_dev *dev)
}
write_unlock_irq(&vcc_sklist_lock);
}
EXPORT_SYMBOL(atm_dev_release_vccs);
static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
@@ -1475,7 +1475,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
ip6h->payload_len == 0)
return 0;
len = ntohs(ip6h->payload_len);
len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
if (skb->len < len)
return -EINVAL;
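A short arithmetic check of why sizeof(*ip6h) has to be added (user-space sketch with example numbers, assuming, as the surrounding code suggests, that skb->len at this point covers the 40-byte IPv6 header as well as the payload):
/* Not kernel code: illustrates the length test above. */
#include <stdio.h>
int main(void)
{
	unsigned int ipv6hdr_sz  = 40;	/* sizeof(struct ipv6hdr) */
	unsigned int payload_len = 24;	/* e.g. a 24-byte MLD message */
	unsigned int skb_len     = 60;	/* truncated frame: header plus only 20 payload bytes */
	/* old check compared against the payload alone: 60 < 24 is false,
	 * so this truncated packet was not rejected */
	printf("old check rejects: %s\n", skb_len < payload_len ? "yes" : "no");
	/* new check adds the header: 60 < 64 is true, the packet is rejected */
	printf("new check rejects: %s\n",
	       skb_len < payload_len + ipv6hdr_sz ? "yes" : "no");
	return 0;
}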
@@ -213,7 +213,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
/* user has chosen a value so keep it */
if (br->flags & BR_SET_MAC_ADDR)
return;
return false;
list_for_each_entry(p, &br->port_list, list) {
if (addr == br_mac_zero ||
@@ -1454,6 +1454,27 @@ static inline void net_timestamp_check(struct sk_buff *skb)
__net_timestamp(skb);
}
static inline bool is_skb_forwardable(struct net_device *dev,
struct sk_buff *skb)
{
unsigned int len;
if (!(dev->flags & IFF_UP))
return false;
len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
if (skb->len <= len)
return true;
/* if TSO is enabled, we don't care about the length as the packet
* could be forwarded without being segmented before
*/
if (skb_is_gso(skb))
return true;
return false;
}
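For a feel of the numbers in that test, a user-space sketch with typical but made-up Ethernet values (MTU 1500, 14-byte hard header, 4-byte VLAN tag; the IFF_UP check is omitted here):
/* Standalone sketch of the is_skb_forwardable() length check above. */
#include <stdbool.h>
#include <stdio.h>
static bool forwardable(unsigned int skb_len, bool gso)
{
	unsigned int len = 1500 + 14 + 4;	/* mtu + hard_header_len + VLAN_HLEN = 1518 */
	if (skb_len <= len)
		return true;
	return gso;	/* oversized frames pass only if they can still be segmented */
}
int main(void)
{
	printf("1514-byte frame:         %d\n", forwardable(1514, false));	/* 1 */
	printf("9000-byte non-GSO frame: %d\n", forwardable(9000, false));	/* 0 */
	printf("9000-byte GSO frame:     %d\n", forwardable(9000, true));	/* 1 */
	return 0;
}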
/**
* dev_forward_skb - loopback an skb to another netif
*
@@ -1477,8 +1498,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_orphan(skb);
nf_reset(skb);
if (unlikely(!(dev->flags & IFF_UP) ||
(skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN)))) {
if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
@@ -215,6 +215,9 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
case ARPHRD_INFINIBAND:
ip_ib_mc_map(addr, dev->broadcast, haddr);
return 0;
case ARPHRD_IPGRE:
ip_ipgre_mc_map(addr, dev->broadcast, haddr);
return 0;
default:
if (dir) {
memcpy(haddr, dev->broadcast, dev->addr_len);
@@ -1068,6 +1068,7 @@ static void ip_fib_net_exit(struct net *net)
fib4_rules_exit(net);
#endif
rtnl_lock();
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct fib_table *tb;
struct hlist_head *head;
@@ -1080,6 +1081,7 @@ static void ip_fib_net_exit(struct net *net)
fib_free_table(tb);
}
}
rtnl_unlock();
kfree(net->ipv4.fib_table_hash);
}
@@ -341,6 +341,8 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
case ARPHRD_INFINIBAND:
ipv6_ib_mc_map(addr, dev->broadcast, buf);
return 0;
case ARPHRD_IPGRE:
return ipv6_ipgre_mc_map(addr, dev->broadcast, buf);
default:
if (dir) {
memcpy(buf, dev->broadcast, dev->addr_len);
@@ -1205,7 +1205,7 @@ SCTP_STATIC __init int sctp_init(void)
if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_assoc_hashtable = (struct sctp_hashbucket *)
__get_free_pages(GFP_ATOMIC, order);
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
pr_err("Failed association hash alloc\n");
@@ -1238,7 +1238,7 @@ SCTP_STATIC __init int sctp_init(void)
if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
__get_free_pages(GFP_ATOMIC, order);
__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
pr_err("Failed bind hash alloc\n");