Commit d122179a authored by David S. Miller

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	net/core/ethtool.c
parents 419c2046 b00916b1
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
 {
	struct atm_dev *dev;
	IADEV *iadev;
-	unsigned long flags;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
-	spin_lock_init(&iadev->misc_lock);
-	/* First fixes first. I don't want to think about this now. */
-	spin_lock_irqsave(&iadev->misc_lock, flags);
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
-		spin_unlock_irqrestore(&iadev->misc_lock, flags);
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
-	spin_unlock_irqrestore(&iadev->misc_lock, flags);
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	iadev->next_board = ia_boards;
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
	struct dle_q rx_dle_q;
	struct free_desc_q *rx_free_desc_qhead;
	struct sk_buff_head rx_dma_q;
-	spinlock_t rx_lock, misc_lock;
+	spinlock_t rx_lock;
	struct atm_vcc **rx_open;	/* list of all open VCs */
	u16 num_rx_desc, rx_buf_sz, rxing;
	u32 rx_pkt_ram, rx_tmp_cnt;
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
	struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
	struct solos_card *card = atmdev->dev_data;
	struct sk_buff *skb;
+	unsigned int len;

	spin_lock(&card->cli_queue_lock);
	skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
	if(skb == NULL)
		return sprintf(buf, "No data.\n");

-	memcpy(buf, skb->data, skb->len);
-	dev_dbg(&card->dev->dev, "len: %d\n", skb->len);
+	len = skb->len;
+	memcpy(buf, skb->data, len);
+	dev_dbg(&card->dev->dev, "len: %d\n", len);
	kfree_skb(skb);
-	return skb->len;
+	return len;
 }

 static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
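
Note on the console_show() change above: it is a use-after-free fix. kfree_skb() can free the buffer, so the later reads of skb->len (the debug print and the return value) touched freed memory. The cure is to cache any field still needed in a local before freeing. A minimal sketch of the pattern, with a made-up function name:

/* Sketch only: cache any field needed after the object is freed. */
static ssize_t copy_and_free(char *buf, struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* read while skb is still alive */

	memcpy(buf, skb->data, len);
	kfree_skb(skb);			/* skb and skb->len invalid from here on */
	return len;			/* safe: uses the cached copy */
}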
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
		}
		else if(callid>=0x0000 && callid<=0x7FFF)
		{
+			int len;
+
			pr_debug("%s: Got Incoming Call\n",
					sc_adapter[card]->devicename);
-			strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
-			strcpy(setup.eazmsn,
-				sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
+			len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
+					sizeof(setup.phone));
+			if (len >= sizeof(setup.phone))
+				continue;
+			len = strlcpy(setup.eazmsn,
+					sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+					sizeof(setup.eazmsn));
+			if (len >= sizeof(setup.eazmsn))
+				continue;
			setup.si1 = 7;
			setup.si2 = 0;
			setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
		/*
		 * Handle a GetMyNumber Rsp
		 */
		if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
-			strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
+			strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
+				rcvmsg.msg_data.byte_array,
+				sizeof(rcvmsg.msg_data.byte_array));
			continue;
		}
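
Both hunks above swap unbounded strcpy() for strlcpy(). strlcpy() always NUL-terminates and returns strlen(source), so a return value >= the destination size means the copy was truncated; the incoming-call path uses that to drop oversized numbers instead of writing past the fixed-size fields. A sketch of the idiom (hypothetical helper; -EMSGSIZE chosen arbitrarily):

/* Sketch only: bounded copy with an explicit truncation check. */
static int copy_number(char *dst, size_t dst_sz, const char *src)
{
	size_t n = strlcpy(dst, src, dst_sz);	/* returns strlen(src) */

	if (n >= dst_sz)
		return -EMSGSIZE;	/* src did not fit; refuse, don't overflow */
	return 0;
}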
@@ -1311,6 +1311,9 @@ fec_probe(struct platform_device *pdev)
	if (ret)
		goto failed_mii_init;

+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(ndev);
+
	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;
@@ -1217,7 +1217,8 @@ static void rtl8169_update_counters(struct net_device *dev)
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

-	counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+	counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+				      &paddr, GFP_KERNEL);
	if (!counters)
		return;
@@ -1238,7 +1239,8 @@ static void rtl8169_update_counters(struct net_device *dev)
	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

-	pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+	dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+			  paddr);
 }

 static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -3298,15 +3300,15 @@ static int rtl8169_open(struct net_device *dev)
	/*
	 * Rx and Tx desscriptors needs 256 bytes alignment.
-	 * pci_alloc_consistent provides more.
+	 * dma_alloc_coherent provides more.
	 */
-	tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
-					       &tp->TxPhyAddr);
+	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

-	tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
-					       &tp->RxPhyAddr);
+	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;
@@ -3340,12 +3342,12 @@ static int rtl8169_open(struct net_device *dev)
 err_release_ring_2:
	rtl8169_rx_clear(tp);
 err_free_rx_1:
-	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-			    tp->RxPhyAddr);
+	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
 err_free_tx_0:
-	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-			    tp->TxPhyAddr);
+	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
 err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
@@ -3981,7 +3983,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
 {
	struct pci_dev *pdev = tp->pci_dev;

-	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
@@ -4006,7 +4008,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
 static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
					    struct net_device *dev,
					    struct RxDesc *desc, int rx_buf_sz,
-					    unsigned int align)
+					    unsigned int align, gfp_t gfp)
 {
	struct sk_buff *skb;
	dma_addr_t mapping;
@@ -4014,13 +4016,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
	pad = align ? align : NET_IP_ALIGN;

-	skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+	skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
	if (!skb)
		goto err_out;

	skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);

-	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+	mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -4045,7 +4047,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
 }

 static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
-			   u32 start, u32 end)
+			   u32 start, u32 end, gfp_t gfp)
 {
	u32 cur;
@@ -4060,7 +4062,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
					   tp->RxDescArray + i,
-					   tp->rx_buf_sz, tp->align);
+					   tp->rx_buf_sz, tp->align, gfp);
		if (!skb)
			break;
@@ -4088,7 +4090,7 @@ static int rtl8169_init_ring(struct net_device *dev)
	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

-	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@ -4105,7 +4107,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
 {
	unsigned int len = tx_skb->len;

-	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+	dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+			 PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
@@ -4249,7 +4252,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
-		mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+		mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+					 PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4319,7 +4323,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
		tp->tx_skb[entry].skb = skb;
	}

-	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+				 PCI_DMA_TODEVICE);

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);
@@ -4482,8 +4487,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
	if (!skb)
		goto out;

-	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
-				    PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
+				PCI_DMA_FROMDEVICE);
	skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
	*sk_buff = skb;
	done = true;
@@ -4552,11 +4557,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
			}

			if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
-				pci_dma_sync_single_for_device(pdev, addr,
+				dma_sync_single_for_device(&pdev->dev, addr,
					pkt_size, PCI_DMA_FROMDEVICE);
				rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
			} else {
-				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+				dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
			}
@@ -4587,7 +4592,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

-	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;
@@ -4773,10 +4778,10 @@ static int rtl8169_close(struct net_device *dev)
	free_irq(dev->irq, dev);

-	pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-			    tp->RxPhyAddr);
-	pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-			    tp->TxPhyAddr);
+	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+			  tp->RxPhyAddr);
+	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;
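
The r8169 hunks above are a mechanical move from the legacy PCI DMA wrappers to the generic DMA API: struct pci_dev arguments become &pdev->dev, and coherent allocations take an explicit gfp_t (the old pci_alloc_consistent() implied GFP_ATOMIC). Threading gfp through rtl8169_rx_fill() and rtl8169_alloc_rx_skb() lets the initial ring fill at open() time sleep with GFP_KERNEL while refills from the receive interrupt stay GFP_ATOMIC. A sketch of the correspondence, assuming some struct pci_dev *pdev and a buffer size:

/* Sketch only: legacy wrapper -> generic DMA API.
 *
 *   pci_alloc_consistent(pdev, sz, &h)   -> dma_alloc_coherent(&pdev->dev, sz, &h, GFP_ATOMIC)
 *   pci_free_consistent(pdev, sz, p, h)  -> dma_free_coherent(&pdev->dev, sz, p, h)
 *   pci_map_single(pdev, p, sz, dir)     -> dma_map_single(&pdev->dev, p, sz, dir)
 *   pci_unmap_single(pdev, h, sz, dir)   -> dma_unmap_single(&pdev->dev, h, sz, dir)
 */
dma_addr_t handle;
void *ring = dma_alloc_coherent(&pdev->dev, size, &handle,
				GFP_KERNEL);	/* may sleep: process context only */
if (ring)
	dma_free_coherent(&pdev->dev, size, ring, handle);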
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
	int i, result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_msg_hdr *msg_hdr;
-	size_t pl_itr, pl_size, skb_len;
+	size_t pl_itr, pl_size;
	unsigned long flags;
-	unsigned num_pls, single_last;
+	unsigned num_pls, single_last, skb_len;

	skb_len = skb->len;
-	d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+	d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
		  i2400m, skb, skb_len);
	result = -EIO;
	msg_hdr = (void *) skb->data;
-	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
	if (result < 0)
		goto error_msg_hdr_check;
	result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
	pl_itr = sizeof(*msg_hdr) +	/* Check payload descriptor(s) */
		num_pls * sizeof(msg_hdr->pld[0]);
	pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
-	if (pl_itr > skb->len) {	/* got all the payload descriptors? */
+	if (pl_itr > skb_len) {	/* got all the payload descriptors? */
		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
			"%u payload descriptors (%zu each, total %zu)\n",
-			skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+			skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
		goto error_pl_descr_short;
	}
	/* Walk each payload payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
		/* work around old gcc warnings */
		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
-						  pl_itr, skb->len);
+						  pl_itr, skb_len);
		if (result < 0)
			goto error_pl_descr_check;
		single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
	if (i < i2400m->rx_pl_min)
		i2400m->rx_pl_min = i;
	i2400m->rx_num++;
-	i2400m->rx_size_acc += skb->len;
-	if (skb->len < i2400m->rx_size_min)
-		i2400m->rx_size_min = skb->len;
-	if (skb->len > i2400m->rx_size_max)
-		i2400m->rx_size_max = skb->len;
+	i2400m->rx_size_acc += skb_len;
+	if (skb_len < i2400m->rx_size_min)
+		i2400m->rx_size_min = skb_len;
+	if (skb_len > i2400m->rx_size_max)
+		i2400m->rx_size_max = skb_len;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 error_pl_descr_check:
 error_pl_descr_short:
 error_msg_hdr_check:
-	d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+	d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
		  i2400m, skb, skb_len, result);
	return result;
 }
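
The i2400m hunks read skb->len once, up front, and use the cached skb_len everywhere after the payload walk, so the statistics and the exit trace no longer depend on the skb still being intact. Since skb_len also changes type from size_t to unsigned, the d_fnstart()/d_fnend() format specifiers change from %zu to %u: printk-style varargs get no type checking, so format and type must match. A two-line illustration:

/* Sketch only: the format specifier must follow the variable's type. */
size_t total = 1500;	/* size_t prints with %zu */
unsigned int count = 3;	/* unsigned prints with %u  */
printk(KERN_DEBUG "total %zu count %u\n", total, count);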
@@ -161,12 +161,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l
 {
	struct sk_buff *skb;

	release_sock(sk);
	if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
		skb_reserve(skb, BT_SKB_RESERVE);
		bt_cb(skb)->incoming = 0;
	}
	lock_sock(sk);

	if (!skb && *err)
		return NULL;

+	*err = sock_error(sk);
+	if (*err)
+		goto out;
+
+	if (sk->sk_shutdown) {
+		*err = -ECONNRESET;
+		goto out;
+	}
+
	return skb;
+
+out:
+	kfree_skb(skb);
+	return NULL;
 }

 int bt_err(__u16 code);
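
bt_skb_send_alloc() drops the socket lock while sock_alloc_send_skb() sleeps, so the socket can record an error or be shut down before the lock is retaken; the added checks re-validate it and free the just-allocated skb instead of handing callers a buffer for a dead connection (kfree_skb() on NULL is a no-op, so the out: path is safe even when allocation failed). The general shape, with a hypothetical sleeping call:

/* Sketch only: re-validate socket state after reacquiring the lock. */
release_sock(sk);
skb = blocking_alloc(sk, len, err);	/* hypothetical sleeping allocation */
lock_sock(sk);

if (sock_error(sk) || sk->sk_shutdown) {	/* world changed while unlocked */
	kfree_skb(skb);				/* no-op when skb is NULL */
	return NULL;
}
return skb;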
@@ -778,7 +778,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
	eg->packets_rcvd++;
	mpc->eg_ops->put(eg);

-	memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data));
+	memset(ATM_SKB(new_skb), 0, sizeof(struct atm_skb_data));
	netif_rx(new_skb);
 }
@@ -1441,33 +1441,23 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
 static void l2cap_streaming_send(struct sock *sk)
 {
-	struct sk_buff *skb, *tx_skb;
+	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

-	while ((skb = sk->sk_send_head)) {
-		tx_skb = skb_clone(skb, GFP_ATOMIC);
-
-		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
-		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
-			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
-			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
+			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

-		l2cap_do_send(sk, tx_skb);
+		l2cap_do_send(sk, skb);

		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
-
-		if (skb_queue_is_last(TX_QUEUE(sk), skb))
-			sk->sk_send_head = NULL;
-		else
-			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
-
-		skb = skb_dequeue(TX_QUEUE(sk));
-		kfree_skb(skb);
	}
 }
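
The rewritten l2cap_streaming_send() exploits the fact that streaming mode never retransmits: there is no reason to clone each frame and track sk_send_head, so the loop dequeues the skb, stamps it in place, and passes ownership straight to l2cap_do_send(). A sketch contrasting the two queue-draining styles (send_frame() and queue are illustrative names):

/* Sketch only: two ways to drain a TX queue. */

/* Reliable mode keeps the original queued for retransmission: */
tx_skb = skb_clone(skb, GFP_ATOMIC);
if (tx_skb)
	send_frame(tx_skb);		/* hypothetical send helper */

/* Streaming mode sends the queued skb itself; ownership moves on: */
while ((skb = skb_dequeue(queue)))
	send_frame(skb);		/* nothing left over to free */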
@@ -1960,6 +1950,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
	switch (optname) {
	case L2CAP_OPTIONS:
+		if (sk->sk_state == BT_CONNECTED) {
+			err = -EINVAL;
+			break;
+		}
+
		opts.imtu = l2cap_pi(sk)->imtu;
		opts.omtu = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
@@ -2771,10 +2766,10 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
-				pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
-				pi->omtu = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+				pi->imtu = val;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
@@ -3071,6 +3066,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
	return 0;
 }

+static inline void set_default_fcs(struct l2cap_pinfo *pi)
+{
+	/* FCS is enabled only in ERTM or streaming mode, if one or both
+	 * sides request it.
+	 */
+	if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
+		pi->fcs = L2CAP_FCS_NONE;
+	else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
+		pi->fcs = L2CAP_FCS_CRC16;
+}
+
 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
 {
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
@@ -3088,14 +3094,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
	if (!sk)
		return -ENOENT;

-	if (sk->sk_state != BT_CONFIG) {
-		struct l2cap_cmd_rej rej;
-
-		rej.reason = cpu_to_le16(0x0002);
-		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-				sizeof(rej), &rej);
+	if (sk->sk_state == BT_DISCONN)
		goto unlock;
-	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
@@ -3135,9 +3135,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-					l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
@@ -3225,9 +3223,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
-					l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
-			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;
		l2cap_pi(sk)->next_tx_seq = 0;
@@ -82,11 +82,14 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
 static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 {
	struct sock *sk = d->owner, *parent;
+	unsigned long flags;

	if (!sk)
		return;

	BT_DBG("dlc %p state %ld err %d", d, d->state, err);

+	local_irq_save(flags);
	bh_lock_sock(sk);

	if (err)
@@ -108,6 +111,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
	}

	bh_unlock_sock(sk);
+	local_irq_restore(flags);

	if (parent && sock_flag(sk, SOCK_ZAPPED)) {
		/* We have to drop DLC lock here, otherwise
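
bh_lock_sock() only takes the socket spinlock; it does not disable interrupts by itself, so a handler that can also be reached from interrupt context may deadlock against itself on one CPU. The rfcomm fix brackets the critical section with local_irq_save()/local_irq_restore(). The shape of the pattern:

/* Sketch only: mask local IRQs around a sock lock that an
 * interrupt-context path may also take on the same CPU.
 */
unsigned long flags;

local_irq_save(flags);		/* no interrupt can re-enter us now */
bh_lock_sock(sk);
/* ... update socket state ... */
bh_unlock_sock(sk);
local_irq_restore(flags);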
@@ -355,7 +355,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
		if (info.rule_cnt > 0) {
			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-				rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+				rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
						   GFP_USER);
			if (!rule_buf)
				return -ENOMEM;
@@ -404,7 +404,7 @@ static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
		return -ENOMEM;
	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-	indir = kmalloc(full_size, GFP_USER);
+	indir = kzalloc(full_size, GFP_USER);
	if (!indir)
		return -ENOMEM;
@@ -579,7 +579,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
	gstrings.len = ret;

-	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+	data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
	if (!data)
		return -ENOMEM;
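
All three ethtool buffers above are later copied back to userspace, and a driver is not obliged to fill every byte; with kmalloc() the slack would leak uninitialized kernel heap. The fix (this is the net/core/ethtool.c conflict noted in the merge message) switches to zero-filling kzalloc(), the standard defence for copy_to_user()-bound buffers. A sketch of the hazard and the defence (helper and variables are illustrative):

/* Sketch only: zero-fill buffers that are copied back to userspace. */
u8 *report = kzalloc(len, GFP_USER);
if (!report)
	return -ENOMEM;
fill_report(report, len);	/* hypothetical; may write fewer than len bytes */
err = copy_to_user(useraddr, report, len) ? -EFAULT : 0;
kfree(report);
return err;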