Commit 75ef7cdd authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  net: bug fix for vlan + gro issue
  tc35815: Remove a wrong netif_wake_queue() call which triggers BUG_ON
  cdc_ether: new PID for Ericsson C3607w to the whitelist (resubmit)
  IPv6: better document max_addresses parameter
  MAINTAINERS: update mv643xx_eth maintenance status
  e1000: Fix DMA mapping error handling on RX
  iwlwifi: sanity check before counting number of tfds can be free
  iwlwifi: error checking for number of tfds in queue
  iwlwifi: set HT flags after channel in rxon
parents be64c970 c4d49794
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
 	Default: 5
 max_addresses - INTEGER
-	Number of maximum addresses per interface. 0 disables limitation.
-	It is recommended not set too large value (or 0) because it would
-	be too easy way to crash kernel to allow to create too much of
-	autoconfigured addresses.
+	Maximum number of autoconfigured addresses per interface. Setting
+	to zero disables the limitation. It is not recommended to set this
+	value too large (or to zero) because it would be an easy way to
+	crash the kernel by allowing too many addresses to be created.
 	Default: 16
 disable_ipv6 - BOOLEAN
......
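As a quick way to inspect the limit documented above on a running system, a minimal userspace sketch could read it through procfs. This assumes the usual /proc/sys/net/ipv6/conf/<ifname>/max_addresses mapping of the sysctl; the program itself is illustrative only and not part of this change.

#include <stdio.h>

int main(void)
{
	/* "default" holds the value applied to newly created interfaces. */
	const char *path = "/proc/sys/net/ipv6/conf/default/max_addresses";
	FILE *f = fopen(path, "r");
	int limit;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &limit) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("max_addresses = %d (0 means no limit)\n", limit);
	return 0;
}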
@@ -3489,9 +3489,9 @@ S: Maintained
 F: drivers/net/wireless/libertas/
 MARVELL MV643XX ETHERNET DRIVER
-M: Lennert Buytenhek <buytenh@marvell.com>
+M: Lennert Buytenhek <buytenh@wantstofly.org>
 L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
 F: drivers/net/mv643xx_eth.*
 F: include/linux/mv643xx.h
......
@@ -4006,11 +4006,21 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 			}
 		}
-		if (!buffer_info->dma)
+		if (!buffer_info->dma) {
 			buffer_info->dma = pci_map_page(pdev,
 							buffer_info->page, 0,
 							buffer_info->length,
 							PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+				put_page(buffer_info->page);
+				dev_kfree_skb(skb);
+				buffer_info->page = NULL;
+				buffer_info->skb = NULL;
+				buffer_info->dma = 0;
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+		}
 		rx_desc = E1000_RX_DESC(*rx_ring, i);
 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 						  skb->data,
 						  buffer_info->length,
 						  PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+			dev_kfree_skb(skb);
+			buffer_info->skb = NULL;
+			buffer_info->dma = 0;
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}
 		/*
 		 * XXX if it was allocated cleanly it will never map to a
......
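Both e1000 hunks apply the same pattern: after pci_map_page()/pci_map_single(), verify the returned handle with pci_dma_mapping_error() and unwind before the address is ever written into an RX descriptor. A condensed sketch of that pattern outside the driver follows; the helper name rx_map_page_checked() is hypothetical and not part of e1000.

#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/errno.h>

/*
 * Map one RX page and validate the mapping before it is handed to
 * hardware.  On failure, drop the page reference the caller took and
 * return an error so the refill loop can stop.
 */
static int rx_map_page_checked(struct pci_dev *pdev, struct page *page,
			       unsigned int len, dma_addr_t *dma)
{
	*dma = pci_map_page(pdev, page, 0, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, *dma)) {
		put_page(page);		/* undo the caller's page reference */
		*dma = 0;
		return -ENOMEM;
	}
	return 0;
}

In the driver itself the same failure path also increments adapter->alloc_rx_buff_failed and breaks out of the allocation loop, as shown in the hunks above.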
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
 		/* Transmit complete. */
 		lp->lstats.tx_ints++;
 		tc35815_txdone(dev);
-		netif_wake_queue(dev);
 		if (ret < 0)
 			ret = 0;
 	}
......
@@ -583,6 +583,11 @@ static const struct usb_device_id products [] = {
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &mbm_info,
+}, {
+	/* Ericsson C3607w ver 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &mbm_info,
 }, {
 	/* Toshiba F3507g */
 	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
......
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
 			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
 					   "%d index %d\n", scd_ssn , index);
 			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 			if (priv->mac80211_registered &&
 			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
......
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 				   scd_ssn , index, txq_id, txq->swq_id);
 			freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 			if (priv->mac80211_registered &&
 			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
 				   tx_resp->failure_frame);
 		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 		if (priv->mac80211_registered &&
 		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
 			iwl_wake_queue(priv, txq_id);
 	}
-	if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
-		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
......
@@ -2744,8 +2744,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 		if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
 			priv->staging_rxon.flags = 0;
-		iwl_set_rxon_ht(priv, ht_conf);
 		iwl_set_rxon_channel(priv, conf->channel);
+		iwl_set_rxon_ht(priv, ht_conf);
 		iwl_set_flags_for_band(priv, conf->channel->band);
 		spin_unlock_irqrestore(&priv->lock, flags);
......
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
 int iwl_hw_tx_queue_init(struct iwl_priv *priv,
 			 struct iwl_tx_queue *txq);
 int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed);
 int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
......
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	struct iwl_queue *q = &txq->q;
 	struct iwl_tx_info *tx_info;
 	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
 		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 		tx_info = &txq->txb[txq->q.read_ptr];
 		iwl_tx_status(priv, tx_info->skb[0]);
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
 		tx_info->skb[0] = NULL;
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		nfreed++;
 	}
 	return nfreed;
 }
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
 		/* calculate mac80211 ampdu sw queue to wake */
 		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
 		    priv->mac80211_registered &&
......
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
 	switch (ret) {
 	case GRO_NORMAL:
 	case GRO_HELD:
-		skb->protocol = eth_type_trans(skb, napi->dev);
+		skb->protocol = eth_type_trans(skb, skb->dev);
 		if (ret == GRO_HELD)
 			skb_gro_pull(skb, -ETH_HLEN);
......