Commit 83b7b77a authored by David S. Miller

Merge branch 'netif_rx-conversions-part2'

Sebastian Andrzej Siewior says:

====================
net: Convert users to netif_rx(), part 2.

This is the second batch of converting netif_rx_ni() callers to
netif_rx(). The change making this possible is already in net-next,
where netif_rx_ni() is now just a wrapper around netif_rx() (see the
sketch after the commit metadata below). This clean-up is a step
towards removing netif_rx_ni() entirely.

The brcmfmac changes are slightly larger because the inirq parameter
can be removed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4c22aac3 f9834dbd
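
For reference, a minimal sketch (editor's illustration, not the exact upstream source) of the relationship the cover letter relies on: netif_rx() now disables bottom halves around the enqueue itself, so it is safe to call from process context, and netif_rx_ni() as well as netif_rx_any_context() collapse into plain wrappers, which makes every conversion in the diff below a 1:1 replacement.

/* Sketch only -- approximate shape of the net-next helpers assumed above. */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	local_bh_disable();	/* any raised NET_RX_SOFTIRQ runs on enable */
	ret = __netif_rx(skb);	/* enqueue to the per-CPU backlog */
	local_bh_enable();

	return ret;
}

static inline int netif_rx_ni(struct sk_buff *skb)
{
	return netif_rx(skb);	/* now identical, hence the call-site conversions below */
}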
@@ -154,7 +154,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
-netif_rx_ni(skb);
+netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
......
@@ -221,7 +221,7 @@ static void slc_bump(struct slcan *sl)
if (!(cf.can_id & CAN_RTR_FLAG))
sl->dev->stats.rx_bytes += cf.len;
-netif_rx_ni(skb);
+netif_rx(skb);
}
/* parse tty input stream */
......
@@ -356,7 +356,7 @@ static void hi3110_hw_rx(struct spi_device *spi)
can_led_event(priv->net, CAN_LED_EVENT_RX);
-netif_rx_ni(skb);
+netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -677,7 +677,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
-netif_rx_ni(skb);
+netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
can_bus_off(net);
@@ -718,7 +718,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
netdev_dbg(priv->net, "Bus Error\n");
-netif_rx_ni(skb);
+netif_rx(skb);
}
}
......
@@ -740,7 +740,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
can_led_event(priv->net, CAN_LED_EVENT_RX);
-netif_rx_ni(skb);
+netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -987,7 +987,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
-netif_rx_ni(skb);
+netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
......
@@ -80,7 +80,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
-netif_rx_ni(skb);
+netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
......
@@ -63,7 +63,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->ip_summed = CHECKSUM_UNNECESSARY;
len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
-if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
......
@@ -286,7 +286,7 @@ static void mctp_serial_rx(struct mctp_serial *dev)
cb = __mctp_cb(skb);
cb->halen = 0;
-netif_rx_ni(skb);
+netif_rx(skb);
dev->netdev->stats.rx_packets++;
dev->netdev->stats.rx_bytes += dev->rxlen;
}
......
@@ -886,7 +886,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
if (shhwtstamps)
-netif_rx_ni(skb);
+netif_rx(skb);
}
static void decode_txts(struct dp83640_private *dp83640,
@@ -1329,7 +1329,7 @@ static void rx_timestamp_work(struct work_struct *work)
break;
}
-netif_rx_ni(skb);
+netif_rx(skb);
}
if (!skb_queue_empty(&dp83640->rx_queue))
@@ -1380,7 +1380,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
} else {
-netif_rx_ni(skb);
+netif_rx(skb);
}
return true;
......
@@ -1212,7 +1212,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
ts.tv_sec--;
shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
-netif_rx_ni(skb);
+netif_rx(skb);
return true;
}
......
@@ -478,7 +478,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
shhwtstamps_rx = skb_hwtstamps(skb);
shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
-netif_rx_ni(skb);
+netif_rx(skb);
}
if (priv->extts) {
......
@@ -676,7 +676,7 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
case PLIP_PK_DONE:
/* Inform the upper layer for the arrival of a packet. */
rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
-netif_rx_ni(rcv->skb);
+netif_rx(rcv->skb);
dev->stats.rx_bytes += rcv->length.h;
dev->stats.rx_packets++;
rcv->skb = NULL;
......
@@ -368,7 +368,7 @@ static void sl_bump(struct slip *sl)
skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
-netif_rx_ni(skb);
+netif_rx(skb);
dev->stats.rx_packets++;
}
......
@@ -839,7 +839,7 @@ static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
skb->protocol = eth_type_trans(skb, skb->dev);
-netif_rx_ni(skb);
+netif_rx(skb);
}
static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
......
@@ -958,7 +958,7 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
if (gro)
napi_gro_receive(&wil->napi_rx, skb);
else
-netif_rx_ni(skb);
+netif_rx(skb);
}
ndev->stats.rx_packets++;
stats->rx_packets++;
......
@@ -1199,7 +1199,7 @@ static void wmi_evt_eapol_rx(struct wil6210_vif *vif, int id, void *d, int len)
eth->h_proto = cpu_to_be16(ETH_P_PAE);
skb_put_data(skb, evt->eapol, eapol_len);
skb->protocol = eth_type_trans(skb, ndev);
-if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += sz;
if (stats) {
......
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
-struct sk_buff *skb, bool inirq)
+struct sk_buff *skb)
{
-brcmf_fws_rxreorder(ifp, skb, inirq);
+brcmf_fws_rxreorder(ifp, skb);
}
static void
......
@@ -400,7 +400,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
/* Most of Broadcom's firmwares send 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -423,15 +423,7 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
-if (inirq) {
-netif_rx(skb);
-} else {
-/* If the receive is not processed inside an ISR,
-* the softirqd must be woken explicitly to service
-* the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
-*/
-netif_rx_ni(skb);
-}
+netif_rx(skb);
}
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -480,7 +472,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
-brcmf_netif_rx(ifp, skb, false);
+brcmf_netif_rx(ifp, skb);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -515,7 +507,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
return;
if (brcmf_proto_is_reorder_skb(skb)) {
-brcmf_proto_rxreorder(ifp, skb, inirq);
+brcmf_proto_rxreorder(ifp, skb);
} else {
/* Process special event packets */
if (handle_event) {
@@ -524,7 +516,7 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
brcmf_fweh_process_skb(ifp->drvr, skb,
BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
}
-brcmf_netif_rx(ifp, skb, inirq);
+brcmf_netif_rx(ifp, skb);
}
}
......
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
......
@@ -1664,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1682,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
-brcmf_netif_rx(ifp, pkt, inirq);
+brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1694,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
-brcmf_netif_rx(ifp, pkt, inirq);
+brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1719,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
-brcmf_netif_rx(ifp, pkt, inirq);
+brcmf_netif_rx(ifp, pkt);
return;
}
@@ -1833,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
-brcmf_netif_rx(ifp, pkt, inirq);
+brcmf_netif_rx(ifp, pkt);
}
}
......
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
@@ -536,8 +536,7 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
-bool inirq)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}
@@ -1191,7 +1190,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
-brcmf_netif_rx(ifp, skb, false);
+brcmf_netif_rx(ifp, skb);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
......
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
-void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
+void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
-ifp->drvr->proto->rxreorder(ifp, skb, inirq);
+ifp->drvr->proto->rxreorder(ifp, skb);
}
static inline void
......
@@ -147,7 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
-netif_rx_any_context(skb);
+netif_rx(skb);
ret = 0;
done:
@@ -262,7 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
-netif_rx_any_context(skb);
+netif_rx(skb);
ret = 0;
......
@@ -350,7 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer*/
-netif_rx_any_context(skb);
+netif_rx(skb);
return 0;
}
......
@@ -488,7 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
-netif_rx_any_context(skb);
+netif_rx(skb);
return 0;
}
......
@@ -284,7 +284,7 @@ int can_send(struct sk_buff *skb, int loop)
}
if (newskb)
-netif_rx_ni(newskb);
+netif_rx(newskb);
/* update statistics */
pkg_stats->tx_frames++;
......
@@ -2153,7 +2153,7 @@ void cfg80211_send_layer2_update(struct net_device *dev, const u8 *addr)
skb->dev = dev;
skb->protocol = eth_type_trans(skb, dev);
memset(skb->cb, 0, sizeof(skb->cb));
-netif_rx_ni(skb);
+netif_rx(skb);
}
EXPORT_SYMBOL(cfg80211_send_layer2_update);
......