Commit 1829b086 authored by Eric Dumazet, committed by David S. Miller

benet: use GFP_KERNEL allocations when possible

Extend be_alloc_pages() with a gfp parameter, so that we use GFP_KERNEL
allocations instead of GFP_ATOMIC when not running in softirq context.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Ajit Khaparde <ajit.khaparde@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b8a66ee
...@@ -1169,20 +1169,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp) ...@@ -1169,20 +1169,20 @@ static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
} }
static inline struct page *be_alloc_pages(u32 size) static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{ {
gfp_t alloc_flags = GFP_ATOMIC;
u32 order = get_order(size); u32 order = get_order(size);
if (order > 0) if (order > 0)
alloc_flags |= __GFP_COMP; gfp |= __GFP_COMP;
return alloc_pages(alloc_flags, order); return alloc_pages(gfp, order);
} }
/* /*
* Allocate a page, split it to fragments of size rx_frag_size and post as * Allocate a page, split it to fragments of size rx_frag_size and post as
* receive buffers to BE * receive buffers to BE
*/ */
static void be_post_rx_frags(struct be_rx_obj *rxo) static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{ {
struct be_adapter *adapter = rxo->adapter; struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
...@@ -1196,7 +1196,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo) ...@@ -1196,7 +1196,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
page_info = &rxo->page_info_tbl[rxq->head]; page_info = &rxo->page_info_tbl[rxq->head];
for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
if (!pagep) { if (!pagep) {
pagep = be_alloc_pages(adapter->big_page_size); pagep = be_alloc_pages(adapter->big_page_size, gfp);
if (unlikely(!pagep)) { if (unlikely(!pagep)) {
rxo->stats.rx_post_fail++; rxo->stats.rx_post_fail++;
break; break;
...@@ -1753,7 +1753,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget) ...@@ -1753,7 +1753,7 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
/* Refill the queue */ /* Refill the queue */
if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
be_post_rx_frags(rxo); be_post_rx_frags(rxo, GFP_ATOMIC);
/* All consumed */ /* All consumed */
if (work_done < budget) { if (work_done < budget) {
...@@ -1890,7 +1890,7 @@ static void be_worker(struct work_struct *work) ...@@ -1890,7 +1890,7 @@ static void be_worker(struct work_struct *work)
if (rxo->rx_post_starved) { if (rxo->rx_post_starved) {
rxo->rx_post_starved = false; rxo->rx_post_starved = false;
be_post_rx_frags(rxo); be_post_rx_frags(rxo, GFP_KERNEL);
} }
} }
if (!adapter->ue_detected && !lancer_chip(adapter)) if (!adapter->ue_detected && !lancer_chip(adapter))
...@@ -2138,7 +2138,7 @@ static int be_open(struct net_device *netdev) ...@@ -2138,7 +2138,7 @@ static int be_open(struct net_device *netdev)
u16 link_speed; u16 link_speed;
for_all_rx_queues(adapter, rxo, i) { for_all_rx_queues(adapter, rxo, i) {
be_post_rx_frags(rxo); be_post_rx_frags(rxo, GFP_KERNEL);
napi_enable(&rxo->rx_eq.napi); napi_enable(&rxo->rx_eq.napi);
} }
napi_enable(&tx_eq->napi); napi_enable(&tx_eq->napi);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment