Commit 35b7a191 authored by David S. Miller

Merge branch 'net-drivers-pgcnt'

Eric Dumazet says:

====================
net: fix races accessing page->_count

It is illegal to use atomic_set(&page->_count, ...) even if we 'own'
the page. Other entities in the kernel need to use get_page_unless_zero()
to get a reference to the page before testing page properties, so we could
lose a refcount increment.

The only case where it is valid is when page->_count is 0; we use this in
__netdev_alloc_frag().

Note that I have never seen crashes caused by these races; the issue was
reported by Andres Lagar-Cavilla and Hugh Dickins.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1fadee0c 4c450583
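
The race being fixed is easiest to see outside the kernel. The sketch below is a hypothetical userspace C11 model, not kernel code: try_get_ref() stands in for get_page_unless_zero(), and the struct and counter values are made up for illustration. It shows how a plain store of the "expected" count erases a reference another CPU took concurrently, while an increment preserves it.

/* Hypothetical userspace model of the page->_count race (not kernel code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_page {
	atomic_int _count;	/* stands in for struct page's _count */
};

/* Models get_page_unless_zero(): take a reference only if the count is
 * currently non-zero.
 */
static bool try_get_ref(struct model_page *p)
{
	int old = atomic_load(&p->_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&p->_count, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct model_page page = { ._count = 1 };	/* the driver's reference */

	/* Another CPU takes a reference behind the driver's back. */
	bool other_has_ref = try_get_ref(&page);	/* count is now 2 */

	/* Racy pattern removed by this series: "I own the page, so set the
	 * count to the value I expect".  The reference taken above is lost,
	 * which can later lead to a premature free.
	 */
	atomic_store(&page._count, 2);

	/* Safe pattern used instead: add to whatever the count already is.
	 * From the same starting state this would yield 3, not 2:
	 * atomic_fetch_add(&page._count, 1);
	 */

	printf("count = %d (other user holds a ref: %s)\n",
	       atomic_load(&page._count), other_has_ref ? "yes" : "no");
	return 0;
}
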
@@ -219,11 +219,10 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
 
-	/* since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unnecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
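
The igb and ixgbe hunks below make the same change. In all three Intel drivers the receive path already holds one reference on the page and wants the count to end at 2 so that one reference can travel with the skb; under truly exclusive ownership atomic_inc(&page->_count) reaches the same value the old atomic_set(&page->_count, 2) aimed for, but it also preserves any reference a get_page_unless_zero() user grabbed in the meantime. A minimal userspace sketch of that equivalence (not driver code, names and values are illustrative):

/* Illustrative userspace sketch; only the atomic usage mirrors the patch. */
#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int count = 1;		/* the driver's own reference */
	int concurrent_refs = 1;	/* e.g. one get_page_unless_zero() user */

	/* A concurrent user takes its reference. */
	atomic_fetch_add(&count, concurrent_refs);

	/* The driver hands one reference to the skb.  The old code did
	 * atomic_set(&count, 2) here; the fix increments instead, so the
	 * concurrent reference is still accounted for.
	 */
	atomic_fetch_add(&count, 1);

	assert(atomic_load(&count) == 2 + concurrent_refs);
	return 0;
}
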
@@ -6545,11 +6545,10 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 
-	/* since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unnecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
@@ -1865,12 +1865,10 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= truesize;
 
-	/*
-	 * since we are the only owner of the page and we need to
-	 * increment it, just set the value to 2 in order to avoid
-	 * an unecessary locked operation
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
 	 */
-	atomic_set(&page->_count, 2);
+	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
@@ -76,10 +76,10 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
 	page_alloc->dma = dma;
 	page_alloc->page_offset = frag_info->frag_align;
 	/* Not doing get_page() for each frag is a big win
-	 * on asymetric workloads.
+	 * on asymetric workloads. Note we can not use atomic_set().
 	 */
-	atomic_set(&page->_count,
-		   page_alloc->page_size / frag_info->frag_stride);
+	atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
+		   &page->_count);
 	return 0;
 }
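
In the mlx4 hunk the page backs page_size / frag_stride receive fragments, so the driver wants that many references in total. alloc_pages() already returned the page with a count of 1, so the fix adds the remainder instead of storing the absolute value. A small sketch of the arithmetic, with made-up example sizes (not mlx4 defaults):

/* Illustrative arithmetic only; sizes are made-up examples. */
#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	unsigned int page_size = 32768;		/* example multi-page allocation */
	unsigned int frag_stride = 2048;	/* example fragment stride */
	unsigned int wanted = page_size / frag_stride;	/* 16 references wanted */

	atomic_int count = 1;			/* reference from alloc_pages() */

	/* Old code: atomic_set(&count, wanted) could overwrite a reference
	 * taken concurrently.  The fix reaches the same target relative to
	 * what the allocator already gave us.
	 */
	atomic_fetch_add(&count, wanted - 1);

	assert(atomic_load(&count) == (int)wanted);
	return 0;
}
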
@@ -360,18 +360,29 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 				goto end;
 		}
 		nc->frag.size = PAGE_SIZE << order;
-recycle:
-		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
+		/* Even if we own the page, we do not use atomic_set().
+		 * This would break get_page_unless_zero() users.
+		 */
+		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
+			   &nc->frag.page->_count);
 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
 		nc->frag.offset = 0;
 	}
 
 	if (nc->frag.offset + fragsz > nc->frag.size) {
-		/* avoid unnecessary locked operations if possible */
-		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
-		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
-			goto recycle;
-		goto refill;
+		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
+			if (!atomic_sub_and_test(nc->pagecnt_bias,
+						 &nc->frag.page->_count))
+				goto refill;
+			/* OK, page count is 0, we can safely set it */
+			atomic_set(&nc->frag.page->_count,
+				   NETDEV_PAGECNT_MAX_BIAS);
+		} else {
+			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
+				   &nc->frag.page->_count);
+		}
+		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
+		nc->frag.offset = 0;
 	}
 
 	data = page_address(nc->frag.page) + nc->frag.offset;
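
The __netdev_alloc_frag() hunk is the only place where atomic_set() survives, and only on the path where the count has provably dropped to zero, so no get_page_unless_zero() user can race with it anymore. Below is a hedged userspace model of the new recycle decision; the struct is illustrative and MAX_BIAS is a stand-in for NETDEV_PAGECNT_MAX_BIAS, but the branch structure mirrors the patch.

/* Hypothetical userspace model of the recycle decision; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_BIAS 32768			/* stands in for NETDEV_PAGECNT_MAX_BIAS */

struct frag_cache {
	atomic_int page_count;		/* models nc->frag.page->_count */
	int pagecnt_bias;		/* references the allocator still owns */
};

/* Returns true if the exhausted page can be reused, false if a fresh page
 * is needed (the "goto refill" path in the patch).
 */
static bool try_recycle(struct frag_cache *nc)
{
	if (atomic_load(&nc->page_count) != nc->pagecnt_bias) {
		/* Someone else still holds references: drop ours and check
		 * whether we were the last user (atomic_sub_and_test).
		 */
		if (atomic_fetch_sub(&nc->page_count, nc->pagecnt_bias)
		    != nc->pagecnt_bias)
			return false;		/* still in use -> refill */

		/* The count is now 0, so no get_page_unless_zero()-style user
		 * can take a new reference; only here is a plain set safe.
		 */
		atomic_store(&nc->page_count, MAX_BIAS);
	} else {
		/* We are the sole owner: top the count back up without
		 * overwriting references taken concurrently.
		 */
		atomic_fetch_add(&nc->page_count,
				 MAX_BIAS - nc->pagecnt_bias);
	}
	nc->pagecnt_bias = MAX_BIAS;
	return true;
}

int main(void)
{
	struct frag_cache nc = {
		.page_count = MAX_BIAS,	/* every fragment already freed */
		.pagecnt_bias = MAX_BIAS,
	};

	return try_recycle(&nc) ? 0 : 1;	/* page is reused */
}
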