Commit 851b10d6 authored by Konstantin Khlebnikov, committed by David S. Miller

net/mlx4_en: do batched put_page using atomic_sub

This patch fixes a couple of error paths after allocation failures.
An atomic set of the page reference counter is safe only if it is zero;
otherwise the set can race with any speculative get_page_unless_zero().
Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 04aeb56a
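
The race described above can be illustrated with a short sketch. This is not part of the patch; the helper cleanup_unused_rx_page() and its parameters are hypothetical names chosen to mirror the fields in the diff below, while set_page_count(), page_ref_sub(), get_page_unless_zero() and put_page() are the usual kernel page-reference helpers. mlx4_alloc_pages() raises the page refcount so that each RX fragment carved out of the page holds one reference; the error paths must drop those extra references again.

	#include <linux/mm.h>       /* put_page(), get_page_unless_zero() */
	#include <linux/page_ref.h> /* set_page_count(), page_ref_sub()  */

	/*
	 * Illustrative sketch only (not the driver code itself):
	 * release a page whose refcount was bumped by
	 * page_size / frag_stride in mlx4_alloc_pages().
	 */
	static void cleanup_unused_rx_page(struct page *page,
					   u32 page_size, u32 frag_stride)
	{
		/*
		 * Unsafe variant: a plain store of the counter.  If another
		 * CPU has just succeeded with get_page_unless_zero() (for
		 * example from a speculative lookup), its increment is
		 * overwritten and the page can be freed while still in use.
		 *
		 *	set_page_count(page, 1);
		 */

		/*
		 * Safe variant: atomically subtract only the references we
		 * added ourselves.  A concurrent get_page_unless_zero()
		 * increment is preserved, so the final put_page() frees the
		 * page only when the last reference is really gone.
		 */
		page_ref_sub(page, page_size / frag_stride - 1);
		put_page(page);
	}

This is exactly the substitution the two hunks below make in mlx4_en_alloc_frags() and mlx4_en_init_allocator().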
@@ -126,7 +126,9 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
 			dma_unmap_page(priv->ddev, page_alloc[i].dma,
 				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
 			page = page_alloc[i].page;
-			set_page_count(page, 1);
+			/* Revert changes done by mlx4_alloc_pages */
+			page_ref_sub(page, page_alloc[i].page_size /
+					   priv->frag_info[i].frag_stride - 1);
 			put_page(page);
 		}
 	}
@@ -176,7 +178,9 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 		dma_unmap_page(priv->ddev, page_alloc->dma,
 			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
 		page = page_alloc->page;
-		set_page_count(page, 1);
+		/* Revert changes done by mlx4_alloc_pages */
+		page_ref_sub(page, page_alloc->page_size /
+			     priv->frag_info[i].frag_stride - 1);
 		put_page(page);
 		page_alloc->page = NULL;
 	}