Commit 51c56b00 authored by Eric Dumazet, committed by David S. Miller

net: remove k{un}map_skb_frag()

Since commit 3e4d3af5 (mm: stack based kmap_atomic()) we don't have
to disable BH anymore while mapping skb frags.

We can remove the kmap_skb_frag() / kunmap_skb_frag() helpers and use
kmap_atomic() / kunmap_atomic() directly.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 109d2446
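
Every converted call site below follows the same map/touch/unmap pattern. A minimal sketch of that pattern (the helper name copy_from_frag and its argument list are illustrative, not part of the commit; frag, offset, start and copy mirror the identifiers used in the hunks):

	#include <linux/highmem.h>
	#include <linux/skbuff.h>

	/* Map the fragment's (possibly highmem) page, copy out of it,
	 * unmap.  With stack-based kmap_atomic() no local_bh_disable()
	 * is needed around the mapping. */
	static void copy_from_frag(const skb_frag_t *frag, int offset,
				   int start, int copy, void *to)
	{
		u8 *vaddr = kmap_atomic(skb_frag_page(frag));

		memcpy(to, vaddr + frag->page_offset + offset - start, copy);
		kunmap_atomic(vaddr);
	}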
net/appletalk/ddp.c
@@ -63,7 +63,7 @@
 #include <net/tcp_states.h>
 #include <net/route.h>
 #include <linux/atalk.h>
-#include "../core/kmap_skb.h"
+#include <linux/highmem.h>
 
 struct datalink_proto *ddp_dl, *aarp_dl;
 static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
 		if (copy > len)
 			copy = len;
-		vaddr = kmap_skb_frag(frag);
+		vaddr = kmap_atomic(skb_frag_page(frag));
 		sum = atalk_sum_partial(vaddr + frag->page_offset +
 					offset - start, copy, sum);
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 
 		if (!(len -= copy))
 			return sum;
net/core/kmap_skb.h (deleted)
-#include <linux/highmem.h>
-
-static inline void *kmap_skb_frag(const skb_frag_t *frag)
-{
-#ifdef CONFIG_HIGHMEM
-	BUG_ON(in_irq());
-
-	local_bh_disable();
-#endif
-	return kmap_atomic(skb_frag_page(frag));
-}
-
-static inline void kunmap_skb_frag(void *vaddr)
-{
-	kunmap_atomic(vaddr);
-#ifdef CONFIG_HIGHMEM
-	local_bh_enable();
-#endif
-}
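
The CONFIG_HIGHMEM guards in the deleted helper date from the fixed-slot kmap_atomic(): each KM type had one mapping slot per CPU, so a softirq mapping a frag could clobber a process-context mapping in the same slot unless BH was disabled. Since 3e4d3af5, atomic mappings live on a small per-CPU stack and kunmap_atomic() pops in LIFO order, so nesting is safe. An illustrative sketch (page_a and page_b are placeholders, not from the commit):

	void *a = kmap_atomic(page_a);
	/* If a softirq runs here and does its own kmap_atomic(), it
	 * takes the next stack slot instead of reusing a fixed one. */
	void *b = kmap_atomic(page_b);

	memcpy(b, a, PAGE_SIZE);
	kunmap_atomic(b);	/* unmaps must be LIFO */
	kunmap_atomic(a);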
net/core/skbuff.c
@@ -68,8 +68,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <trace/events/skb.h>
-
-#include "kmap_skb.h"
+#include <linux/highmem.h>
 
 static struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
@@ -708,10 +707,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 			}
 			return -ENOMEM;
 		}
-		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		vaddr = kmap_atomic(skb_frag_page(f));
 		memcpy(page_address(page),
 		       vaddr + f->page_offset, skb_frag_size(f));
-		kunmap_skb_frag(vaddr);
+		kunmap_atomic(vaddr);
 		page->private = (unsigned long)head;
 		head = page;
 	}
@@ -1486,21 +1485,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(f);
 		if ((copy = end - offset) > 0) {
 			u8 *vaddr;
 
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+			vaddr = kmap_atomic(skb_frag_page(f));
 			memcpy(to,
-			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
-			       offset - start, copy);
-			kunmap_skb_frag(vaddr);
+			       vaddr + f->page_offset + offset - start,
+			       copy);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1805,10 +1805,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 			if (copy > len)
 				copy = len;
 
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			memcpy(vaddr + frag->page_offset + offset - start,
 			       from, copy);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 
 			if ((len -= copy) == 0)
 				return 0;
@@ -1868,21 +1868,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		WARN_ON(start > offset + len);
 
-		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		end = start + skb_frag_size(frag);
 		if ((copy = end - offset) > 0) {
 			__wsum csum2;
 			u8 *vaddr;
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial(vaddr + frag->page_offset +
 					     offset - start, copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -1954,12 +1954,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 			if (copy > len)
 				copy = len;
-			vaddr = kmap_skb_frag(frag);
+			vaddr = kmap_atomic(skb_frag_page(frag));
 			csum2 = csum_partial_copy_nocheck(vaddr +
 							  frag->page_offset +
 							  offset - start, to,
 							  copy, 0);
-			kunmap_skb_frag(vaddr);
+			kunmap_atomic(vaddr);
 			csum = csum_block_add(csum, csum2, pos);
 			if (!(len -= copy))
 				return csum;
@@ -2479,7 +2479,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 		if (abs_offset < block_limit) {
 			if (!st->frag_data)
-				st->frag_data = kmap_skb_frag(frag);
+				st->frag_data = kmap_atomic(skb_frag_page(frag));
 
 			*data = (u8 *) st->frag_data + frag->page_offset +
 				(abs_offset - st->stepped_offset);
@@ -2488,7 +2488,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 		}
 
 		if (st->frag_data) {
-			kunmap_skb_frag(st->frag_data);
+			kunmap_atomic(st->frag_data);
 			st->frag_data = NULL;
 		}
@@ -2497,7 +2497,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 	}
 
 	if (st->frag_data) {
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 		st->frag_data = NULL;
 	}
@@ -2525,7 +2525,7 @@ EXPORT_SYMBOL(skb_seq_read);
 void skb_abort_seq_read(struct skb_seq_state *st)
 {
 	if (st->frag_data)
-		kunmap_skb_frag(st->frag_data);
+		kunmap_atomic(st->frag_data);
 }
 
 EXPORT_SYMBOL(skb_abort_seq_read);
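
The skb_seq_read() hunks show why kunmap_atomic() appears in three places: st->frag_data caches a mapped frag between reads, and skb_abort_seq_read() drops the mapping when a caller stops early. A hypothetical caller (the 0x7e delimiter and loop shape are made up for illustration; the three functions are the real net/core/skbuff.c API):

	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		if (memchr(data, 0x7e, len)) {
			/* Early exit: unmaps st->frag_data (now via
			 * kunmap_atomic()) if a frag is still mapped. */
			skb_abort_seq_read(&st);
			break;
		}
		consumed += len;
	}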