Commit cefe0078 authored by Annie Li, committed by David S. Miller

xen-netfront: fix resource leak in netfront

This patch removes the grant-transfer releasing code from netfront and uses
gnttab_end_foreign_access to end grant access, since
gnttab_end_foreign_access_ref may fail when the grant entry is still in use
for reading or writing.
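
For illustration, here is a rough sketch of the pattern being removed
(old_end_access is a hypothetical helper invented for this example, not the
literal driver code). gnttab_end_foreign_access_ref() returns 0 when the
backend still has the grant mapped or in flight, and the old release path
ignored that result:

#include <xen/grant_table.h>

/* Hypothetical sketch of the old, leak-prone pattern. The return
 * value of gnttab_end_foreign_access_ref() was ignored, so on
 * failure the grant reference was released (and the page behind it
 * later freed) while the backend could still access it.
 */
static void old_end_access(grant_ref_t *gref_head, grant_ref_t ref)
{
	gnttab_end_foreign_access_ref(ref, GNTMAP_readonly); /* may fail */
	gnttab_release_grant_reference(gref_head, ref);      /* unconditional */
}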

* Clean up grant transfer code kept from the old netfront (2.6.18), which
granted pages for access/map and transfer. Grant transfer is deprecated in the
current netfront, so remove the corresponding release code for transfer.

* Fix the resource leak: release grant access (through
gnttab_end_foreign_access) and the skb on the tx/rx paths, and use get_page to
ensure the page is only released once grant access has completed successfully
(see the sketch below).
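
A minimal sketch of the replacement pattern, assuming the gnttab API of this
kernel generation (release_granted_page is a hypothetical name; the real code
in the diff works on np->grant_tx_page[] and the rx skb frags directly):

#include <linux/mm.h>
#include <xen/grant_table.h>

static void release_granted_page(grant_ref_t ref, struct page *page)
{
	/* Extra reference: the page must stay allocated even if
	 * ending the foreign access is deferred because the grant
	 * is still in use by the backend. */
	get_page(page);

	/* Unlike the _ref variant this cannot fail: if the grant is
	 * busy, teardown (including freeing the page passed in) is
	 * deferred until it is safe. */
	gnttab_end_foreign_access(ref, GNTMAP_readonly,
				  (unsigned long)page_address(page));
}

The skb that holds the page can then be freed normally; the page itself is
only returned to the allocator once the backend's access has actually ended.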

Xen-blkfront/xen-tpmfront/xen-pcifront have a similar issue; patches for them
will be created separately.

V6: Correct subject line and commit message.

V5: Remove unnecessary change in xennet_end_access.

V4: Revert put_page in gnttab_end_foreign_access, and keep the netfront change
in a single patch.

V3: Changes as suggested by David Vrabel; ensure pages are not freed until
grant access is ended.

V2: Improve patch comments.
Signed-off-by: Annie Li <annie.li@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ce60e0c4
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -117,6 +117,7 @@ struct netfront_info {
 	} tx_skbs[NET_TX_RING_SIZE];
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
 
 	spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -396,6 +397,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
 		gnttab_release_grant_reference(
 			&np->gref_tx_head, np->grant_tx_ref[id]);
 		np->grant_tx_ref[id] = GRANT_INVALID_REF;
+		np->grant_tx_page[id] = NULL;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
 		dev_kfree_skb_irq(skb);
 	}
@@ -452,6 +454,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
 						mfn, GNTMAP_readonly);
 
+		np->grant_tx_page[id] = virt_to_page(data);
 		tx->gref = np->grant_tx_ref[id] = ref;
 		tx->offset = offset;
 		tx->size = len;
@@ -497,6 +500,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
 							np->xbdev->otherend_id,
 							mfn, GNTMAP_readonly);
 
+			np->grant_tx_page[id] = page;
 			tx->gref = np->grant_tx_ref[id] = ref;
 			tx->offset = offset;
 			tx->size = bytes;
@@ -596,6 +600,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	mfn = virt_to_mfn(data);
 	gnttab_grant_foreign_access_ref(
 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+	np->grant_tx_page[id] = virt_to_page(data);
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
@@ -1085,10 +1090,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 			continue;
 
 		skb = np->tx_skbs[i].skb;
-		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
-					      GNTMAP_readonly);
-		gnttab_release_grant_reference(&np->gref_tx_head,
-					       np->grant_tx_ref[i]);
+		get_page(np->grant_tx_page[i]);
+		gnttab_end_foreign_access(np->grant_tx_ref[i],
+					  GNTMAP_readonly,
+					  (unsigned long)page_address(np->grant_tx_page[i]));
+		np->grant_tx_page[i] = NULL;
 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
 		dev_kfree_skb_irq(skb);
@@ -1097,78 +1103,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
 
 static void xennet_release_rx_bufs(struct netfront_info *np)
 {
-	struct mmu_update *mmu = np->rx_mmu;
-	struct multicall_entry *mcl = np->rx_mcl;
-	struct sk_buff_head free_list;
-	struct sk_buff *skb;
-	unsigned long mfn;
-	int xfer = 0, noxfer = 0, unused = 0;
 	int id, ref;
 
-	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
-			 __func__);
-	return;
-
-	skb_queue_head_init(&free_list);
-
 	spin_lock_bh(&np->rx_lock);
 
 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
-		ref = np->grant_rx_ref[id];
-		if (ref == GRANT_INVALID_REF) {
-			unused++;
-			continue;
-		}
+		struct sk_buff *skb;
+		struct page *page;
 
 		skb = np->rx_skbs[id];
-		mfn = gnttab_end_foreign_transfer_ref(ref);
-		gnttab_release_grant_reference(&np->gref_rx_head, ref);
-		np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
-		if (0 == mfn) {
-			skb_shinfo(skb)->nr_frags = 0;
-			dev_kfree_skb(skb);
-			noxfer++;
+		if (!skb)
 			continue;
-		}
 
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Remap the page. */
-			const struct page *page =
-				skb_frag_page(&skb_shinfo(skb)->frags[0]);
-			unsigned long pfn = page_to_pfn(page);
-			void *vaddr = page_address(page);
+		ref = np->grant_rx_ref[id];
+		if (ref == GRANT_INVALID_REF)
+			continue;
 
-			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
-						mfn_pte(mfn, PAGE_KERNEL),
-						0);
-			mcl++;
-			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
-				| MMU_MACHPHYS_UPDATE;
-			mmu->val = pfn;
-			mmu++;
+		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
-			set_phys_to_machine(pfn, mfn);
-		}
-		__skb_queue_tail(&free_list, skb);
-		xfer++;
-	}
-
-	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
-		 __func__, xfer, noxfer, unused);
+		/* gnttab_end_foreign_access() needs a page ref until
+		 * foreign access is ended (which may be deferred).
+		 */
+		get_page(page);
+		gnttab_end_foreign_access(ref, 0,
+					  (unsigned long)page_address(page));
+		np->grant_rx_ref[id] = GRANT_INVALID_REF;
 
-	if (xfer) {
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			/* Do all the remapping work and M2P updates. */
-			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
-					 NULL, DOMID_SELF);
-			mcl++;
-			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
-		}
+		kfree_skb(skb);
 	}
 
-	__skb_queue_purge(&free_list);
 	spin_unlock_bh(&np->rx_lock);
 }
 
@@ -1339,6 +1302,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		np->rx_skbs[i] = NULL;
 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
+		np->grant_tx_page[i] = NULL;
 	}
 
 	/* A grant for every tx ring slot */