Commit 849e3e93 authored by Mitko Haralanov, committed by Doug Ledford

IB/hfi1: Prevent unpinning of wrong pages

The routine used by the SDMA cache to handle already
cached nodes can extend an already existing node.

In its error handling code, the routine will unpin pages
when not all pages of the buffer extension were pinned.

There was a bug in that part of the routine, which would
mistakenly unpin pages from the original set rather than
the newly pinned pages.

This commit fixes that bug by offsetting the page array
to the proper place, so that it points at the beginning
of the newly pinned pages.
Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent de82bdff
...@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *); ...@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool); static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *, static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *); struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
unsigned);
static int check_header_template(struct user_sdma_request *, static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32); struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *, static int set_txreq_header(struct user_sdma_request *,
...@@ -1110,7 +1111,8 @@ static int pin_vector_pages(struct user_sdma_request *req, ...@@ -1110,7 +1111,8 @@ static int pin_vector_pages(struct user_sdma_request *req,
goto bail; goto bail;
} }
if (pinned != npages) { if (pinned != npages) {
unpin_vector_pages(current->mm, pages, pinned); unpin_vector_pages(current->mm, pages, node->npages,
pinned);
ret = -EFAULT; ret = -EFAULT;
goto bail; goto bail;
} }
...@@ -1150,9 +1152,9 @@ static int pin_vector_pages(struct user_sdma_request *req, ...@@ -1150,9 +1152,9 @@ static int pin_vector_pages(struct user_sdma_request *req,
} }
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
unsigned npages) unsigned start, unsigned npages)
{ {
hfi1_release_user_pages(mm, pages, npages, 0); hfi1_release_user_pages(mm, pages + start, npages, 0);
kfree(pages); kfree(pages);
} }
...@@ -1566,7 +1568,8 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, ...@@ -1566,7 +1568,8 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
* prevent a deadlock when hfi1_release_user_pages() attempts to * prevent a deadlock when hfi1_release_user_pages() attempts to
* take the mmap_sem, which the MMU notifier has already taken. * take the mmap_sem, which the MMU notifier has already taken.
*/ */
unpin_vector_pages(mm ? NULL : current->mm, node->pages, node->npages); unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
node->npages);
/* /*
* If called by the MMU notifier, we have to adjust the pinned * If called by the MMU notifier, we have to adjust the pinned
* page count ourselves. * page count ourselves.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment