Commit 9871e2de authored by Mike Kravetz, committed by Linus Torvalds

mm/cma: add cma_pages_valid to determine if pages are in CMA

Add a new interface, cma_pages_valid(), which indicates whether the
specified pages are part of a CMA region.  This interface will be used
in a subsequent patch by hugetlb code.
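As a rough illustration of the caller pattern this enables (the
function and names below are hypothetical sketches, not taken from the
subsequent hugetlb patch), a caller can now test CMA membership before
deciding which free path to use:

	/*
	 * Hypothetical sketch only: free contiguous pages either back to
	 * their CMA area or through the generic contiguous-range path,
	 * depending on where they actually live.  "my_cma" is an
	 * illustrative pointer to an area set up elsewhere, e.g. with
	 * cma_declare_contiguous().
	 */
	static void free_region(struct cma *my_cma, struct page *page,
				unsigned long nr_pages)
	{
		if (cma_pages_valid(my_cma, page, nr_pages))
			cma_release(my_cma, page, nr_pages);
		else
			free_contig_range(page_to_pfn(page), nr_pages);
	}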

In order to keep the same amount of DEBUG information, a pr_debug()
call was added to cma_pages_valid().  In the case where the page passed
to cma_release() is not in the CMA region, the debug message will now be
printed from cma_pages_valid() rather than cma_release().

Link: https://lkml.kernel.org/r/20211007181918.136982-3-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Nghia Le <nghialm78@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79dfc695
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 					struct cma **res_cma);
 extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
 			      bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
 extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -524,6 +524,25 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 	return page;
 }
 
+bool cma_pages_valid(struct cma *cma, const struct page *pages,
+		     unsigned long count)
+{
+	unsigned long pfn;
+
+	if (!cma || !pages)
+		return false;
+
+	pfn = page_to_pfn(pages);
+
+	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) {
+		pr_debug("%s(page %p, count %lu)\n", __func__,
+			 (void *)pages, count);
+		return false;
+	}
+
+	return true;
+}
+
 /**
  * cma_release() - release allocated pages
  * @cma: Contiguous memory region for which the allocation is performed.
@@ -539,16 +558,13 @@ bool cma_release(struct cma *cma, const struct page *pages,
 {
 	unsigned long pfn;
 
-	if (!cma || !pages)
+	if (!cma_pages_valid(cma, pages, count))
 		return false;
 
 	pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);
 
 	pfn = page_to_pfn(pages);
 
-	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
-		return false;
-
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
 	free_contig_range(pfn, count);
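One observable effect of the refactor, sketched below under the
assumption of a valid "cma" area and a page that did not come from it:
cma_release() still returns false for an out-of-range page, but the
pr_debug() message (which prints __func__) is now emitted from
cma_pages_valid() instead of cma_release().

	/*
	 * Sketch of the changed failure path.  A GFP_KERNEL page comes
	 * from the buddy allocator, not from a CMA pfn range.
	 */
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!cma_release(cma, page, 1))
		/*
		 * cma_pages_valid() rejected the page (printing the
		 * debug message if DEBUG output is enabled), so free it
		 * through the path that allocated it.
		 */
		__free_pages(page, 0);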