Commit a9f87f64 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use a 64bit interval tree for VM management v2

This only makes a difference for 32-bit systems. The idea is to have a
fixed virtual address space size with 4-level page tables and to
minimize differences between 32 and 64-bit systems.

v2: Update commit message.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ca7f65c7
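
Background for the diff below: the generic struct interval_tree_node from <linux/interval_tree.h> stores its start/last bounds as unsigned long, which is only 32 bits wide on 32-bit kernels. Embedding a plain rb_node plus explicit uint64_t bounds in struct amdgpu_bo_va_mapping and instantiating a private tree with INTERVAL_TREE_DEFINE() keeps GPU VM addresses 64-bit everywhere. The macro generates static insert/remove/iter_first/iter_next helpers named after the prefix passed to it (amdgpu_vm_it here), with interval bounds inclusive of last. A minimal usage sketch, our illustration and not part of the patch:

	/* visit every mapping overlapping [saddr, eaddr] (inclusive,
	 * in GPU page units) in the vm->va tree */
	struct amdgpu_bo_va_mapping *m;

	for (m = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); m;
	     m = amdgpu_vm_it_iter_next(m, saddr, eaddr))
		DRM_DEBUG("mapping 0x%010llx-0x%010llx\n", m->start, m->last);
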
@@ -32,7 +32,7 @@
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/kref.h>
-#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
 #include <linux/hashtable.h>
 #include <linux/dma-fence.h>
@@ -379,7 +379,10 @@ struct amdgpu_bo_list_entry {
 struct amdgpu_bo_va_mapping {
 	struct list_head		list;
-	struct interval_tree_node	it;
+	struct rb_node			rb;
+	uint64_t			start;
+	uint64_t			last;
+	uint64_t			__subtree_last;
 	uint64_t			offset;
 	uint64_t			flags;
 };
...
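
For reference, the three new fields mirror the layout of the generic interval_tree_node, just with 64-bit bounds. The comments below are our annotations, not part of the patch; __subtree_last is the augmented-rbtree field that the INTERVAL_TREE_DEFINE()-generated code maintains (the maximum ->last in each node's subtree, used to prune range queries):

	struct amdgpu_bo_va_mapping {
		struct list_head	list;		/* entry on bo_va->valids/invalids */
		struct rb_node		rb;		/* node in the vm->va tree */
		uint64_t		start;		/* first GPU page of the mapping */
		uint64_t		last;		/* last GPU page, inclusive */
		uint64_t		__subtree_last;	/* max ->last in this subtree */
		uint64_t		offset;
		uint64_t		flags;
	};
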
@@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		}
 		if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
-		    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
 			return -EINVAL;
 		}
@@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 			return r;
 		}
-		offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+		offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 		kptr += chunk_ib->va_start - offset;
 		r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
@@ -1388,8 +1388,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			continue;
 		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
-			if (mapping->it.start > addr ||
-			    addr > mapping->it.last)
+			if (mapping->start > addr ||
+			    addr > mapping->last)
 				continue;
 			*bo = lobj->bo_va->bo;
@@ -1397,8 +1397,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 		}
 		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
-			if (mapping->it.start > addr ||
-			    addr > mapping->it.last)
+			if (mapping->start > addr ||
+			    addr > mapping->last)
 				continue;
 			*bo = lobj->bo_va->bo;
...
@@ -31,6 +31,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
 #include <drm/drmP.h>
 #include <drm/drm.h>
...
@@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map,
 	TP_fast_assign(
 		__entry->bo = bo_va ? bo_va->bo : NULL;
-		__entry->start = mapping->it.start;
-		__entry->last = mapping->it.last;
+		__entry->start = mapping->start;
+		__entry->last = mapping->last;
 		__entry->offset = mapping->offset;
 		__entry->flags = mapping->flags;
 	),
@@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
 	TP_fast_assign(
 		__entry->bo = bo_va->bo;
-		__entry->start = mapping->it.start;
-		__entry->last = mapping->it.last;
+		__entry->start = mapping->start;
+		__entry->last = mapping->last;
 		__entry->offset = mapping->offset;
 		__entry->flags = mapping->flags;
 	),
@@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
 	),
 	TP_fast_assign(
-		__entry->soffset = mapping->it.start;
-		__entry->eoffset = mapping->it.last + 1;
+		__entry->soffset = mapping->start;
+		__entry->eoffset = mapping->last + 1;
 		__entry->flags = mapping->flags;
 	),
 	TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
...
@@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 	start = amdgpu_bo_gpu_offset(bo);
-	end = (mapping->it.last + 1 - mapping->it.start);
+	end = (mapping->last + 1 - mapping->start);
 	end = end * AMDGPU_GPU_PAGE_SIZE + start;
-	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
 	start += addr;
 	amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
...
@@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 	}
 	if ((addr + (uint64_t)size) >
-	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
 		DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
 			  addr, lo, hi);
 		return -EINVAL;
 	}
-	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
 	addr += amdgpu_bo_gpu_offset(bo);
 	addr -= ((uint64_t)size) * ((uint64_t)index);
...
@@ -26,6 +26,7 @@
  * Jerome Glisse
  */
 #include <linux/dma-fence-array.h>
+#include <linux/interval_tree_generic.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -51,6 +52,15 @@
  * SI supports 16.
  */
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
+		     START, LAST, static, amdgpu_vm_it)
+
+#undef START
+#undef LAST
+
 /* Local structure. Encapsulate some VM table update parameters to reduce
  * the number of function parameters
  */
@@ -1301,7 +1311,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 				      struct drm_mm_node *nodes,
 				      struct dma_fence **fence)
 {
-	uint64_t pfn, src = 0, start = mapping->it.start;
+	uint64_t pfn, src = 0, start = mapping->start;
 	int r;
 	/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1353,7 +1363,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 		addr += pfn << PAGE_SHIFT;
-		last = min((uint64_t)mapping->it.last, start + max_entries - 1);
+		last = min((uint64_t)mapping->last, start + max_entries - 1);
 		r = amdgpu_vm_bo_update_mapping(adev, exclusive,
 						src, pages_addr, vm,
 						start, last, flags, addr,
@@ -1368,7 +1378,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
 		}
 		start = last + 1;
-	} while (unlikely(start != mapping->it.last + 1));
+	} while (unlikely(start != mapping->last + 1));
 	return 0;
 }
@@ -1724,9 +1734,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     uint64_t saddr, uint64_t offset,
 		     uint64_t size, uint64_t flags)
 {
-	struct amdgpu_bo_va_mapping *mapping;
+	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	struct amdgpu_vm *vm = bo_va->vm;
-	struct interval_tree_node *it;
 	uint64_t eaddr;
 	/* validate the parameters */
@@ -1743,14 +1752,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
-	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
-	if (it) {
-		struct amdgpu_bo_va_mapping *tmp;
-		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+	if (tmp) {
 		/* bo and tmp overlap, invalid addr */
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
-			tmp->it.start, tmp->it.last + 1);
+			"0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+			tmp->start, tmp->last + 1);
 		return -EINVAL;
 	}
@@ -1759,13 +1766,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		return -ENOMEM;
 	INIT_LIST_HEAD(&mapping->list);
-	mapping->it.start = saddr;
-	mapping->it.last = eaddr;
+	mapping->start = saddr;
+	mapping->last = eaddr;
 	mapping->offset = offset;
 	mapping->flags = flags;
 	list_add(&mapping->list, &bo_va->invalids);
-	interval_tree_insert(&mapping->it, &vm->va);
+	amdgpu_vm_it_insert(mapping, &vm->va);
 	if (flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
saddr /= AMDGPU_GPU_PAGE_SIZE; saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE;
mapping->it.start = saddr; mapping->start = saddr;
mapping->it.last = eaddr; mapping->last = eaddr;
mapping->offset = offset; mapping->offset = offset;
mapping->flags = flags; mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids); list_add(&mapping->list, &bo_va->invalids);
interval_tree_insert(&mapping->it, &vm->va); amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT) if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev); amdgpu_vm_prt_get(adev);
@@ -1860,7 +1867,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	list_for_each_entry(mapping, &bo_va->valids, list) {
-		if (mapping->it.start == saddr)
+		if (mapping->start == saddr)
 			break;
 	}
@@ -1868,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		valid = false;
 		list_for_each_entry(mapping, &bo_va->invalids, list) {
-			if (mapping->it.start == saddr)
+			if (mapping->start == saddr)
 				break;
 		}
@@ -1877,7 +1884,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	}
 	list_del(&mapping->list);
-	interval_tree_remove(&mapping->it, &vm->va);
+	amdgpu_vm_it_remove(mapping, &vm->va);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	if (valid)
@@ -1905,7 +1912,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 			       uint64_t saddr, uint64_t size)
 {
 	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
-	struct interval_tree_node *it;
 	LIST_HEAD(removed);
 	uint64_t eaddr;
@@ -1927,43 +1933,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&after->list);
 	/* Now gather all removed mappings */
-	it = interval_tree_iter_first(&vm->va, saddr, eaddr);
-	while (it) {
-		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
-		it = interval_tree_iter_next(it, saddr, eaddr);
-
+	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+	while (tmp) {
 		/* Remember mapping split at the start */
-		if (tmp->it.start < saddr) {
-			before->it.start = tmp->it.start;
-			before->it.last = saddr - 1;
+		if (tmp->start < saddr) {
+			before->start = tmp->start;
+			before->last = saddr - 1;
 			before->offset = tmp->offset;
 			before->flags = tmp->flags;
 			list_add(&before->list, &tmp->list);
 		}
 		/* Remember mapping split at the end */
-		if (tmp->it.last > eaddr) {
-			after->it.start = eaddr + 1;
-			after->it.last = tmp->it.last;
+		if (tmp->last > eaddr) {
+			after->start = eaddr + 1;
+			after->last = tmp->last;
 			after->offset = tmp->offset;
-			after->offset += after->it.start - tmp->it.start;
+			after->offset += after->start - tmp->start;
 			after->flags = tmp->flags;
 			list_add(&after->list, &tmp->list);
 		}
 		list_del(&tmp->list);
 		list_add(&tmp->list, &removed);
+
+		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
 	}
 	/* And free them up */
 	list_for_each_entry_safe(tmp, next, &removed, list) {
-		interval_tree_remove(&tmp->it, &vm->va);
+		amdgpu_vm_it_remove(tmp, &vm->va);
 		list_del(&tmp->list);
-		if (tmp->it.start < saddr)
-			tmp->it.start = saddr;
-		if (tmp->it.last > eaddr)
-			tmp->it.last = eaddr;
+		if (tmp->start < saddr)
+			tmp->start = saddr;
+		if (tmp->last > eaddr)
+			tmp->last = eaddr;
 		list_add(&tmp->list, &vm->freed);
 		trace_amdgpu_vm_bo_unmap(NULL, tmp);
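
Note on the reshuffled gather loop above: inside the loop each overlapping mapping is only unlinked from its bo_va list; it stays in the interval tree until the second loop removes it with amdgpu_vm_it_remove(). Advancing the iterator from tmp at the bottom of the loop is therefore still valid, which is why the iter_next call could move there once tmp became the tree node itself.
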
@@ -1971,7 +1976,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 	/* Insert partial mapping before the range */
 	if (!list_empty(&before->list)) {
-		interval_tree_insert(&before->it, &vm->va);
+		amdgpu_vm_it_insert(before, &vm->va);
 		if (before->flags & AMDGPU_PTE_PRT)
 			amdgpu_vm_prt_get(adev);
 	} else {
@@ -1980,7 +1985,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 	/* Insert partial mapping after the range */
 	if (!list_empty(&after->list)) {
-		interval_tree_insert(&after->it, &vm->va);
+		amdgpu_vm_it_insert(after, &vm->va);
 		if (after->flags & AMDGPU_PTE_PRT)
 			amdgpu_vm_prt_get(adev);
 	} else {
@@ -2014,13 +2019,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
-		interval_tree_remove(&mapping->it, &vm->va);
+		amdgpu_vm_it_remove(mapping, &vm->va);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
-		interval_tree_remove(&mapping->it, &vm->va);
+		amdgpu_vm_it_remove(mapping, &vm->va);
 		amdgpu_vm_free_mapping(adev, vm, mapping,
 				       bo_va->last_pt_update);
 	}
@@ -2162,9 +2167,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	if (!RB_EMPTY_ROOT(&vm->va)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
 	}
-	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
+	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
 		list_del(&mapping->list);
-		interval_tree_remove(&mapping->it, &vm->va);
+		amdgpu_vm_it_remove(mapping, &vm->va);
 		kfree(mapping);
 	}
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
...