Commit 8d00b77a authored by Jason Gunthorpe, committed by Joerg Roedel

iommu/amd: Move allocation of the top table into v1_alloc_pgtable

All the page table memory should be allocated/freed within the io_pgtable
struct. The v2 path is already doing this, make it consistent.

It is hard to see but the free of the root in protection_domain_free() is
a NOP on the success path because v1_free_pgtable() does
amd_iommu_domain_clr_pt_root().

The root memory is already freed because free_sub_pt() put it on the
freelist. The free path in protection_domain_free() is only used during
error unwind of protection_domain_alloc().
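
This is easiest to see with a toy model. The following is a minimal userspace
C sketch, not the kernel code: the struct fields and function bodies are
hypothetical stand-ins that only mirror the driver's names, but they show why
clearing the cached root pointer inside the free path turns the later
conditional free into a NOP on the success path.

/* Toy userspace model; hypothetical stand-ins, not the kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct domain {
	void *root;       /* stands in for domain->iop.root */
	int have_pgtable; /* stands in for domain->iop.pgtbl_cfg.tlb */
};

static void v1_free_pgtable(struct domain *d)
{
	free(d->root);  /* models free_sub_pt() putting the root on the freelist */
	d->root = NULL; /* models amd_iommu_domain_clr_pt_root() */
}

static void protection_domain_free(struct domain *d)
{
	if (d->have_pgtable)
		v1_free_pgtable(d);      /* via free_io_pgtable_ops() */
	if (d->root) {                   /* NULL on the success path: a NOP */
		puts("error-unwind free of root");
		free(d->root);
	}
	free(d);
}

int main(void)
{
	/* Success path: pgtable set up, root freed inside v1_free_pgtable(). */
	struct domain *d = calloc(1, sizeof(*d));
	d->root = malloc(4096);
	d->have_pgtable = 1;
	protection_domain_free(d); /* prints nothing */

	/* Error unwind: root allocated but the pgtable ops never installed. */
	d = calloc(1, sizeof(*d));
	d->root = malloc(4096);
	d->have_pgtable = 0;
	protection_domain_free(d); /* prints "error-unwind free of root" */
	return 0;
}

With the allocation moved into v1_alloc_pgtable(), the init path can no longer
leave behind a root that the io_pgtable does not own, so the conditional free
is deleted outright.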
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 89ffb2c3
@@ -573,20 +573,24 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	       pgtable->mode > PAGE_MODE_6_LEVEL);
 
 	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+	iommu_put_pages_list(&freelist);
 
 	/* Update data structure */
 	amd_iommu_domain_clr_pt_root(dom);
 
 	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(dom);
-
-	iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
 
+	pgtable->root = iommu_alloc_page(GFP_KERNEL);
+	if (!pgtable->root)
+		return NULL;
+	pgtable->mode = PAGE_MODE_3_LEVEL;
+
 	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES;
 	cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
@@ -52,8 +52,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
 
-#define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
-
 static DEFINE_SPINLOCK(pd_bitmap_lock);
 
 LIST_HEAD(ioapic_map);
@@ -2260,30 +2258,15 @@ void protection_domain_free(struct protection_domain *domain)
 	if (domain->iop.pgtbl_cfg.tlb)
 		free_io_pgtable_ops(&domain->iop.iop.ops);
 
-	if (domain->iop.root)
-		iommu_free_page(domain->iop.root);
-
 	if (domain->id)
 		domain_id_free(domain->id);
 
 	kfree(domain);
 }
 
-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static int protection_domain_init_v1(struct protection_domain *domain)
 {
-	u64 *pt_root = NULL;
-
-	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-	if (mode != PAGE_MODE_NONE) {
-		pt_root = iommu_alloc_page(GFP_KERNEL);
-		if (!pt_root)
-			return -ENOMEM;
-	}
-
 	domain->pd_mode = PD_MODE_V1;
-	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
 
 	return 0;
 }
@@ -2336,7 +2319,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type)
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
+		ret = protection_domain_init_v1(domain);
 		break;
 	case AMD_IOMMU_V2:
 		ret = protection_domain_init_v2(domain);
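
For readability, here is the resulting shape of the touched functions,
assembled from the hunks above. This is a condensed sketch, not the verbatim
file: unrelated configuration lines are elided, and the trailing return in
v1_alloc_pgtable() is assumed from the usual io_pgtable convention rather than
shown in the diff.

/* Condensed sketch assembled from the hunks above; elisions marked. */
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	/* The io_pgtable owns the top table from the moment it exists. */
	pgtable->root = iommu_alloc_page(GFP_KERNEL);
	if (!pgtable->root)
		return NULL;
	pgtable->mode = PAGE_MODE_3_LEVEL;

	/* ... pgsize_bitmap/ias/oas setup as in the hunk above ... */
	return &pgtable->iop; /* assumed; not part of the hunk shown */
}

static int protection_domain_init_v1(struct protection_domain *domain)
{
	/* No allocation left here; v1_alloc_pgtable() now provides the root. */
	domain->pd_mode = PD_MODE_V1;
	return 0;
}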