Commit f728c17f authored by Farah Kassabri, committed by Oded Gabbay

accel/habanalabs/gaudi2: move HMMU page tables to device memory

Currently, the HMMU page tables reside in host memory, which causes
the device to access the host on every page walk. This can affect
PCIe bandwidth in certain scenarios.

To prevent that, move the HMMU page tables to device memory so that
miss transactions read the hops from there instead of going to the
host.
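In practice (a minimal sketch, not the patch itself), the driver keeps a host-side shadow copy of every hop table for its own software page walks and mirrors each PTE update into the device-resident tables that the H/W MMU walks on a miss. The pattern matches the hl_mmu_dr_write_final_pte() helper added below; the function name used here is illustrative only:

/* sketch: write a leaf PTE to device memory and keep the host shadow in sync */
static void sketch_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	/* device-resident copy, read by the H/W MMU on a TLB miss */
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
			hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr), val);

	/* host shadow copy, used by the driver's own software walks */
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}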
Signed-off-by: Farah Kassabri <fkassabri@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
parent 246d8b6c
@@ -443,18 +443,22 @@ enum hl_collective_mode {
* a CB handle can be provided for jobs on this queue.
* Otherwise, a CB address must be provided.
* @collective_mode: collective mode of current queue
* @q_dram_bd_address: PQ DRAM address, used when the PQ needs to reside in DRAM.
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
* @binned: True if the queue is binned out and should not be used
* @supports_sync_stream: True if queue supports sync stream
* @dram_bd: True if the BD should be copied to DRAM, needed for a PQ that has been allocated on DRAM
*/
struct hw_queue_properties {
enum hl_queue_type type;
enum queue_cb_alloc_flags cb_alloc_flags;
enum hl_collective_mode collective_mode;
u64 q_dram_bd_address;
u8 driver_only;
u8 binned;
u8 supports_sync_stream;
u8 dram_bd;
};
/**
@@ -1052,6 +1056,8 @@ struct hl_encaps_signals_mgr {
* @collective_mode: collective mode of current queue
* @kernel_address: holds the queue's kernel virtual address.
* @bus_address: holds the queue's DMA address.
* @pq_dram_address: holds the PQ's DRAM address when it is allocated in DRAM, used when dram_bd
*                   is true in the queue properties.
* @pi: holds the queue's pi value.
* @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
* @hw_queue_id: the id of the H/W queue.
@@ -1061,6 +1067,7 @@ struct hl_encaps_signals_mgr {
* @valid: is the queue valid (we have array of 32 queues, not all of them
* exist).
* @supports_sync_stream: True if queue supports sync stream
* @dram_bd: True if the BD should be copied to DRAM, needed for a PQ that has been allocated on DRAM
*/
struct hl_hw_queue {
struct hl_cs_job **shadow_queue;
@@ -1069,6 +1076,7 @@ struct hl_hw_queue {
enum hl_collective_mode collective_mode;
void *kernel_address;
dma_addr_t bus_address;
u64 pq_dram_address;
u32 pi;
atomic_t ci;
u32 hw_queue_id;
@@ -1077,6 +1085,7 @@ struct hl_hw_queue {
u16 int_queue_len;
u8 valid;
u8 supports_sync_stream;
u8 dram_bd;
};
/**
@@ -3889,6 +3898,7 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
struct hl_hr_mmu_funcs *hr_func);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
@@ -3896,6 +3906,22 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr);
void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr);
void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info);
u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx);
u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx);
void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val);
void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr);
u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr);
int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr);
u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop);
u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx);
void hl_mmu_dr_flush(struct hl_ctx *ctx);
int hl_mmu_dr_init(struct hl_device *hdev);
void hl_mmu_dr_fini(struct hl_device *hdev);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
......
@@ -84,6 +84,8 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
u32 ctl, u32 len, u64 ptr)
{
struct hl_bd *bd;
u64 addr;
int i;
bd = q->kernel_address;
bd += hl_pi_2_offset(q->pi);
@@ -91,7 +93,16 @@ void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
bd->len = cpu_to_le32(len);
bd->ptr = cpu_to_le64(ptr);
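/*
 * When the PQ resides in DRAM, mirror the 16-byte BD into the DRAM copy of
 * the queue at the current PI offset, as two 64-bit device-memory writes.
 */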
if (q->dram_bd)
for (i = 0 ; i < 2 ; i++) {
addr = q->pq_dram_address +
((hl_pi_2_offset(q->pi) * sizeof(struct hl_bd)) + (i * sizeof(u64)));
hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, addr,
(u64 *)(bd) + i, DEBUGFS_WRITE64);
}
q->pi = hl_queue_inc_ptr(q->pi);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
@@ -1087,12 +1098,18 @@ int hl_hw_queues_create(struct hl_device *hdev)
q->supports_sync_stream =
asic->hw_queues_props[i].supports_sync_stream;
q->collective_mode = asic->hw_queues_props[i].collective_mode;
q->dram_bd = asic->hw_queues_props[i].dram_bd;
rc = queue_init(hdev, q, i);
if (rc) {
dev_err(hdev->dev,
"failed to initialize queue %d\n", i);
goto release_queues;
}
/* Set the DRAM PQ address for the queue if its PQ should reside in DRAM */
if (q->dram_bd)
q->pq_dram_address = asic->hw_queues_props[i].q_dram_bd_address;
}
return 0;
......
# SPDX-License-Identifier: GPL-2.0-only
HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
common/mmu/mmu_v2_hr.o
common/mmu/mmu_v2.o common/mmu/mmu_v2_hr.o
@@ -585,6 +585,8 @@ int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
if (hdev->mmu_disable)
return 0;
@@ -597,8 +599,9 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
case ASIC_GAUDI2C:
/* MMUs in Gaudi2 are always host resident */
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
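/*
 * HMMU (DMMU) page tables are now device resident; the host-resident
 * funcs are needed only if the PMMU page tables still reside in host memory.
 */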
hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
if (prop->pmmu.host_resident)
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
@@ -1209,3 +1212,219 @@ int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_
return 0;
}
struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = NULL;
hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
(unsigned long) hop_addr)
if (hop_addr == pgt_info->shadow_addr)
break;
return pgt_info;
}
void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
hl_mmu_dr_free_pgt_node(ctx, pgt_info);
}
void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
struct hl_device *hdev = ctx->hdev;
gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
hdev->asic_prop.mmu_hop_table_size);
hash_del(&pgt_info->node);
kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
kfree(pgt_info);
}
u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
{
return ctx->hdev->asic_prop.mmu_pgt_addr +
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
{
return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
(ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
}
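/*
 * Translate a host shadow address to the matching device-resident physical
 * address: the hop base is resolved via the shadow hash (or hop0 directly),
 * while the PTE offset within the hop table is preserved.
 */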
u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
u64 page_mask = ctx->hdev->asic_prop.mmu_hop_table_size - 1;
u64 shadow_hop_addr = shadow_addr & (~page_mask);
u64 pte_offset = shadow_addr & page_mask;
u64 phys_hop_addr;
if (shadow_hop_addr != hl_mmu_dr_get_hop0_addr(ctx))
phys_hop_addr = hl_mmu_dr_get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
else
phys_hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
return phys_hop_addr + pte_offset;
}
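/*
 * Write a PTE that points to another hop: translate both the destination and
 * the value from shadow to physical, write the physical PTE to device memory
 * and keep the host shadow copy in sync.
 */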
void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
u64 phys_val = hl_mmu_dr_get_phys_addr(ctx, val);
ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
phys_val);
*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
ctx->hdev->asic_funcs->write_pte(ctx->hdev,
hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr), val);
*(u64 *) (uintptr_t) shadow_pte_addr = val;
}
void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
hl_mmu_dr_write_final_pte(ctx, pte_addr, 0);
}
void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
hl_mmu_dr_get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}
int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
int num_of_ptes_left;
pgt_info->num_of_ptes--;
/*
* Need to save the number of ptes left because hl_mmu_dr_free_pgt_node()
* might free the pgt_info
*/
num_of_ptes_left = pgt_info->num_of_ptes;
if (!num_of_ptes_left)
hl_mmu_dr_free_pgt_node(ctx, pgt_info);
return num_of_ptes_left;
}
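/*
 * Allocate a new hop table: device memory from the page-table gen pool plus
 * a zeroed host shadow buffer, tracked together in the context's shadow hash.
 */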
u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct pgt_info *pgt_info;
u64 phys_addr, shadow_addr;
pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
if (!pgt_info)
return ULLONG_MAX;
phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
prop->mmu_hop_table_size);
if (!phys_addr) {
dev_err(hdev->dev, "failed to allocate page\n");
goto pool_add_err;
}
shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
GFP_KERNEL);
if (!shadow_addr)
goto shadow_err;
pgt_info->phys_addr = phys_addr;
pgt_info->shadow_addr = shadow_addr;
pgt_info->ctx = ctx;
pgt_info->num_of_ptes = 0;
hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
return shadow_addr;
shadow_err:
gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
phys_addr, prop->mmu_hop_table_size);
pool_add_err:
kfree(pgt_info);
return ULLONG_MAX;
}
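/*
 * Return the shadow address of the next hop referenced by the given PTE,
 * allocating a new hop (and reporting it via is_new_hop) if none exists yet.
 */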
u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
{
u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr == ULLONG_MAX) {
hop_addr = hl_mmu_dr_alloc_hop(ctx);
*is_new_hop = (hop_addr != ULLONG_MAX);
}
return hop_addr;
}
void hl_mmu_dr_flush(struct hl_ctx *ctx)
{
/* flush all writes from all cores to reach PCI */
mb();
ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
}
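/*
 * Create the device page-table pool (everything past the per-ASID hop0
 * tables) and allocate host shadow hop0 tables for all ASIDs.
 */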
int hl_mmu_dr_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
hdev->mmu_priv.dr.mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
dev_err(hdev->dev, "Failed to create page gen pool\n");
return -ENOMEM;
}
rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
prop->mmu_hop0_tables_total_size,
prop->dmmu.pgt_size - prop->mmu_hop0_tables_total_size,
-1);
if (rc) {
dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
goto err_pool_add;
}
hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
prop->mmu_hop_table_size, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
}
/* MMU H/W init will be done in device hw_init() */
return 0;
err_pool_add:
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
return rc;
}
void hl_mmu_dr_fini(struct hl_device *hdev)
{
/* MMU H/W fini was already done in device hw_fini() */
if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
return;
kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
/* Make sure that if we arrive here again without init having been
* called, we won't cause a kernel panic. This can happen, for
* example, if we fail during the hard-reset code at certain points.
*/
hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"
#include "../../include/hw_ip/mmu/mmu_v2_0.h"
#include <linux/slab.h>
/**
* hl_mmu_v2_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a hash to hold all page table hops related to this context.
* Return: 0 on success, non-zero otherwise.
*/
static int hl_mmu_v2_ctx_init(struct hl_ctx *ctx)
{
hash_init(ctx->mmu_shadow_hash);
return 0;
}
/*
* hl_mmu_v2_ctx_fini - disable a ctx from using the mmu module
*
* @ctx: pointer to the context structure
*
* This function does the following:
* - Free any pgts which were not freed yet
*/
static void hl_mmu_v2_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
struct pgt_info *pgt_info;
struct hlist_node *tmp;
int i;
if (!hash_empty(ctx->mmu_shadow_hash))
dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
ctx->asid);
hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
dev_err_ratelimited(hdev->dev,
"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
hl_mmu_dr_free_pgt_node(ctx, pgt_info);
}
}
static int hl_mmu_v2_unmap(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr)
{
u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 }, curr_pte,
scrambled_virt_addr;
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
bool is_huge = false;
int i, hop_last;
/* device-resident page tables in v2 are allowed only for the HMMU */
if (!is_dram_addr)
return -EINVAL;
mmu_prop = &prop->dmmu;
hop_last = mmu_prop->num_hops - 1;
scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hop_addr[0], scrambled_virt_addr);
if (hop_pte_addr[0] == U64_MAX)
return -EFAULT;
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
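/*
 * Walk the remaining hops through the host shadow tables; stop early when a
 * PTE with the last-hop (huge page) bit is found.
 */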
for (i = 1 ; i < mmu_prop->num_hops ; i++) {
hop_addr[i] = hl_mmu_get_next_hop_addr(ctx, curr_pte);
if (hop_addr[i] == ULLONG_MAX)
goto not_mapped;
hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hop_addr[i], scrambled_virt_addr);
if (hop_pte_addr[i] == U64_MAX)
return -EFAULT;
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
if ((i <= hop_last) && (curr_pte & mmu_prop->last_mask)) {
hop_last = i;
is_huge = true;
break;
}
}
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
return -EFAULT;
}
if (!(curr_pte & PAGE_PRESENT_MASK))
goto not_mapped;
for (i = hop_last ; i > 0 ; i--) {
hl_mmu_dr_clear_pte(ctx, hop_pte_addr[i]);
if (hl_mmu_dr_put_pte(ctx, hop_addr[i]))
goto mapped;
}
hl_mmu_dr_clear_pte(ctx, hop_pte_addr[0]);
mapped:
return 0;
not_mapped:
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
return -EINVAL;
}
static int hl_mmu_v2_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
u32 page_size, bool is_dram_addr)
{
u64 hop_addr[MMU_ARCH_6_HOPS] = { 0 }, hop_pte_addr[MMU_ARCH_6_HOPS] = { 0 },
curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
bool hop_new[MMU_ARCH_6_HOPS] = { false };
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
int rc, i, hop_last;
/* device-resident page tables in v2 are allowed only for the HMMU */
if (!is_dram_addr)
return -EINVAL;
mmu_prop = &prop->dmmu;
hop_last = mmu_prop->num_hops - 1;
scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
/* First hop is preallocated therefore it is treated differently */
hop_addr[0] = hl_mmu_dr_get_hop0_addr(ctx);
hop_pte_addr[0] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hop_addr[0], scrambled_virt_addr);
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[0];
/* Handle hop1 to hop_last */
for (i = 1 ; i <= hop_last ; i++) {
hop_addr[i] = hl_mmu_dr_get_alloc_next_hop_addr(ctx, curr_pte, &hop_new[i]);
if (hop_addr[i] == ULLONG_MAX) {
rc = -ENOMEM;
goto err;
}
hop_pte_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hop_addr[i], scrambled_virt_addr);
if (hop_pte_addr[i] == U64_MAX) {
rc = -EINVAL;
goto err;
}
if (!hop_pte_addr[i]) {
rc = -EINVAL;
goto err;
}
curr_pte = *(u64 *) (uintptr_t) hop_pte_addr[i];
}
if (curr_pte & PAGE_PRESENT_MASK) {
dev_err(hdev->dev,
"mapping already exists for virt_addr 0x%llx\n",
virt_addr);
for (i = 0 ; i <= hop_last ; i++)
dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
i, *(u64 *) (uintptr_t) hop_pte_addr[i],
hop_pte_addr[i]);
rc = -EINVAL;
goto err;
}
curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK)
| mmu_prop->last_mask | PAGE_PRESENT_MASK;
/* Write the PTEs */
hl_mmu_dr_write_final_pte(ctx, hop_pte_addr[hop_last], curr_pte);
/* for each new hop, add its address to the table of previous-hop */
for (i = 1 ; i <= hop_last ; i++) {
if (hop_new[i]) {
curr_pte = (hop_addr[i] & HOP_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
hl_mmu_dr_write_pte(ctx, hop_pte_addr[i - 1], curr_pte);
if (i - 1)
hl_mmu_dr_get_pte(ctx, hop_addr[i - 1]);
}
}
hl_mmu_dr_get_pte(ctx, hop_addr[hop_last]);
return 0;
err:
for (i = 1 ; i <= hop_last ; i++)
if (hop_new[i] && (hop_addr[i] != U64_MAX))
hl_mmu_dr_free_hop(ctx, hop_addr[i]);
return rc;
}
/*
* hl_mmu_v2_swap_out - marks all mappings of the given ctx as swapped out
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v2_swap_out(struct hl_ctx *ctx)
{
}
/*
* hl_mmu_v2_swap_in - marks all mappings of the given ctx as swapped in
*
* @ctx: pointer to the context structure
*
*/
static void hl_mmu_v2_swap_in(struct hl_ctx *ctx)
{
}
static int hl_mmu_v2_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops)
{
struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
struct hl_device *hdev = ctx->hdev;
struct hl_mmu_properties *mmu_prop;
bool is_dram_addr;
int i;
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
prop->dmmu.start_addr,
prop->dmmu.end_addr);
/* device-resident page tables in v2 are allowed only for the HMMU */
if (!is_dram_addr)
return -EINVAL;
mmu_prop = &prop->dmmu;
hops->range_type = HL_VA_RANGE_TYPE_DRAM;
hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
hops->hop_info[0].hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);
hops->hop_info[0].hop_pte_addr = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, 0,
hops->hop_info[0].hop_addr,
hops->scrambled_vaddr);
if (hops->hop_info[0].hop_pte_addr == U64_MAX)
return -EFAULT;
hops->hop_info[0].hop_pte_val = hdev->asic_funcs->read_pte(hdev,
hops->hop_info[0].hop_pte_addr);
if (hops->hop_info[0].hop_pte_val == U64_MAX)
return -EFAULT;
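/* walk the device-resident hops via read_pte until the last-hop PTE is found */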
for (i = 1 ; i < mmu_prop->num_hops ; i++) {
hops->hop_info[i].hop_addr =
hl_mmu_get_next_hop_addr(ctx, hops->hop_info[i - 1].hop_pte_val);
if (hops->hop_info[i].hop_addr == ULLONG_MAX)
return -EFAULT;
hops->hop_info[i].hop_pte_addr =
hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
hops->hop_info[i].hop_addr,
hops->scrambled_vaddr);
if (hops->hop_info[i].hop_pte_addr == U64_MAX)
return -EFAULT;
hops->hop_info[i].hop_pte_val =
hdev->asic_funcs->read_pte(hdev,
hops->hop_info[i].hop_pte_addr);
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
break;
}
/* if passed over all hops then no last hop was found */
if (i == mmu_prop->num_hops)
return -EFAULT;
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
if (hops->scrambled_vaddr != virt_addr)
hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
(hdev, hops->hop_info[i].hop_pte_val);
else
hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
hops->used_hops = i + 1;
return 0;
}
/*
* hl_mmu_v2_prepare - prepare mmu_if for working with mmu v2
*
* @hdev: pointer to the device structure
* @mmu_if: pointer to the mmu interface structure
*/
void hl_mmu_v2_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
mmu->init = hl_mmu_dr_init;
mmu->fini = hl_mmu_dr_fini;
mmu->ctx_init = hl_mmu_v2_ctx_init;
mmu->ctx_fini = hl_mmu_v2_ctx_fini;
mmu->map = hl_mmu_v2_map;
mmu->unmap = hl_mmu_v2_unmap;
mmu->flush = hl_mmu_dr_flush;
mmu->swap_out = hl_mmu_v2_swap_out;
mmu->swap_in = hl_mmu_v2_swap_in;
mmu->get_tlb_info = hl_mmu_v2_get_tlb_info;
}
@@ -649,6 +649,7 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
prop->dmmu.end_addr = VA_HOST_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
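/* the common device-resident MMU init sizes its page-table pool from dmmu.pgt_size */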
prop->dmmu.pgt_size = prop->mmu_pgt_size;
prop->cfg_size = CFG_SIZE;
prop->max_asid = MAX_ASID;
......
@@ -19,8 +19,6 @@
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
#define MMU_PAGE_TABLES_INITIAL_SIZE 0x10000000 /* 256MB */
#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define NUMBER_OF_PDMA_QUEUES 2
@@ -109,13 +107,11 @@
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
/* This define should be used only when working in a debug mode without dram.
* When working with dram, the driver size will be calculated dynamically.
*/
#define NIC_DEFAULT_DRV_SIZE 0x20000000 /* 512MB */
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
#define PMMU_PAGE_TABLES_SIZE 0x10000000 /* 256MB */
#define EDMA_PQS_SIZE SZ_2M
#define EDMA_SCRATCHPAD_SIZE SZ_1M
#define HMMU_PAGE_TABLES_SIZE SZ_1M
#define NIC_NUMBER_OF_PORTS NIC_NUMBER_OF_ENGINES
......
@@ -26,6 +26,8 @@
#define LAST_MASK 0x0000000000800ull
#define FLAGS_MASK 0x0000000000FFFull
#define MMU_ARCH_3_HOPS 3
#define MMU_ARCH_4_HOPS 4
#define MMU_ARCH_5_HOPS 5
#define MMU_ARCH_6_HOPS 6
......