Commit f24be42a, authored by Christophe Lombard, committed by Michael Ellerman

cxl: Add psl9 specific code

The new Coherent Accelerator Interface Architecture, level 2, for the
IBM POWER9 brings new content and features:
- POWER9 Service Layer
- Registers
- Radix mode
- Process element entry
- Dedicated-Shared Process Programming Model
- Translation Fault Handling
- CAPP
- Memory Context ID
    If a valid mm_struct is found the memory context id is used for each
    transaction associated with the process handle. The PSL uses the
    context ID to find the corresponding process element.
Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
[mpe: Fixup comment formatting, unsplit long strings]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent abd1d99b
...@@ -21,7 +21,7 @@ Introduction ...@@ -21,7 +21,7 @@ Introduction
Hardware overview Hardware overview
================= =================
POWER8 FPGA POWER8/9 FPGA
+----------+ +---------+ +----------+ +---------+
| | | | | | | |
| CPU | | AFU | | CPU | | AFU |
...@@ -34,7 +34,7 @@ Hardware overview ...@@ -34,7 +34,7 @@ Hardware overview
| | CAPP |<------>| | | | CAPP |<------>| |
+---+------+ PCIE +---------+ +---+------+ PCIE +---------+
The POWER8 chip has a Coherently Attached Processor Proxy (CAPP) The POWER8/9 chip has a Coherently Attached Processor Proxy (CAPP)
unit which is part of the PCIe Host Bridge (PHB). This is managed unit which is part of the PCIe Host Bridge (PHB). This is managed
by Linux by calls into OPAL. Linux doesn't directly program the by Linux by calls into OPAL. Linux doesn't directly program the
CAPP. CAPP.
...@@ -59,6 +59,17 @@ Hardware overview ...@@ -59,6 +59,17 @@ Hardware overview
the fault. The context to which this fault is serviced is based on the fault. The context to which this fault is serviced is based on
who owns that acceleration function. who owns that acceleration function.
POWER8 <-----> PSL Version 8 is compliant to the CAIA Version 1.0.
POWER9 <-----> PSL Version 9 is compliant to the CAIA Version 2.0.
This PSL Version 9 provides new features such as:
* Interaction with the nest MMU on the P9 chip.
* Native DMA support.
* Supports sending ASB_Notify messages for host thread wakeup.
* Supports Atomic operations.
* ....
Cards with a PSL9 won't work on a POWER8 system and cards with a
PSL8 won't work on a POWER9 system.
AFU Modes AFU Modes
========= =========
......
...@@ -188,13 +188,26 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) ...@@ -188,13 +188,26 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
if (start + len > ctx->afu->adapter->ps_size) if (start + len > ctx->afu->adapter->ps_size)
return -EINVAL; return -EINVAL;
if (cxl_is_psl9(ctx->afu)) {
/*
* Make sure there is a valid problem state
* area space for this AFU.
*/
if (ctx->master && !ctx->afu->psa) {
pr_devel("AFU doesn't support mmio space\n");
return -EINVAL;
}
/* Can't mmap until the AFU is enabled */
if (!ctx->afu->enabled)
return -EBUSY;
}
} else { } else {
if (start + len > ctx->psn_size) if (start + len > ctx->psn_size)
return -EINVAL; return -EINVAL;
}
if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { /* Make sure there is a valid per process space for this AFU */
/* make sure there is a valid per process space for this AFU */
if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
pr_devel("AFU doesn't support mmio space\n"); pr_devel("AFU doesn't support mmio space\n");
return -EINVAL; return -EINVAL;
......
This diff is collapsed.
...@@ -15,6 +15,12 @@ ...@@ -15,6 +15,12 @@
static struct dentry *cxl_debugfs; static struct dentry *cxl_debugfs;
/*
 * Stop PSL9 trace capture on the given adapter.
 *
 * NOTE(review): the magic value written to CXL_PSL9_TRACECFG presumably
 * drives the trace array state machines to their stopped state — confirm
 * against the PSL9 register specification.
 */
void cxl_stop_trace_psl9(struct cxl *adapter)
{
	/* Stop the trace */
	cxl_p1_write(adapter, CXL_PSL9_TRACECFG, 0x4480000000000000ULL);
}
void cxl_stop_trace_psl8(struct cxl *adapter) void cxl_stop_trace_psl8(struct cxl *adapter)
{ {
int slice; int slice;
...@@ -53,6 +59,14 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode, ...@@ -53,6 +59,14 @@ static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
(void __force *)value, &fops_io_x64); (void __force *)value, &fops_io_x64);
} }
/*
 * Expose the PSL9 adapter-wide registers under the adapter's debugfs
 * directory: the two Fault Isolation Registers and their control
 * register read-only for root, and the trace configuration register
 * read/write so traces can be (re)armed from userspace.
 */
void cxl_debugfs_add_adapter_regs_psl9(struct cxl *adapter, struct dentry *dir)
{
	debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR1));
	debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR2));
	debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_FIR_CNTL));
	debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL9_TRACECFG));
}
void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir) void cxl_debugfs_add_adapter_regs_psl8(struct cxl *adapter, struct dentry *dir)
{ {
debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1)); debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
...@@ -92,6 +106,11 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter) ...@@ -92,6 +106,11 @@ void cxl_debugfs_adapter_remove(struct cxl *adapter)
debugfs_remove_recursive(adapter->debugfs); debugfs_remove_recursive(adapter->debugfs);
} }
/*
 * Expose the per-AFU slice error register (SERR) read-only in the
 * AFU's debugfs directory. On PSL9 this is the only per-slice
 * register published here (contrast with the PSL8 variant below).
 */
void cxl_debugfs_add_afu_regs_psl9(struct cxl_afu *afu, struct dentry *dir)
{
	debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
}
void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir) void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct dentry *dir)
{ {
debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An)); debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
......
...@@ -146,25 +146,26 @@ static void cxl_handle_page_fault(struct cxl_context *ctx, ...@@ -146,25 +146,26 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
return cxl_ack_ae(ctx); return cxl_ack_ae(ctx);
} }
/* if (!radix_enabled()) {
* update_mmu_cache() will not have loaded the hash since current->trap /*
* is not a 0x400 or 0x300, so just call hash_page_mm() here. * update_mmu_cache() will not have loaded the hash since current->trap
*/ * is not a 0x400 or 0x300, so just call hash_page_mm() here.
access = _PAGE_PRESENT | _PAGE_READ; */
if (dsisr & CXL_PSL_DSISR_An_S) access = _PAGE_PRESENT | _PAGE_READ;
access |= _PAGE_WRITE; if (dsisr & CXL_PSL_DSISR_An_S)
access |= _PAGE_WRITE;
access |= _PAGE_PRIVILEGED;
if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) access |= _PAGE_PRIVILEGED;
access &= ~_PAGE_PRIVILEGED; if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
access &= ~_PAGE_PRIVILEGED;
if (dsisr & DSISR_NOHPTE)
inv_flags |= HPTE_NOHPTE_UPDATE; if (dsisr & DSISR_NOHPTE)
inv_flags |= HPTE_NOHPTE_UPDATE;
local_irq_save(flags);
hash_page_mm(mm, dar, access, 0x300, inv_flags); local_irq_save(flags);
local_irq_restore(flags); hash_page_mm(mm, dar, access, 0x300, inv_flags);
local_irq_restore(flags);
}
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
} }
...@@ -184,7 +185,28 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx) ...@@ -184,7 +185,28 @@ static struct mm_struct *get_mem_context(struct cxl_context *ctx)
return ctx->mm; return ctx->mm;
} }
/*
 * A segment miss can only be reported by a PSL8 (DSISR "DS" bit);
 * no equivalent condition is checked for PSL9 here.
 */
static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr)
{
	return cxl_is_psl8(ctx->afu) && (dsisr & CXL_PSL_DSISR_An_DS);
}
/*
 * Decide whether this DSISR value describes a page fault, in a
 * PSL-version-specific way: PSL8 uses the single "DM" bit, while PSL9
 * reports a set of distinct fault-source bits within the CO_MASK group.
 */
static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr)
{
	const u64 psl9_pf_bits = CXL_PSL9_DSISR_An_PF_SLR |
				 CXL_PSL9_DSISR_An_PF_RGC |
				 CXL_PSL9_DSISR_An_PF_RGP |
				 CXL_PSL9_DSISR_An_PF_HRH |
				 CXL_PSL9_DSISR_An_PF_STEG;

	if (cxl_is_psl8(ctx->afu) && (dsisr & CXL_PSL_DSISR_An_DM))
		return true;

	return cxl_is_psl9(ctx->afu) &&
	       ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) & psl9_pf_bits);
}
void cxl_handle_fault(struct work_struct *fault_work) void cxl_handle_fault(struct work_struct *fault_work)
{ {
...@@ -230,9 +252,9 @@ void cxl_handle_fault(struct work_struct *fault_work) ...@@ -230,9 +252,9 @@ void cxl_handle_fault(struct work_struct *fault_work)
} }
} }
if (dsisr & CXL_PSL_DSISR_An_DS) if (cxl_is_segment_miss(ctx, dsisr))
cxl_handle_segment_miss(ctx, mm, dar); cxl_handle_segment_miss(ctx, mm, dar);
else if (dsisr & CXL_PSL_DSISR_An_DM) else if (cxl_is_page_fault(ctx, dsisr))
cxl_handle_page_fault(ctx, mm, dsisr, dar); cxl_handle_page_fault(ctx, mm, dsisr, dar);
else else
WARN(1, "cxl_handle_fault has nothing to handle\n"); WARN(1, "cxl_handle_fault has nothing to handle\n");
......
...@@ -551,13 +551,13 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) ...@@ -551,13 +551,13 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
elem->common.tid = cpu_to_be32(0); /* Unused */ elem->common.tid = cpu_to_be32(0); /* Unused */
elem->common.pid = cpu_to_be32(pid); elem->common.pid = cpu_to_be32(pid);
elem->common.csrp = cpu_to_be64(0); /* disable */ elem->common.csrp = cpu_to_be64(0); /* disable */
elem->common.aurp0 = cpu_to_be64(0); /* disable */ elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
elem->common.aurp1 = cpu_to_be64(0); /* disable */ elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */
cxl_prefault(ctx, wed); cxl_prefault(ctx, wed);
elem->common.sstp0 = cpu_to_be64(ctx->sstp0); elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
elem->common.sstp1 = cpu_to_be64(ctx->sstp1); elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);
/* /*
* Ensure we have at least one interrupt allocated to take faults for * Ensure we have at least one interrupt allocated to take faults for
......
...@@ -34,6 +34,57 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da ...@@ -34,6 +34,57 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/*
 * Main handler for a PSL9 slice interrupt.
 *
 * Decodes the DSISR captured in @irq_info and dispatches:
 *  - translation faults (An_TF) are deferred to the cxl fault worker
 *    via schedule_cxl_fault();
 *  - PSL errors (An_PE) are passed to the adapter's slice-error handler;
 *  - AFU errors (An_AE) are latched into the context (for the owning
 *    process to collect) and the interrupt acknowledged;
 *  - an OS context warning (An_OC) is only logged.
 * Any other DSISR value is unexpected and triggers a WARN.
 *
 * Returns IRQ_HANDLED (or whatever the delegated handler returns).
 */
irqreturn_t cxl_irq_psl9(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	trace_cxl_psl9_irq(ctx, irq, dsisr, dar);

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL9_DSISR_An_TF) {
		/* Translation faults need process context; punt to the worker */
		pr_devel("CXL interrupt: Scheduling translation fault handling for later (pe: %i)\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL9_DSISR_An_PE)
		return cxl_ops->handle_psl_slice_error(ctx, dsisr,
						       irq_info->errstat);
	if (dsisr & CXL_PSL9_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error undelivered to pe %i: 0x%016llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			/* Latch the error under the lock, then wake any reader */
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = 1;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
		return IRQ_HANDLED;
	}
	if (dsisr & CXL_PSL9_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info) irqreturn_t cxl_irq_psl8(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{ {
u64 dsisr, dar; u64 dsisr, dar;
......
This diff is collapsed.
This diff is collapsed.
...@@ -17,6 +17,15 @@ ...@@ -17,6 +17,15 @@
#include "cxl.h" #include "cxl.h"
/*
 * Render PSL9 DSISR bits as a "|"-separated flag string for trace
 * output. TF/PE/AE/OC match the conditions handled in cxl_irq_psl9();
 * the CO_MASK group of fault-source bits is summarized as "FR".
 * NOTE(review): "S" presumably marks a store (write) access, by analogy
 * with the PSL8 CXL_PSL_DSISR_An_S bit — confirm against CAIA 2.0.
 */
#define dsisr_psl9_flags(flags) \
	__print_flags(flags, "|", \
		{ CXL_PSL9_DSISR_An_CO_MASK,	"FR" }, \
		{ CXL_PSL9_DSISR_An_TF,		"TF" }, \
		{ CXL_PSL9_DSISR_An_PE,		"PE" }, \
		{ CXL_PSL9_DSISR_An_AE,		"AE" }, \
		{ CXL_PSL9_DSISR_An_OC,		"OC" }, \
		{ CXL_PSL9_DSISR_An_S,		"S" })
#define DSISR_FLAGS \ #define DSISR_FLAGS \
{ CXL_PSL_DSISR_An_DS, "DS" }, \ { CXL_PSL_DSISR_An_DS, "DS" }, \
{ CXL_PSL_DSISR_An_DM, "DM" }, \ { CXL_PSL_DSISR_An_DM, "DM" }, \
...@@ -154,6 +163,40 @@ TRACE_EVENT(cxl_afu_irq, ...@@ -154,6 +163,40 @@ TRACE_EVENT(cxl_afu_irq,
) )
); );
/*
 * Tracepoint fired on entry to the PSL9 interrupt handler. Records the
 * adapter number, AFU slice, process element, Linux irq number, the raw
 * DSISR (printed both in hex and decoded via dsisr_psl9_flags) and the
 * faulting address (DAR).
 */
TRACE_EVENT(cxl_psl9_irq,
	TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),

	TP_ARGS(ctx, irq, dsisr, dar),

	TP_STRUCT__entry(
		__field(u8, card)
		__field(u8, afu)
		__field(u16, pe)
		__field(int, irq)
		__field(u64, dsisr)
		__field(u64, dar)
	),

	TP_fast_assign(
		__entry->card = ctx->afu->adapter->adapter_num;
		__entry->afu = ctx->afu->slice;
		__entry->pe = ctx->pe;
		__entry->irq = irq;
		__entry->dsisr = dsisr;
		__entry->dar = dar;
	),

	/* "dsisr=" appears twice on purpose: raw hex, then decoded flags */
	TP_printk("afu%i.%i pe=%i irq=%i dsisr=0x%016llx dsisr=%s dar=0x%016llx",
		__entry->card,
		__entry->afu,
		__entry->pe,
		__entry->irq,
		__entry->dsisr,
		dsisr_psl9_flags(__entry->dsisr),
		__entry->dar
	)
);
TRACE_EVENT(cxl_psl_irq, TRACE_EVENT(cxl_psl_irq,
TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar), TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment