Commit 3fa116e8 authored by Linus Torvalds

Merge tag 'powerpc-3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux

Pull powerpc fixes from Michael Ellerman:
 "A few powerpc fixes"

* tag 'powerpc-3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc: Work around gcc bug in current_thread_info()
  cxl: Fix issues when unmapping contexts
  powernv: Fix OPAL tracepoint code
parents f800c25b a87e810f
arch/powerpc/include/asm/thread_info.h

@@ -23,9 +23,9 @@
 #define THREAD_SIZE		(1 << THREAD_SHIFT)
 
 #ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp)	clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
 #else
-#define CURRENT_THREAD_INFO(dest, sp)	rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
 #endif
 
 #ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
 
 /* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
 static inline struct thread_info *current_thread_info(void)
 {
-	/* gcc4, at least, is smart enough to turn this into a single
-	 * rlwinm for ppc32 and clrrdi for ppc64 */
-	return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+	unsigned long val;
+
+	asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+	return (struct thread_info *)val;
 }
 
 #endif /* __ASSEMBLY__ */
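The workaround hinges on stringify_in_c(): with it, CURRENT_THREAD_INFO() expands to the raw clrrdi/rlwinm tokens when assembled, and to a string template when compiled as C, so current_thread_info() can emit the masking instruction itself via asm() instead of trusting gcc to fold the r1 register variable correctly (the gcc bug named in the commit subject). A sketch of that helper, following its definition in arch/powerpc/include/asm/asm-compat.h:

    /* In assembly, pass the tokens through untouched; in C, stringify
     * them (commas and all) into an inline-asm template. */
    #ifdef __ASSEMBLY__
    #define stringify_in_c(...)	__VA_ARGS__
    #else
    #define __stringify_in_c(...)	#__VA_ARGS__
    #define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
    #endif

From C, CURRENT_THREAD_INFO(%0,1) therefore becomes a template like "clrrdi %0, 1, <THREAD_SHIFT> " (with THREAD_SHIFT already macro-expanded), and the asm() in current_thread_info() emits exactly the single mask instruction the old comment promised gcc would generate.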
arch/powerpc/platforms/powernv/opal-wrappers.S

@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION;						\
 	b	1f;						\
 END_FTR_SECTION(0, 1);						\
 	ld	r12,opal_tracepoint_refcount@toc(r2);		\
-	std	r12,32(r1);					\
 	cmpdi	r12,0;						\
 	bne-	LABEL;						\
 1:
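The dropped std is the whole fix here: nothing in the sequence reloads 32(r1), and the refcount loaded into r12 is only needed for the cmpdi/bne- pair that decides whether to branch to the tracepoint path, so the store presumably scribbled on a stack slot this macro does not own (it runs before the OPAL call has set up any frame of its own).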
drivers/misc/cxl/context.c

@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
 
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct cxl_context *ctx = vma->vm_file->private_data;
+	unsigned long address = (unsigned long)vmf->virtual_address;
+	u64 area, offset;
+
+	offset = vmf->pgoff << PAGE_SHIFT;
+
+	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+			__func__, ctx->pe, address, offset);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		area = ctx->afu->psn_phys;
+		if (offset > ctx->afu->adapter->ps_size)
+			return VM_FAULT_SIGBUS;
+	} else {
+		area = ctx->psn_phys;
+		if (offset > ctx->psn_size)
+			return VM_FAULT_SIGBUS;
+	}
+
+	mutex_lock(&ctx->status_mutex);
+
+	if (ctx->status != STARTED) {
+		mutex_unlock(&ctx->status_mutex);
+		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+	mutex_unlock(&ctx->status_mutex);
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+	.fault = cxl_mmap_fault,
+};
+
 /*
  * Map a per-context mmio space into the given vma.
  */
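Two design points in this new fault handler are worth spelling out. Mapping the problem state area lazily, one vm_insert_pfn() per fault, instead of eagerly with vm_iomap_memory(), is what makes the mapping revocable: once unmap_mapping_range() zaps the PTEs, the next userspace access refaults into cxl_mmap_fault(), which checks ctx->status under status_mutex and returns VM_FAULT_SIGBUS for a context that is no longer STARTED, rather than letting a stale translation reach the card. And vm_insert_pfn() requires the vma to be marked VM_PFNMAP, which is why the hunks below add VM_IO | VM_PFNMAP and install cxl_mmap_vmops in cxl_context_iomap().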
@@ -108,11 +148,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	u64 len = vma->vm_end - vma->vm_start;
 	len = min(len, ctx->psn_size);
 
-	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
-	}
-
+	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
 	/* make sure there is a valid per process space for this AFU */
 	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
 		pr_devel("AFU doesn't support mmio space\n");
@@ -122,12 +158,15 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 	/* Can't mmap until the AFU is enabled */
 	if (!ctx->afu->enabled)
 		return -EBUSY;
+	}
 
 	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
 		 ctx->psn_phys, ctx->pe , ctx->master);
 
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	return vm_iomap_memory(vma, ctx->psn_phys, len);
+	vma->vm_ops = &cxl_mmap_vmops;
+	return 0;
 }
 
 /*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
 	afu_release_irqs(ctx);
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	wake_up_all(&ctx->wq);
-
-	/* Release Problem State Area mapping */
-	mutex_lock(&ctx->mapping_lock);
-	if (ctx->mapping)
-		unmap_mapping_range(ctx->mapping, 0, 0, 1);
-	mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
 		 * created and torn down after the IDR removed
 		 */
 		__detach_context(ctx);
+
+		/*
+		 * We are force detaching - remove any active PSA mappings so
+		 * userspace cannot interfere with the card if it comes back.
+		 * Easiest way to exercise this is to unbind and rebind the
+		 * driver via sysfs while it is in use.
+		 */
+		mutex_lock(&ctx->mapping_lock);
+		if (ctx->mapping)
+			unmap_mapping_range(ctx->mapping, 0, 0, 1);
+		mutex_unlock(&ctx->mapping_lock);
 	}
 	mutex_unlock(&afu->contexts_lock);
 }
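For reference, unmap_mapping_range() with holelen == 0 unmaps everything from holebegin to the end of the file, and even_cows == 1 zaps private COW copies too, so the (mapping, 0, 0, 1) call above tears down every PTE in every VMA of this context's address_space. A minimal sketch of the revoke pattern, under the same assumptions as the driver code above:

    #include <linux/mm.h>

    /* Zap every user PTE currently mapping this file; the next access
     * takes a fresh fault, and cxl_mmap_fault() refuses contexts that
     * are no longer STARTED with VM_FAULT_SIGBUS. */
    static void revoke_all_user_mappings(struct address_space *mapping)
    {
    	/* offset 0, length 0 (= to EOF), even_cows = 1 */
    	unmap_mapping_range(mapping, 0, 0, 1);
    }

Within this diff, the unmap also moves: __detach_context() no longer does it on every detach; only the force-detach loop in cxl_context_detach_all() does, for the reason the new comment gives.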
drivers/misc/cxl/file.c

@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
 	pr_devel("%s: pe: %i\n", __func__, ctx->pe);
 
-	mutex_lock(&ctx->status_mutex);
-	if (ctx->status != OPENED) {
-		rc = -EIO;
-		goto out;
-	}
-
+	/* Do this outside the status_mutex to avoid a circular dependency with
+	 * the locking in cxl_mmap_fault() */
 	if (copy_from_user(&work, uwork,
 			   sizeof(struct cxl_ioctl_start_work))) {
 		rc = -EFAULT;
 		goto out;
 	}
 
+	mutex_lock(&ctx->status_mutex);
+	if (ctx->status != OPENED) {
+		rc = -EIO;
+		goto out;
+	}
+
 	/*
 	 * if any of the reserved fields are set or any of the unused
 	 * flags are set it's invalid
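The reordering in afu_ioctl_start_work() fixes a lock-order inversion rather than a simple bug: copy_from_user() can take a page fault, and the fault path acquires mmap_sem, while cxl_mmap_fault() always runs with mmap_sem already held and then takes status_mutex. Holding status_mutex across copy_from_user() therefore produces the classic ABBA pair:

    afu_ioctl_start_work:  status_mutex -> mmap_sem     (fault in copy_from_user)
    cxl_mmap_fault:        mmap_sem     -> status_mutex

Doing the copy first, and only then taking status_mutex, keeps the order consistent with the fault path. For context, the userspace sequence these two paths serve looks roughly like the sketch below; it follows the cxl char-device API, but the device path and the zero work element descriptor are illustrative stand-ins, and error handling is omitted:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <misc/cxl.h>		/* CXL_IOCTL_START_WORK */

    int main(void)
    {
    	struct cxl_ioctl_start_work work;
    	/* Illustrative device node; master contexts use the "m" node. */
    	int fd = open("/dev/cxl/afu0.0m", O_RDWR);

    	memset(&work, 0, sizeof(work));
    	work.work_element_descriptor = 0;	/* illustrative WED */
    	ioctl(fd, CXL_IOCTL_START_WORK, &work);	/* -> afu_ioctl_start_work() */

    	/* Problem state area: pages appear via cxl_mmap_fault() on
    	 * first touch and can be revoked via unmap_mapping_range(). */
    	void *psa = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
    			 MAP_SHARED, fd, 0);
    	return psa == MAP_FAILED;
    }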