Commit 116d7533 authored by John David Anglin, committed by Helge Deller

parisc: Use lpa instruction to load physical addresses in driver code

Most I/O in the kernel is done using the kernel offset mapping.
However, there is one API that uses aliased kernel address ranges:

> The final category of APIs is for I/O to deliberately aliased address
> ranges inside the kernel.  Such aliases are set up by use of the
> vmap/vmalloc API.  Since kernel I/O goes via physical pages, the I/O
> subsystem assumes that the user mapping and kernel offset mapping are
> the only aliases.  This isn't true for vmap aliases, so anything in
> the kernel trying to do I/O to vmap areas must manually manage
> coherency.  It must do this by flushing the vmap range before doing
> I/O and invalidating it after the I/O returns.
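
For drivers that do hit this case, the manual coherency management described
above is what the generic flush_kernel_vmap_range() and
invalidate_kernel_vmap_range() helpers are for. A minimal sketch (the buffer
handling and the do_device_io() submission step are hypothetical stand-ins,
not code from this commit):

	#include <linux/vmalloc.h>
	#include <linux/highmem.h>

	/* Sketch: I/O to a buffer reached through a vmap alias. */
	static void io_via_vmap_alias(void *vbuf, int len)
	{
		/* Write back dirty cache lines in the alias before the I/O. */
		flush_kernel_vmap_range(vbuf, len);

		do_device_io(vbuf, len);	/* hypothetical I/O submission */

		/* Discard stale lines so later reads see the device's data. */
		invalidate_kernel_vmap_range(vbuf, len);
	}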

For this reason, we should use the hardware lpa instruction to load the
physical addresses of kernel virtual addresses in the driver code.
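
To illustrate the difference (a sketch, not from this commit; the demo
function and variable names are mine): virt_to_phys() merely undoes the
kernel offset mapping, while lpa asks the MMU to translate whatever mapping
is currently live, so it is also correct for a vmap/vmalloc alias:

	#include <linux/io.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <asm/special_insns.h>

	static void lpa_vs_virt_to_phys(void)
	{
		void *lin = kmalloc(64, GFP_KERNEL);	/* offset-mapped */
		void *ali = vmalloc(PAGE_SIZE);		/* vmap alias */

		unsigned long pa1 = virt_to_phys(lin);	     /* valid: offset map */
		unsigned long pa2 = lpa((unsigned long)ali); /* valid: MMU walk */

		/* virt_to_phys(ali) would compute garbage: vmalloc addresses
		 * are outside the offset mapping it assumes. */
		kfree(lin);
		vfree(ali);
		(void)pa1;
		(void)pa2;
	}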

I believe we only use the vmap/vmalloc API with old PA 1.x processors, which
don't have an SBA, so we don't hit this problem.

Tested on c3750, c8000 and rp3440.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent ec13c82d
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -2,6 +2,30 @@
 #ifndef __PARISC_SPECIAL_INSNS_H
 #define __PARISC_SPECIAL_INSNS_H
 
+#define lpa(va)	({			\
+	unsigned long pa;		\
+	__asm__ __volatile__(		\
+		"copy %%r0,%0\n\t"	\
+		"lpa %%r0(%1),%0"	\
+		: "=r" (pa)		\
+		: "r" (va)		\
+		: "memory"		\
+	);				\
+	pa;				\
+})
+
+#define lpa_user(va)	({		\
+	unsigned long pa;		\
+	__asm__ __volatile__(		\
+		"copy %%r0,%0\n\t"	\
+		"lpa %%r0(%%sr3,%1),%0"	\
+		: "=r" (pa)		\
+		: "r" (va)		\
+		: "memory"		\
+	);				\
+	pa;				\
+})
+
 #define mfctl(reg)	({		\
 	unsigned long cr;		\
 	__asm__ __volatile__(		\
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -570,7 +570,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	** "hints" parm includes the VALID bit!
 	** "dep" clobbers the physical address offset bits as well.
 	*/
-	pa = virt_to_phys(vba);
+	pa = lpa(vba);
 	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
 	((u32 *)pdir_ptr)[1] = (u32) pa;
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -572,7 +572,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	u64 pa; /* physical address */
 	register unsigned ci; /* coherent index */
 
-	pa = virt_to_phys(vba);
+	pa = lpa(vba);
 	pa &= IOVP_MASK;
 
 	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
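
A note on the new macros (my reading, not stated in the commit): the leading
"copy %%r0,%0" preloads the result with 0 (%r0 is the hardwired zero
register), so if no translation is live and the fault handler skips the lpa,
the macro appears to yield 0 rather than garbage. lpa_user() performs the
same translation through space register %sr3, which holds the user space id,
so a hypothetical helper for user addresses could look like:

	#include <asm/special_insns.h>

	/* Hypothetical, not part of this commit: physical address backing
	 * a user virtual address of the current process; relies on the
	 * macro's zero preload so 0 doubles as "no translation". */
	static unsigned long user_virt_to_phys(unsigned long uaddr)
	{
		return lpa_user(uaddr);
	}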