Commit 45f7fdc2 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc updates from Ben Herrenschmidt:
 "Here is some powerpc goodness for -rc2.  Arguably -rc1 material more
  than -rc2 but I was travelling (again !)

  It's mostly bug fixes, including fixes for regressions, but there are a
  couple of new things that I decided to drop in.

  One is a straightforward patch from Michael to add a bunch of P8 cache
  events to perf.

  The other one is a patch of mine to add direct DMA (iommu bypass) for
  PCIe on Power8 for 64-bit capable devices.  This has been around for a
  while; I had lost track of it.  However, it's already in the internal
  kernels we use for P8 testing, and it affects only P8-related code.
  Since P8 is still unreleased, the risk is pretty much nil at this
  point"
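
The heart of the bypass patch is a small policy check: a device is switched to
direct DMA only if its DMA mask can address all of system RAM through the
bypass window at TVE#1, which is selected by PCI address bit 59. Below is a
minimal, self-contained sketch of that check using stand-in types and values
rather than the kernel's own structures; the real logic is in the
pnv_pci_ioda_dma_set_mask() hunk further down.

    /* Hypothetical userspace sketch of the bypass decision; types are stand-ins. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pe {
        bool     tce_bypass_enabled;
        uint64_t tce_bypass_base;   /* TVE#1: PCI address bit 59 */
    };

    /* Bypass only if the mask reaches the top of RAM through the bypass window */
    static bool can_bypass(const struct pe *pe, uint64_t dma_mask, uint64_t dram_top)
    {
        if (!pe->tce_bypass_enabled)
            return false;
        return dma_mask >= pe->tce_bypass_base + dram_top - 1;
    }

    int main(void)
    {
        struct pe pe = { .tce_bypass_enabled = true, .tce_bypass_base = 1ull << 59 };
        uint64_t dram_top = 64ull << 30;    /* assume 64 GB of RAM */

        /* A 64-bit capable device gets direct DMA; a 32-bit one stays behind the TCE table */
        printf("64-bit mask -> %s\n", can_bypass(&pe, ~0ull, dram_top) ? "bypass" : "iommu");
        printf("32-bit mask -> %s\n", can_bypass(&pe, 0xffffffffull, dram_top) ? "bypass" : "iommu");
        return 0;
    }

Devices whose mask fails the check keep the existing 32-bit TCE-mapped path.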

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc/powernv: Add iommu DMA bypass support for IODA2
  powerpc: Fix endian issues in kexec and crash dump code
  powerpc/ppc32: Fix the bug in the init of non-base exception stack for UP
  powerpc/xmon: Don't signal we've entered until we're finished printing
  powerpc/xmon: Fix timeout loop in get_output_lock()
  powerpc/xmon: Don't loop forever in get_output_lock()
  powerpc/perf: Configure BHRB filter before enabling PMU interrupts
  crypto/nx/nx-842: Fix handling of vmalloc addresses
  powerpc/pseries: Select ARCH_RANDOM on pseries
  powerpc/perf: Add Power8 cache & TLB events
  powerpc/relocate fix relocate processing in LE mode
  powerpc: Fix kdump hang issue on p8 with relocation on exception enabled.
  powerpc/pseries: Disable relocation on exception while going down during crash.
  powerpc/eeh: Drop taken reference to driver on eeh_rmv_device
  powerpc: Fix build failure in sysdev/mpic.c for MPIC_WEIRD=y
parents bbb19555 cd15b048
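
The other drop-in, the Power8 cache and TLB events, appears below as new PM_*
event codes, two new generic event mappings, and a power8_cache_events table.
With those in place the generic hardware cache events resolve on P8. As a
hypothetical userspace check (standard perf_event_open() usage, not part of
this diff), counting PERF_COUNT_HW_CACHE_MISSES now reads PM_LD_MISS_L1:

    /* Hypothetical standalone check: count cache misses around a busy loop. */
    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <string.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count;
        volatile int sink = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CACHE_MISSES;   /* maps to PM_LD_MISS_L1 on P8 */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (int i = 0; i < 1000000; i++)
            sink += i;
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        read(fd, &count, sizeof(count));
        printf("cache misses: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }
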
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
 }
 
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 
 #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)

@@ -77,6 +77,7 @@ struct iommu_table {
 #ifdef CONFIG_IOMMU_API
     struct iommu_group *it_group;
 #endif
+    void (*set_bypass)(struct iommu_table *tbl, bool enable);
 };
 
 /* Pure 2^n version of get_order */

@@ -8,6 +8,7 @@
 #ifdef __powerpc64__
 
+extern char __start_interrupts[];
 extern char __end_interrupts[];
 
 extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
     return 0;
 }
 
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+                                                 unsigned long end)
+{
+    unsigned long real_start, real_end;
+    real_start = __start_interrupts - _stext;
+    real_end = __end_interrupts - _stext;
+
+    return start < (unsigned long)__va(real_end) &&
+        (unsigned long)__va(real_start) < end;
+}
+
 static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 {
     return start < (unsigned long)__init_end &&

@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
     struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-    if (ppc_md.dma_set_mask)
-        return ppc_md.dma_set_mask(dev, dma_mask);
     if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
         return dma_ops->set_dma_mask(dev, dma_mask);
     if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
     *dev->dma_mask = dma_mask;
     return 0;
 }
+
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+    if (ppc_md.dma_set_mask)
+        return ppc_md.dma_set_mask(dev, dma_mask);
+    return __dma_set_mask(dev, dma_mask);
+}
 EXPORT_SYMBOL(dma_set_mask);
 
 u64 dma_get_required_mask(struct device *dev)

@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
      */
     if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
         return NULL;
+
     driver = eeh_pcid_get(dev);
-    if (driver && driver->err_handler)
-        return NULL;
+    if (driver) {
+        eeh_pcid_put(dev);
+        if (driver->err_handler)
+            return NULL;
+    }
 
     /* Remove it from PCI subsystem */
     pr_debug("EEH: Removing %s without EEH sensitive driver\n",

@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
     memset(tbl->it_map, 0xff, sz);
     iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
+    /*
+     * Disable iommu bypass, otherwise the user can DMA to all of
+     * our physical memory via the bypass window instead of just
+     * the pages that has been explicitly mapped into the iommu
+     */
+    if (tbl->set_bypass)
+        tbl->set_bypass(tbl, false);
+
     return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
     /* Restore bit#0 set by iommu_init_table() */
     if (tbl->it_offset == 0)
         set_bit(0, tbl->it_map);
+
+    /* The kernel owns the device now, we can restore the iommu bypass */
+    if (tbl->set_bypass)
+        tbl->set_bypass(tbl, true);
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);

@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
 #ifdef CONFIG_PPC64
         cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
         cpu_nr = get_hard_smp_processor_id(i);
+#else
+        cpu_nr = 0;
+#endif
 #endif
+
         memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
         tp = critirq_ctx[cpu_nr];
         tp->cpu = cpu_nr;

@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
 
 /* Values we need to export to the second kernel via the device tree. */
 static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
 static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
 
 static struct property kernel_end_prop = {
     .name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
 static struct property crashk_base_prop = {
     .name = "linux,crashkernel-base",
     .length = sizeof(phys_addr_t),
-    .value = &crashk_res.start,
+    .value = &crashk_base,
 };
 
 static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
 static struct property memory_limit_prop = {
     .name = "linux,memory-limit",
     .length = sizeof(unsigned long long),
-    .value = &memory_limit,
+    .value = &mem_limit,
 };
 
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+
 static void __init export_crashk_values(struct device_node *node)
 {
     struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
         of_remove_property(node, prop);
 
     if (crashk_res.start != 0) {
+        crashk_base = cpu_to_be_ulong(crashk_res.start),
         of_add_property(node, &crashk_base_prop);
-        crashk_size = resource_size(&crashk_res);
+        crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
         of_add_property(node, &crashk_size_prop);
     }
 
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
      * memory_limit is required by the kexec-tools to limit the
      * crash regions to the actual memory used.
      */
+    mem_limit = cpu_to_be_ulong(memory_limit);
     of_update_property(node, &memory_limit_prop);
 }
 
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
         of_remove_property(node, prop);
 
     /* information needed by userspace when using default_machine_kexec */
-    kernel_end = __pa(_end);
+    kernel_end = cpu_to_be_ulong(__pa(_end));
     of_add_property(node, &kernel_end_prop);
 
     export_crashk_values(node);

@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
 
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
+static unsigned long htab_size;
 
 static struct property htab_base_prop = {
     .name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
 static struct property htab_size_prop = {
     .name = "linux,htab-size",
     .length = sizeof(unsigned long),
-    .value = &htab_size_bytes,
+    .value = &htab_size,
 };
 
 static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
     if (prop)
         of_remove_property(node, prop);
 
-    htab_base = __pa(htab_address);
+    htab_base = cpu_to_be64(__pa(htab_address));
     of_add_property(node, &htab_base_prop);
+    htab_size = cpu_to_be64(htab_size_bytes);
     of_add_property(node, &htab_size_prop);
 
     of_node_put(node);

@@ -69,8 +69,8 @@ _GLOBAL(relocate)
      * R_PPC64_RELATIVE ones.
      */
     mtctr   r8
-5:  lwz     r0,12(9)    /* ELF64_R_TYPE(reloc->r_info) */
-    cmpwi   r0,R_PPC64_RELATIVE
+5:  ld      r0,8(9)     /* ELF64_R_TYPE(reloc->r_info) */
+    cmpdi   r0,R_PPC64_RELATIVE
     bne     6f
     ld      r6,0(r9)    /* reloc->r_offset */
     ld      r0,16(r9)   /* reloc->r_addend */

@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
     /* interrupt stacks must be in lowmem, we get that for free on ppc32
      * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
     for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
         hw_cpu = get_hard_smp_processor_id(i);
+#else
+        hw_cpu = 0;
+#endif
+
         critirq_ctx[hw_cpu] = (struct thread_info *)
             __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
 #ifdef CONFIG_BOOKE

@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
         if (overlaps_kernel_text(vaddr, vaddr + step))
             tprot &= ~HPTE_R_N;
 
+        /*
+         * If relocatable, check if it overlaps interrupt vectors that
+         * are copied down to real 0. For relocatable kernel
+         * (e.g. kdump case) we copy interrupt vectors down to real
+         * address 0. Mark that region as executable. This is
+         * because on p8 system with relocation on exception feature
+         * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
+         * in order to execute the interrupt handlers in virtual
+         * mode the vector region need to be marked as executable.
+         */
+        if ((PHYSICAL_START > MEMORY_START) &&
+            overlaps_interrupt_vector_text(vaddr, vaddr + step))
+                tprot &= ~HPTE_R_N;
+
         hash = hpt_hash(vpn, shift, ssize);
         hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
     mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
     mb();
+    if (cpuhw->bhrb_users)
+        ppmu->config_bhrb(cpuhw->bhrb_filter);
+
     write_mmcr0(cpuhw, mmcr0);
 
     /*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
     }
 
  out:
-    if (cpuhw->bhrb_users)
-        ppmu->config_bhrb(cpuhw->bhrb_filter);
     local_irq_restore(flags);
 }

@@ -25,6 +25,37 @@
 #define PM_BRU_FIN 0x10068
 #define PM_BR_MPRED_CMPL 0x400f6
 
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1 0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1 0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1 0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF 0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1 0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS 0x200fd
+/* Instruction Demand sectors wriittent into IL1 */
+#define PM_L1_DEMAND_WRITE 0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE 0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3 0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS 0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST 0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS 0x17082
+/* Total HW L3 prefetches(Load+store) */
+#define PM_L3_PREF_ALL 0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS 0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS 0x400fc
+
 /*
  * Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
     [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
     [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
     [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+    [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+    [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
 };
 
 static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
     mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
 }
 
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+    [ C(L1D) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+            [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = 0,
+            [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+            [ C(RESULT_MISS) ] = 0,
+        },
+    },
+    [ C(L1I) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+            [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+            [ C(RESULT_MISS) ] = 0,
+        },
+    },
+    [ C(LL) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+            [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = PM_L2_ST,
+            [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+            [ C(RESULT_MISS) ] = 0,
+        },
+    },
+    [ C(DTLB) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = 0,
+            [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+    },
+    [ C(ITLB) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = 0,
+            [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+    },
+    [ C(BPU) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+            [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+    },
+    [ C(NODE) ] = {
+        [ C(OP_READ) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_WRITE) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+        [ C(OP_PREFETCH) ] = {
+            [ C(RESULT_ACCESS) ] = -1,
+            [ C(RESULT_MISS) ] = -1,
+        },
+    },
+};
+
+#undef C
+
 static struct power_pmu power8_pmu = {
     .name = "POWER8",
     .n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
     .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
     .n_generic = ARRAY_SIZE(power8_generic_events),
     .generic_events = power8_generic_events,
+    .cache_events = &power8_cache_events,
     .attr_groups = power8_pmu_attr_groups,
     .bhrb_nr = 32,
 };

@@ -21,6 +21,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/msi.h>
+#include <linux/memblock.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
         return;
 
     pe = &phb->ioda.pe_array[pdn->pe_number];
+    WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
     set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
 }
 
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+                                     struct pci_dev *pdev, u64 dma_mask)
+{
+    struct pci_dn *pdn = pci_get_pdn(pdev);
+    struct pnv_ioda_pe *pe;
+    uint64_t top;
+    bool bypass = false;
+
+    if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+        return -ENODEV;;
+
+    pe = &phb->ioda.pe_array[pdn->pe_number];
+    if (pe->tce_bypass_enabled) {
+        top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+        bypass = (dma_mask >= top);
+    }
+
+    if (bypass) {
+        dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+        set_dma_ops(&pdev->dev, &dma_direct_ops);
+        set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+    } else {
+        dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+        set_dma_ops(&pdev->dev, &dma_iommu_ops);
+        set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+    }
+    return 0;
+}
+
 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
 {
     struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
         __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
 }
 
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+    struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+                                          tce32_table);
+    uint16_t window_id = (pe->pe_number << 1 ) + 1;
+    int64_t rc;
+
+    pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+    if (enable) {
+        phys_addr_t top = memblock_end_of_DRAM();
+
+        top = roundup_pow_of_two(top);
+        rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+                                             pe->pe_number,
+                                             window_id,
+                                             pe->tce_bypass_base,
+                                             top);
+    } else {
+        rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+                                             pe->pe_number,
+                                             window_id,
+                                             pe->tce_bypass_base,
+                                             0);
+
+        /*
+         * We might want to reset the DMA ops of all devices on
+         * this PE. However in theory, that shouldn't be necessary
+         * as this is used for VFIO/KVM pass-through and the device
+         * hasn't yet been returned to its kernel driver
+         */
+    }
+    if (rc)
+        pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+    else
+        pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+                                          struct pnv_ioda_pe *pe)
+{
+    /* TVE #1 is selected by PCI address bit 59 */
+    pe->tce_bypass_base = 1ull << 59;
+
+    /* Install set_bypass callback for VFIO */
+    pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+    /* Enable bypass by default */
+    pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
 static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                                        struct pnv_ioda_pe *pe)
 {
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
     else
         pnv_ioda_setup_bus_dma(pe, pe->pbus);
 
+    /* Also create a bypass window */
+    pnv_pci_ioda2_setup_bypass_pe(phb, pe);
     return;
 fail:
     if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 
     /* Setup TCEs */
     phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+    phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
 
     /* Setup shutdown function for kexec */
     phb->shutdown = pnv_pci_ioda_shutdown;

@@ -634,6 +634,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
         pnv_pci_dma_fallback_setup(hose, pdev);
 }
 
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+    struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+    struct pnv_phb *phb = hose->private_data;
+
+    if (phb && phb->dma_set_mask)
+        return phb->dma_set_mask(phb, pdev, dma_mask);
+    return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
 void pnv_pci_shutdown(void)
 {
     struct pci_controller *hose;

@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
     struct iommu_table tce32_table;
     phys_addr_t tce_inval_reg_phys;
 
-    /* XXX TODO: Add support for additional 64-bit iommus */
+    /* 64-bit TCE bypass region */
+    bool tce_bypass_enabled;
+    uint64_t tce_bypass_base;
 
     /* MSIs. MVE index is identical for for 32 and 64 bit MSI
      * and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
                       unsigned int hwirq, unsigned int virq,
                       unsigned int is_64, struct msi_msg *msg);
     void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+    int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+                        u64 dma_mask);
     void (*fixup_phb)(struct pci_controller *hose);
     u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
     void (*shutdown)(struct pnv_phb *phb);

@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
 static inline void pnv_smp_init(void) { }
 #endif
 
+struct pci_dev;
+
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+    return -ENODEV;
+}
 #endif
 
 extern void pnv_lpc_init(void);

@@ -27,6 +27,7 @@
 #include <linux/interrupt.h>
 #include <linux/bug.h>
 #include <linux/cpuidle.h>
+#include <linux/pci.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
 {
 }
 
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+    if (dev_is_pci(dev))
+        return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+    return __dma_set_mask(dev, dma_mask);
+}
+
 static void pnv_shutdown(void)
 {
     /* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
     .machine_shutdown = pnv_shutdown,
     .power_save = powernv_idle,
     .calibrate_decr = generic_calibrate_decr,
+    .dma_set_mask = pnv_dma_set_mask,
 #ifdef CONFIG_KEXEC
     .kexec_cpu_down = pnv_kexec_cpu_down,
 #endif

@@ -20,6 +20,7 @@ config PPC_PSERIES
     select PPC_DOORBELL
     select HAVE_CONTEXT_TRACKING
     select HOTPLUG_CPU if SMP
+    select ARCH_RANDOM
     default y
 
 config PPC_SPLPAR

@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
 {
     long rc;
 
-    if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
-        (image->type != KEXEC_TYPE_CRASH)) {
+    if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
         rc = pSeries_disable_reloc_on_exc();
         if (rc != H_SUCCESS)
             pr_warning("Warning: Failed to disable relocation on "

@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 
     /* Default: read HW settings */
     if (flow_type == IRQ_TYPE_DEFAULT) {
-        switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
-                       MPIC_INFO(VECPRI_SENSE_MASK))) {
-            case MPIC_INFO(VECPRI_SENSE_EDGE) |
-                 MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-                flow_type = IRQ_TYPE_EDGE_RISING;
-                break;
-            case MPIC_INFO(VECPRI_SENSE_EDGE) |
-                 MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-                flow_type = IRQ_TYPE_EDGE_FALLING;
-                break;
-            case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-                 MPIC_INFO(VECPRI_POLARITY_POSITIVE):
-                flow_type = IRQ_TYPE_LEVEL_HIGH;
-                break;
-            case MPIC_INFO(VECPRI_SENSE_LEVEL) |
-                 MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
-                flow_type = IRQ_TYPE_LEVEL_LOW;
-                break;
-        }
+        int vold_ps;
+
+        vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+                          MPIC_INFO(VECPRI_SENSE_MASK));
+
+        if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+                        MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+            flow_type = IRQ_TYPE_EDGE_RISING;
+        else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+            flow_type = IRQ_TYPE_EDGE_FALLING;
+        else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+                             MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+            flow_type = IRQ_TYPE_LEVEL_HIGH;
+        else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+                             MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+            flow_type = IRQ_TYPE_LEVEL_LOW;
+        else
+            WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
     }
 
     /* Apply to irq desc */

@@ -309,16 +309,23 @@ static void get_output_lock(void)
 
     if (xmon_speaker == me)
         return;
+
     for (;;) {
-        if (xmon_speaker == 0) {
-            last_speaker = cmpxchg(&xmon_speaker, 0, me);
-            if (last_speaker == 0)
-                return;
-        }
-        timeout = 10000000;
+        last_speaker = cmpxchg(&xmon_speaker, 0, me);
+        if (last_speaker == 0)
+            return;
+
+        /*
+         * Wait a full second for the lock, we might be on a slow
+         * console, but check every 100us.
+         */
+        timeout = 10000;
         while (xmon_speaker == last_speaker) {
-            if (--timeout > 0)
+            if (--timeout > 0) {
+                udelay(100);
                 continue;
+            }
+
             /* hostile takeover */
             prev = cmpxchg(&xmon_speaker, last_speaker, me);
             if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
     }
 
     xmon_fault_jmp[cpu] = recurse_jmp;
-    cpumask_set_cpu(cpu, &cpus_in_xmon);
 
     bp = NULL;
     if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
         release_output_lock();
     }
 
+    cpumask_set_cpu(cpu, &cpus_in_xmon);
+
  waiting:
     secondary = 1;
     while (secondary && !xmon_gate) {

@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
     return sl->entry_nr * sizeof(struct nx842_slentry);
 }
 
+static inline unsigned long nx842_get_pa(void *addr)
+{
+    if (is_vmalloc_addr(addr))
+        return page_to_phys(vmalloc_to_page(addr))
+               + offset_in_page(addr);
+    else
+        return __pa(addr);
+}
+
 static int nx842_build_scatterlist(unsigned long buf, int len,
                                    struct nx842_scatterlist *sl)
 {
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
     entry = sl->entries;
     while (len) {
-        entry->ptr = __pa(buf);
+        entry->ptr = nx842_get_pa((void *)buf);
         nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
         if (nextpage < buf + len) {
             /* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
     op.flags = NX842_OP_COMPRESS;
     csbcpb = &workmem->csbcpb;
     memset(csbcpb, 0, sizeof(*csbcpb));
-    op.csbcpb = __pa(csbcpb);
-    op.out = __pa(slout.entries);
+    op.csbcpb = nx842_get_pa(csbcpb);
+    op.out = nx842_get_pa(slout.entries);
 
     for (i = 0; i < hdr->blocks_nr; i++) {
         /*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
          */
         if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
             /* Create direct DDE */
-            op.in = __pa(inbuf);
+            op.in = nx842_get_pa((void *)inbuf);
             op.inlen = max_sync_size;
         } else {
             /* Create indirect DDE (scatterlist) */
             nx842_build_scatterlist(inbuf, max_sync_size, &slin);
-            op.in = __pa(slin.entries);
+            op.in = nx842_get_pa(slin.entries);
             op.inlen = -nx842_get_scatterlist_size(&slin);
         }
 
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
     op.flags = NX842_OP_DECOMPRESS;
     csbcpb = &workmem->csbcpb;
     memset(csbcpb, 0, sizeof(*csbcpb));
-    op.csbcpb = __pa(csbcpb);
+    op.csbcpb = nx842_get_pa(csbcpb);
 
     /*
      * max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
         if (likely((inbuf & NX842_HW_PAGE_MASK) ==
             ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
             /* Create direct DDE */
-            op.in = __pa(inbuf);
+            op.in = nx842_get_pa((void *)inbuf);
             op.inlen = hdr->sizes[i];
         } else {
             /* Create indirect DDE (scatterlist) */
             nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
-            op.in = __pa(slin.entries);
+            op.in = nx842_get_pa(slin.entries);
             op.inlen = -nx842_get_scatterlist_size(&slin);
         }
 
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
          */
         if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
             /* Create direct DDE */
-            op.out = __pa(outbuf);
+            op.out = nx842_get_pa((void *)outbuf);
             op.outlen = max_sync_size;
         } else {
             /* Create indirect DDE (scatterlist) */
             nx842_build_scatterlist(outbuf, max_sync_size, &slout);
-            op.out = __pa(slout.entries);
+            op.out = nx842_get_pa(slout.entries);
             op.outlen = -nx842_get_scatterlist_size(&slout);
         }