Commit 48901165 authored by Dan Williams

Merge branch 'for-4.6/pfn' into libnvdimm-for-next

parents 59e64739 ff8e92d5
...@@ -176,13 +176,13 @@ static struct resource mem_res[] = { ...@@ -176,13 +176,13 @@ static struct resource mem_res[] = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
}, },
{ {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
} }
}; };
...@@ -851,7 +851,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) ...@@ -851,7 +851,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
res->name = "System RAM"; res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res); request_resource(&iomem_resource, res);
......
@@ -53,8 +53,8 @@ static void s3c_pm_run_res(struct resource *ptr, run_fn_t fn, u32 *arg)
 		if (ptr->child != NULL)
 			s3c_pm_run_res(ptr->child, fn, arg);
 
-		if ((ptr->flags & IORESOURCE_MEM) &&
-		    strcmp(ptr->name, "System RAM") == 0) {
+		if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+				== IORESOURCE_SYSTEM_RAM) {
 			S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
 				  (unsigned long)ptr->start,
 				  (unsigned long)ptr->end);
......
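
[Editor's note] The hunk above swaps a name-string compare for a flags test. Because IORESOURCE_SYSTEM_RAM is defined as IORESOURCE_MEM | IORESOURCE_SYSRAM (see the include/linux/ioport.h hunk near the end of this diff), the masked comparison is required; a plain bitwise AND would also match ordinary MMIO resources. A minimal sketch of the idiom, not taken from the patch itself:

#include <linux/ioport.h>

/* true only when both IORESOURCE_MEM and IORESOURCE_SYSRAM are set */
static bool res_is_system_ram(const struct resource *res)
{
	return (res->flags & IORESOURCE_SYSTEM_RAM) == IORESOURCE_SYSTEM_RAM;
}
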
...@@ -73,13 +73,13 @@ static struct resource mem_res[] = { ...@@ -73,13 +73,13 @@ static struct resource mem_res[] = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
}, },
{ {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
} }
}; };
...@@ -210,7 +210,7 @@ static void __init request_standard_resources(void) ...@@ -210,7 +210,7 @@ static void __init request_standard_resources(void)
res->name = "System RAM"; res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res); request_resource(&iomem_resource, res);
......
...@@ -49,13 +49,13 @@ static struct resource __initdata kernel_data = { ...@@ -49,13 +49,13 @@ static struct resource __initdata kernel_data = {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_SYSTEM_RAM,
}; };
static struct resource __initdata kernel_code = { static struct resource __initdata kernel_code = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_SYSTEM_RAM,
.sibling = &kernel_data, .sibling = &kernel_data,
}; };
...@@ -134,7 +134,7 @@ add_physical_memory(resource_size_t start, resource_size_t end) ...@@ -134,7 +134,7 @@ add_physical_memory(resource_size_t start, resource_size_t end)
new->start = start; new->start = start;
new->end = end; new->end = end;
new->name = "System RAM"; new->name = "System RAM";
new->flags = IORESOURCE_MEM; new->flags = IORESOURCE_SYSTEM_RAM;
*pprev = new; *pprev = new;
} }
......
@@ -1178,7 +1178,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 	efi_memory_desc_t *md;
 	u64 efi_desc_size;
 	char *name;
-	unsigned long flags;
+	unsigned long flags, desc;
 
 	efi_map_start = __va(ia64_boot_param->efi_memmap);
 	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -1193,6 +1193,8 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 			continue;
 
 		flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+		desc = IORES_DESC_NONE;
+
 		switch (md->type) {
 
 			case EFI_MEMORY_MAPPED_IO:
@@ -1207,14 +1209,17 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 				if (md->attribute & EFI_MEMORY_WP) {
 					name = "System ROM";
 					flags |= IORESOURCE_READONLY;
-				} else if (md->attribute == EFI_MEMORY_UC)
+				} else if (md->attribute == EFI_MEMORY_UC) {
 					name = "Uncached RAM";
-				else
+				} else {
 					name = "System RAM";
+					flags |= IORESOURCE_SYSRAM;
+				}
 				break;
 
 			case EFI_ACPI_MEMORY_NVS:
 				name = "ACPI Non-volatile Storage";
+				desc = IORES_DESC_ACPI_NV_STORAGE;
 				break;
 
 			case EFI_UNUSABLE_MEMORY:
@@ -1224,6 +1229,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 			case EFI_PERSISTENT_MEMORY:
 				name = "Persistent Memory";
+				desc = IORES_DESC_PERSISTENT_MEMORY;
 				break;
 
 			case EFI_RESERVED_TYPE:
@@ -1246,6 +1252,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 		res->start = md->phys_addr;
 		res->end = md->phys_addr + efi_md_size(md) - 1;
 		res->flags = flags;
+		res->desc = desc;
 
 		if (insert_resource(&iomem_resource, res) < 0)
 			kfree(res);
......
...@@ -80,17 +80,17 @@ unsigned long vga_console_membase; ...@@ -80,17 +80,17 @@ unsigned long vga_console_membase;
static struct resource data_resource = { static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource bss_resource = { static struct resource bss_resource = {
.name = "Kernel bss", .name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
unsigned long ia64_max_cacheline_size; unsigned long ia64_max_cacheline_size;
......
...@@ -70,14 +70,14 @@ static struct resource data_resource = { ...@@ -70,14 +70,14 @@ static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
unsigned long memory_start; unsigned long memory_start;
......
@@ -732,21 +732,23 @@ static void __init resource_init(void)
 			end = HIGHMEM_START - 1;
 
 		res = alloc_bootmem(sizeof(struct resource));
+
+		res->start = start;
+		res->end = end;
+		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
 		switch (boot_mem_map.map[i].type) {
 		case BOOT_MEM_RAM:
 		case BOOT_MEM_INIT_RAM:
 		case BOOT_MEM_ROM_DATA:
 			res->name = "System RAM";
+			res->flags |= IORESOURCE_SYSRAM;
 			break;
 		case BOOT_MEM_RESERVED:
 		default:
 			res->name = "reserved";
 		}
 
-		res->start = start;
-		res->end = end;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-
 		request_resource(&iomem_resource, res);
 
 		/*
......
...@@ -55,12 +55,12 @@ signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; ...@@ -55,12 +55,12 @@ signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
static struct resource data_resource = { static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource pdcdata_resource = { static struct resource pdcdata_resource = {
...@@ -201,7 +201,7 @@ static void __init setup_bootmem(void) ...@@ -201,7 +201,7 @@ static void __init setup_bootmem(void)
res->name = "System RAM"; res->name = "System RAM";
res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT; res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res); request_resource(&iomem_resource, res);
} }
......
...@@ -541,7 +541,7 @@ static int __init add_system_ram_resources(void) ...@@ -541,7 +541,7 @@ static int __init add_system_ram_resources(void)
res->name = "System RAM"; res->name = "System RAM";
res->start = base; res->start = base;
res->end = base + size - 1; res->end = base + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
WARN_ON(request_resource(&iomem_resource, res) < 0); WARN_ON(request_resource(&iomem_resource, res) < 0);
} }
} }
......
...@@ -374,17 +374,17 @@ static void __init setup_lowcore(void) ...@@ -374,17 +374,17 @@ static void __init setup_lowcore(void)
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource data_resource = { static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource bss_resource = { static struct resource bss_resource = {
.name = "Kernel bss", .name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource __initdata *standard_resources[] = { static struct resource __initdata *standard_resources[] = {
...@@ -408,7 +408,7 @@ static void __init setup_resources(void) ...@@ -408,7 +408,7 @@ static void __init setup_resources(void)
for_each_memblock(memory, reg) { for_each_memblock(memory, reg) {
res = alloc_bootmem_low(sizeof(*res)); res = alloc_bootmem_low(sizeof(*res));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM"; res->name = "System RAM";
res->start = reg->base; res->start = reg->base;
......
...@@ -101,7 +101,7 @@ static void __init resource_init(void) ...@@ -101,7 +101,7 @@ static void __init resource_init(void)
res->name = "System RAM"; res->name = "System RAM";
res->start = MEMORY_START; res->start = MEMORY_START;
res->end = MEMORY_START + MEMORY_SIZE - 1; res->end = MEMORY_START + MEMORY_SIZE - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res); request_resource(&iomem_resource, res);
request_resource(res, &code_resource); request_resource(res, &code_resource);
......
...@@ -78,17 +78,17 @@ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, }; ...@@ -78,17 +78,17 @@ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource data_resource = { static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
static struct resource bss_resource = { static struct resource bss_resource = {
.name = "Kernel bss", .name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM, .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
}; };
unsigned long memory_start; unsigned long memory_start;
...@@ -202,7 +202,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, ...@@ -202,7 +202,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
res->name = "System RAM"; res->name = "System RAM";
res->start = start; res->start = start;
res->end = end - 1; res->end = end - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res)) { if (request_resource(&iomem_resource, res)) {
pr_err("unable to request memory_resource 0x%lx 0x%lx\n", pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
......
...@@ -2863,17 +2863,17 @@ void hugetlb_setup(struct pt_regs *regs) ...@@ -2863,17 +2863,17 @@ void hugetlb_setup(struct pt_regs *regs)
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource data_resource = { static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource bss_resource = { static struct resource bss_resource = {
.name = "Kernel bss", .name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static inline resource_size_t compute_kern_paddr(void *addr) static inline resource_size_t compute_kern_paddr(void *addr)
...@@ -2909,7 +2909,7 @@ static int __init report_memory(void) ...@@ -2909,7 +2909,7 @@ static int __init report_memory(void)
res->name = "System RAM"; res->name = "System RAM";
res->start = pavail[i].phys_addr; res->start = pavail[i].phys_addr;
res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
if (insert_resource(&iomem_resource, res) < 0) { if (insert_resource(&iomem_resource, res) < 0) {
pr_warn("Resource insertion failed.\n"); pr_warn("Resource insertion failed.\n");
......
...@@ -1632,14 +1632,14 @@ static struct resource data_resource = { ...@@ -1632,14 +1632,14 @@ static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
/* /*
...@@ -1673,10 +1673,15 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved) ...@@ -1673,10 +1673,15 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
kzalloc(sizeof(struct resource), GFP_ATOMIC); kzalloc(sizeof(struct resource), GFP_ATOMIC);
if (!res) if (!res)
return NULL; return NULL;
res->name = reserved ? "Reserved" : "System RAM";
res->start = start_pfn << PAGE_SHIFT; res->start = start_pfn << PAGE_SHIFT;
res->end = (end_pfn << PAGE_SHIFT) - 1; res->end = (end_pfn << PAGE_SHIFT) - 1;
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
if (reserved) {
res->name = "Reserved";
} else {
res->name = "System RAM";
res->flags |= IORESOURCE_SYSRAM;
}
if (insert_resource(&iomem_resource, res)) { if (insert_resource(&iomem_resource, res)) {
kfree(res); kfree(res);
return NULL; return NULL;
......
...@@ -72,13 +72,13 @@ static struct resource mem_res[] = { ...@@ -72,13 +72,13 @@ static struct resource mem_res[] = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
}, },
{ {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_MEM .flags = IORESOURCE_SYSTEM_RAM
} }
}; };
...@@ -211,7 +211,7 @@ request_standard_resources(struct meminfo *mi) ...@@ -211,7 +211,7 @@ request_standard_resources(struct meminfo *mi)
res->name = "System RAM"; res->name = "System RAM";
res->start = mi->bank[i].start; res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1; res->end = mi->bank[i].start + mi->bank[i].size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res); request_resource(&iomem_resource, res);
......
...@@ -57,10 +57,9 @@ struct crash_elf_data { ...@@ -57,10 +57,9 @@ struct crash_elf_data {
struct kimage *image; struct kimage *image;
/* /*
* Total number of ram ranges we have after various adjustments for * Total number of ram ranges we have after various adjustments for
* GART, crash reserved region etc. * crash reserved region, etc.
*/ */
unsigned int max_nr_ranges; unsigned int max_nr_ranges;
unsigned long gart_start, gart_end;
/* Pointer to elf header */ /* Pointer to elf header */
void *ehdr; void *ehdr;
...@@ -201,17 +200,6 @@ static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) ...@@ -201,17 +200,6 @@ static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
return 0; return 0;
} }
static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
struct crash_elf_data *ced = arg;
ced->gart_start = start;
ced->gart_end = end;
/* Not expecting more than 1 gart aperture */
return 1;
}
/* Gather all the required information to prepare elf headers for ram regions */ /* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced, static void fill_up_crash_elf_data(struct crash_elf_data *ced,
...@@ -226,22 +214,6 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, ...@@ -226,22 +214,6 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
ced->max_nr_ranges = nr_ranges; ced->max_nr_ranges = nr_ranges;
/*
* We don't create ELF headers for GART aperture as an attempt
* to dump this memory in second kernel leads to hang/crash.
* If gart aperture is present, one needs to exclude that region
* and that could lead to need of extra phdr.
*/
walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
ced, get_gart_ranges_callback);
/*
* If we have gart region, excluding that could potentially split
* a memory range, resulting in extra header. Account for that.
*/
if (ced->gart_end)
ced->max_nr_ranges++;
/* Exclusion of crash region could split memory ranges */ /* Exclusion of crash region could split memory ranges */
ced->max_nr_ranges++; ced->max_nr_ranges++;
...@@ -350,13 +322,6 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced, ...@@ -350,13 +322,6 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
return ret; return ret;
} }
/* Exclude GART region */
if (ced->gart_end) {
ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
if (ret)
return ret;
}
return ret; return ret;
} }
@@ -599,12 +564,12 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
 	/* Add ACPI tables */
 	cmd.type = E820_ACPI;
 	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-	walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
+	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
 			memmap_entry_callback);
 
 	/* Add ACPI Non-volatile Storage */
 	cmd.type = E820_NVS;
-	walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
+	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
 			memmap_entry_callback);
 
 	/* Add crashk_low_res region */
......
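
[Editor's note] crash_setup_memmap_entries() now selects ranges by I/O resource descriptor instead of matching the resource name string, and the GART special case above is gone. A hedged sketch of the walk_iomem_res_desc() calling convention used here; the helper and counter are illustrative only, not part of the patch:

#include <linux/ioport.h>

/* illustrative callback: invoked once per matching range, 0 = keep walking */
static int count_range(u64 start, u64 end, void *arg)
{
	unsigned int *nr = arg;

	(*nr)++;
	return 0;
}

static unsigned int count_acpi_table_ranges(void)
{
	unsigned int nr = 0;

	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES,
			IORESOURCE_MEM | IORESOURCE_BUSY, 0, -1,
			&nr, count_range);
	return nr;
}
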
...@@ -925,6 +925,41 @@ static const char *e820_type_to_string(int e820_type) ...@@ -925,6 +925,41 @@ static const char *e820_type_to_string(int e820_type)
} }
} }
static unsigned long e820_type_to_iomem_type(int e820_type)
{
switch (e820_type) {
case E820_RESERVED_KERN:
case E820_RAM:
return IORESOURCE_SYSTEM_RAM;
case E820_ACPI:
case E820_NVS:
case E820_UNUSABLE:
case E820_PRAM:
case E820_PMEM:
default:
return IORESOURCE_MEM;
}
}
static unsigned long e820_type_to_iores_desc(int e820_type)
{
switch (e820_type) {
case E820_ACPI:
return IORES_DESC_ACPI_TABLES;
case E820_NVS:
return IORES_DESC_ACPI_NV_STORAGE;
case E820_PMEM:
return IORES_DESC_PERSISTENT_MEMORY;
case E820_PRAM:
return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
case E820_RESERVED_KERN:
case E820_RAM:
case E820_UNUSABLE:
default:
return IORES_DESC_NONE;
}
}
static bool do_mark_busy(u32 type, struct resource *res) static bool do_mark_busy(u32 type, struct resource *res)
{ {
/* this is the legacy bios/dos rom-shadow + mmio region */ /* this is the legacy bios/dos rom-shadow + mmio region */
...@@ -967,7 +1002,8 @@ void __init e820_reserve_resources(void) ...@@ -967,7 +1002,8 @@ void __init e820_reserve_resources(void)
res->start = e820.map[i].addr; res->start = e820.map[i].addr;
res->end = end; res->end = end;
res->flags = IORESOURCE_MEM; res->flags = e820_type_to_iomem_type(e820.map[i].type);
res->desc = e820_type_to_iores_desc(e820.map[i].type);
/* /*
* don't register the region that could be conflicted with * don't register the region that could be conflicted with
......
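
[Editor's note] Taken together, the two helpers above mean e820 entries now land in the iomem tree with both a resource type and a descriptor. A summary of the mapping, restated from the switch statements above:

/*
 * E820_RAM, E820_RESERVED_KERN -> IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE
 * E820_ACPI                    -> IORESOURCE_MEM, IORES_DESC_ACPI_TABLES
 * E820_NVS                     -> IORESOURCE_MEM, IORES_DESC_ACPI_NV_STORAGE
 * E820_PMEM                    -> IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY
 * E820_PRAM                    -> IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY_LEGACY
 * anything else                -> IORESOURCE_MEM, IORES_DESC_NONE
 */
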
@@ -13,11 +13,11 @@ static int found(u64 start, u64 end, void *data)
 
 static __init int register_e820_pmem(void)
 {
-	char *pmem = "Persistent Memory (legacy)";
 	struct platform_device *pdev;
 	int rc;
 
-	rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+	rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+				 IORESOURCE_MEM, 0, -1, NULL, found);
 	if (rc <= 0)
 		return 0;
......
...@@ -152,21 +152,21 @@ static struct resource data_resource = { ...@@ -152,21 +152,21 @@ static struct resource data_resource = {
.name = "Kernel data", .name = "Kernel data",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource code_resource = { static struct resource code_resource = {
.name = "Kernel code", .name = "Kernel code",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
static struct resource bss_resource = { static struct resource bss_resource = {
.name = "Kernel bss", .name = "Kernel bss",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
}; };
......
...@@ -62,7 +62,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) ...@@ -62,7 +62,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
if (count < 0) { if (count < 0) {
return NULL; return NULL;
} else if (count > 0) { } else if (count > 0) {
resources = kmalloc(count * sizeof(struct resource), resources = kzalloc(count * sizeof(struct resource),
GFP_KERNEL); GFP_KERNEL);
if (!resources) { if (!resources) {
dev_err(&adev->dev, "No memory for resources\n"); dev_err(&adev->dev, "No memory for resources\n");
......
@@ -519,7 +519,7 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
 			     u64 param3, u64 param4)
 {
 	int rc;
-	unsigned long pfn;
+	u64 base_addr, size;
 
 	/* If user manually set "flags", make sure it is legal */
 	if (flags && (flags &
@@ -545,10 +545,17 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
 	/*
 	 * Disallow crazy address masks that give BIOS leeway to pick
 	 * injection address almost anywhere. Insist on page or
-	 * better granularity and that target address is normal RAM.
+	 * better granularity and that target address is normal RAM or
+	 * NVDIMM.
 	 */
-	pfn = PFN_DOWN(param1 & param2);
-	if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
+	base_addr = param1 & param2;
+	size = ~param2 + 1;
+	if (((param2 & PAGE_MASK) != PAGE_MASK) ||
+	    ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+			!= REGION_INTERSECTS) &&
+	     (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+			!= REGION_INTERSECTS)))
 		return -EINVAL;
 
 inject:
......
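
[Editor's note] With region_intersects() now taking a flags/descriptor pair, EINJ can accept persistent-memory targets as well as ordinary RAM. A condensed sketch of the check above (the helper name is illustrative):

#include <linux/ioport.h>
#include <linux/mm.h>

static bool einj_target_ok(u64 base_addr, u64 size)
{
	/* whole range must be System RAM, or whole range must be pmem */
	return region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_INTERSECTS ||
	       region_intersects(base_addr, size, IORESOURCE_MEM,
				 IORES_DESC_PERSISTENT_MEMORY) == REGION_INTERSECTS;
}
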
...@@ -1658,6 +1658,48 @@ static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, ...@@ -1658,6 +1658,48 @@ static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
return 0; return 0;
} }
static void acpi_nfit_remove_resource(void *data)
{
struct resource *res = data;
remove_resource(res);
}
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
struct nd_region_desc *ndr_desc)
{
struct resource *res, *nd_res = ndr_desc->res;
int is_pmem, ret;
/* No operation if the region is already registered as PMEM */
is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
if (is_pmem == REGION_INTERSECTS)
return 0;
res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
res->name = "Persistent Memory";
res->start = nd_res->start;
res->end = nd_res->end;
res->flags = IORESOURCE_MEM;
res->desc = IORES_DESC_PERSISTENT_MEMORY;
ret = insert_resource(&iomem_resource, res);
if (ret)
return ret;
ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
if (ret) {
remove_resource(res);
return ret;
}
return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
struct acpi_nfit_memory_map *memdev, struct acpi_nfit_memory_map *memdev,
...@@ -1773,6 +1815,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, ...@@ -1773,6 +1815,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
nvdimm_bus = acpi_desc->nvdimm_bus; nvdimm_bus = acpi_desc->nvdimm_bus;
if (nfit_spa_type(spa) == NFIT_SPA_PM) { if (nfit_spa_type(spa) == NFIT_SPA_PM) {
rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
if (rc) {
dev_warn(acpi_desc->dev,
"failed to insert pmem resource to iomem: %d\n",
rc);
goto out;
}
nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
ndr_desc); ndr_desc);
if (!nfit_spa->nd_region) if (!nfit_spa->nd_region)
......
...@@ -31,8 +31,6 @@ struct nd_blk_device { ...@@ -31,8 +31,6 @@ struct nd_blk_device {
u32 internal_lbasize; u32 internal_lbasize;
}; };
static int nd_blk_major;
static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev) static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
{ {
return blk_dev->nsblk->lbasize - blk_dev->sector_size; return blk_dev->nsblk->lbasize - blk_dev->sector_size;
...@@ -264,7 +262,6 @@ static int nd_blk_attach_disk(struct nd_namespace_common *ndns, ...@@ -264,7 +262,6 @@ static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
} }
disk->driverfs_dev = &ndns->dev; disk->driverfs_dev = &ndns->dev;
disk->major = nd_blk_major;
disk->first_minor = 0; disk->first_minor = 0;
disk->fops = &nd_blk_fops; disk->fops = &nd_blk_fops;
disk->private_data = blk_dev; disk->private_data = blk_dev;
@@ -358,25 +355,12 @@ static struct nd_device_driver nd_blk_driver = {
 
 static int __init nd_blk_init(void)
 {
-	int rc;
-
-	rc = register_blkdev(0, "nd_blk");
-	if (rc < 0)
-		return rc;
-
-	nd_blk_major = rc;
-	rc = nd_driver_register(&nd_blk_driver);
-	if (rc < 0)
-		unregister_blkdev(nd_blk_major, "nd_blk");
-
-	return rc;
+	return nd_driver_register(&nd_blk_driver);
 }
 
 static void __exit nd_blk_exit(void)
 {
 	driver_unregister(&nd_blk_driver.drv);
-	unregister_blkdev(nd_blk_major, "nd_blk");
 }
 
 MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
......
...@@ -31,8 +31,6 @@ enum log_ent_request { ...@@ -31,8 +31,6 @@ enum log_ent_request {
LOG_OLD_ENT LOG_OLD_ENT
}; };
static int btt_major;
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
void *buf, size_t n) void *buf, size_t n)
{ {
...@@ -1246,7 +1244,6 @@ static int btt_blk_init(struct btt *btt) ...@@ -1246,7 +1244,6 @@ static int btt_blk_init(struct btt *btt)
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name); nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->driverfs_dev = &btt->nd_btt->dev; btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
btt->btt_disk->major = btt_major;
btt->btt_disk->first_minor = 0; btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops; btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt; btt->btt_disk->private_data = btt;
@@ -1423,22 +1420,11 @@ EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
 
 static int __init nd_btt_init(void)
 {
-	int rc;
-
-	btt_major = register_blkdev(0, "btt");
-	if (btt_major < 0)
-		return btt_major;
+	int rc = 0;
 
 	debugfs_root = debugfs_create_dir("btt", NULL);
-	if (IS_ERR_OR_NULL(debugfs_root)) {
+	if (IS_ERR_OR_NULL(debugfs_root))
 		rc = -ENXIO;
-		goto err_debugfs;
-	}
-
-	return 0;
-
- err_debugfs:
-	unregister_blkdev(btt_major, "btt");
 
 	return rc;
 }
@@ -1446,7 +1432,6 @@ static int __init nd_btt_init(void)
 static void __exit nd_btt_exit(void)
 {
 	debugfs_remove_recursive(debugfs_root);
-	unregister_blkdev(btt_major, "btt");
 }
 
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
......
...@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev) ...@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
for (p = iomem_resource.child; p ; p = p->sibling) { for (p = iomem_resource.child; p ; p = p->sibling) {
struct nd_region_desc ndr_desc; struct nd_region_desc ndr_desc;
if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0) if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
continue; continue;
memset(&ndr_desc, 0, sizeof(ndr_desc)); memset(&ndr_desc, 0, sizeof(ndr_desc));
......
...@@ -133,6 +133,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid) ...@@ -133,6 +133,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
bool pmem_should_map_pages(struct device *dev) bool pmem_should_map_pages(struct device *dev)
{ {
struct nd_region *nd_region = to_nd_region(dev->parent); struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
return false; return false;
...@@ -143,6 +144,12 @@ bool pmem_should_map_pages(struct device *dev) ...@@ -143,6 +144,12 @@ bool pmem_should_map_pages(struct device *dev)
if (is_nd_pfn(dev) || is_nd_btt(dev)) if (is_nd_pfn(dev) || is_nd_btt(dev))
return false; return false;
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
IORESOURCE_SYSTEM_RAM,
IORES_DESC_NONE) == REGION_MIXED)
return false;
#ifdef ARCH_MEMREMAP_PMEM #ifdef ARCH_MEMREMAP_PMEM
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB; return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else #else
......
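
[Editor's note] The REGION_MIXED test above ties the pmem driver to the new IORESOURCE_SYSTEM_RAM bookkeeping: a namespace that only partially overlaps a memory section already occupied by System RAM cannot be safely page-mapped. For reference, the three region_intersects() outcomes (sketched from the enum in the region_intersects() hunk at the end of this diff and its use here):

/*
 * region_intersects(start, size, flags, desc) returns, roughly:
 *   REGION_DISJOINT   - no byte in the range matches flags/desc
 *   REGION_INTERSECTS - every byte in the range matches
 *   REGION_MIXED      - partial overlap; pmem_should_map_pages() bails out here
 */
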
@@ -15,6 +15,7 @@
 #define __NVDIMM_PFN_H
 
 #include <linux/types.h>
+#include <linux/mmzone.h>
 
 #define PFN_SIG_LEN 16
 #define PFN_SIG "NVDIMM_PFN_INFO\0"
@@ -26,10 +27,28 @@ struct nd_pfn_sb {
 	__le32 flags;
 	__le16 version_major;
 	__le16 version_minor;
-	__le64 dataoff;
+	__le64 dataoff; /* relative to namespace_base + start_pad */
 	__le64 npfns;
 	__le32 mode;
-	u8 padding[4012];
+	/* minor-version-1 additions for section alignment */
+	__le32 start_pad;
+	__le32 end_trunc;
+	u8 padding[4004];
 	__le64 checksum;
 };
+
+#ifdef CONFIG_SPARSEMEM
+#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
+#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
+#else
+/*
+ * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
+ * but we still want pmem to compile.
+ */
+#define PFN_SECTION_ALIGN_DOWN(x) (x)
+#define PFN_SECTION_ALIGN_UP(x) (x)
+#endif
+
+#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
+#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
 #endif /* __NVDIMM_PFN_H */
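
[Editor's note] start_pad and end_trunc record how much of the namespace is sacrificed so that the remaining range begins and ends on memory-section boundaries (128MB sections on x86_64 with the usual SECTION_SIZE_BITS; that value is an assumption here, not something this patch sets). The arithmetic, restated from the nd_pfn_init() hunk further down:

/*
 * Only applied when the section-aligned range would otherwise mix with
 * 'System RAM' (region_intersects() == REGION_MIXED):
 *
 *   start_pad = PHYS_SECTION_ALIGN_UP(res->start) - res->start;
 *   end_trunc = (res->start + size) - PHYS_SECTION_ALIGN_DOWN(res->start + size);
 */
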
...@@ -205,11 +205,67 @@ static ssize_t namespace_store(struct device *dev, ...@@ -205,11 +205,67 @@ static ssize_t namespace_store(struct device *dev,
} }
static DEVICE_ATTR_RW(namespace); static DEVICE_ATTR_RW(namespace);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
struct nd_namespace_common *ndns = nd_pfn->ndns;
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+ start_pad + offset);
} else {
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
ssize_t rc;
device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
struct nd_namespace_common *ndns = nd_pfn->ndns;
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%llu\n", (unsigned long long)
resource_size(&nsio->res) - start_pad
- end_trunc - offset);
} else {
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
device_unlock(dev);
return rc;
}
static DEVICE_ATTR_RO(size);
static struct attribute *nd_pfn_attributes[] = { static struct attribute *nd_pfn_attributes[] = {
&dev_attr_mode.attr, &dev_attr_mode.attr,
&dev_attr_namespace.attr, &dev_attr_namespace.attr,
&dev_attr_uuid.attr, &dev_attr_uuid.attr,
&dev_attr_align.attr, &dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_size.attr,
NULL, NULL,
}; };
...@@ -299,6 +355,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn) ...@@ -299,6 +355,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0) if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
return -ENODEV; return -ENODEV;
if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
pfn_sb->start_pad = 0;
pfn_sb->end_trunc = 0;
}
switch (le32_to_cpu(pfn_sb->mode)) { switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM: case PFN_MODE_RAM:
case PFN_MODE_PMEM: case PFN_MODE_PMEM:
......
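
[Editor's note] The two new read-only attributes expose where the usable data area of a pfn instance actually starts and how large it is once the struct page reservation and any section-collision padding are subtracted. Restating the sysfs math from the code above:

/*
 *   resource = nsio->res.start + start_pad + dataoff
 *   size     = resource_size(&nsio->res) - start_pad - end_trunc - dataoff
 */
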
...@@ -43,12 +43,13 @@ struct pmem_device { ...@@ -43,12 +43,13 @@ struct pmem_device {
phys_addr_t data_offset; phys_addr_t data_offset;
u64 pfn_flags; u64 pfn_flags;
void __pmem *virt_addr; void __pmem *virt_addr;
/* immutable base size of the namespace */
size_t size; size_t size;
/* trim size when namespace capacity has been section aligned */
u32 pfn_pad;
struct badblocks bb; struct badblocks bb;
}; };
static int pmem_major;
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{ {
if (bb->count) { if (bb->count) {
...@@ -175,7 +176,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector, ...@@ -175,7 +176,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
*kaddr = pmem->virt_addr + offset; *kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
return pmem->size - offset; return pmem->size - pmem->pfn_pad - offset;
} }
static const struct block_device_operations pmem_fops = { static const struct block_device_operations pmem_fops = {
...@@ -258,15 +259,14 @@ static int pmem_attach_disk(struct device *dev, ...@@ -258,15 +259,14 @@ static int pmem_attach_disk(struct device *dev,
return -ENOMEM; return -ENOMEM;
} }
disk->major = pmem_major;
disk->first_minor = 0;
disk->fops = &pmem_fops; disk->fops = &pmem_fops;
disk->private_data = pmem; disk->private_data = pmem;
disk->queue = pmem->pmem_queue; disk->queue = pmem->pmem_queue;
disk->flags = GENHD_FL_EXT_DEVT; disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name); nvdimm_namespace_disk_name(ndns, disk->disk_name);
disk->driverfs_dev = dev; disk->driverfs_dev = dev;
set_capacity(disk, (pmem->size - pmem->data_offset) / 512); set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
pmem->pmem_disk = disk; pmem->pmem_disk = disk;
devm_exit_badblocks(dev, &pmem->bb); devm_exit_badblocks(dev, &pmem->bb);
if (devm_init_badblocks(dev, &pmem->bb)) if (devm_init_badblocks(dev, &pmem->bb))
@@ -309,6 +309,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
 	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
 	struct nd_region *nd_region;
 	unsigned long npfns;
 	phys_addr_t offset;
@@ -334,21 +337,56 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	}
 
 	memset(pfn_sb, 0, sizeof(*pfn_sb));
-	npfns = (pmem->size - SZ_8K) / SZ_4K;
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
 	/*
 	 * Note, we use 64 here for the standard size of struct page,
 	 * debugging options may cause it to be larger in which case the
 	 * implementation will limit the pfns advertised through
 	 * ->direct_access() to those that are included in the memmap.
 	 */
+	start += start_pad;
+	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
 	if (nd_pfn->mode == PFN_MODE_PMEM)
-		offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+			- start;
 	else if (nd_pfn->mode == PFN_MODE_RAM)
-		offset = ALIGN(SZ_8K, nd_pfn->align);
+		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
 	else
 		goto err;
 
-	npfns = (pmem->size - offset) / SZ_4K;
+	if (offset + start_pad + end_trunc >= pmem->size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		goto err;
+	}
+
+	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
 	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
 	pfn_sb->dataoff = cpu_to_le64(offset);
 	pfn_sb->npfns = cpu_to_le64(npfns);
@@ -356,6 +394,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
 	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
 	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(1);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
@@ -386,41 +427,56 @@ static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
 	return 0;
 }
 
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 {
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
-	struct device *dev = &nd_pfn->dev;
-	struct nd_region *nd_region;
-	struct vmem_altmap *altmap;
-	struct nd_pfn_sb *pfn_sb;
-	struct pmem_device *pmem;
-	struct request_queue *q;
-	phys_addr_t offset;
 	int rc;
+	struct resource res;
+	struct request_queue *q;
+	struct pmem_device *pmem;
+	struct vmem_altmap *altmap;
+	struct device *dev = &nd_pfn->dev;
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
 	struct vmem_altmap __altmap = {
-		.base_pfn = __phys_to_pfn(nsio->res.start),
-		.reserve = __phys_to_pfn(SZ_8K),
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
 	};
 
-	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return -ENODEV;
+	pmem = dev_get_drvdata(dev);
+	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+	pmem->pfn_pad = start_pad + end_trunc;
 
-	nd_region = to_nd_region(dev->parent);
-	rc = nd_pfn_init(nd_pfn);
-	if (rc)
-		return rc;
-
-	pfn_sb = nd_pfn->pfn_sb;
-	offset = le64_to_cpu(pfn_sb->dataoff);
 	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
 	if (nd_pfn->mode == PFN_MODE_RAM) {
-		if (offset < SZ_8K)
+		if (pmem->data_offset < SZ_8K)
 			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
 		altmap = NULL;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
-		nd_pfn->npfns = (resource_size(&nsio->res) - offset)
+		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
 			/ PAGE_SIZE;
 		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
 			dev_info(&nd_pfn->dev,
@@ -428,7 +484,7 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 					le64_to_cpu(nd_pfn->pfn_sb->npfns),
 					nd_pfn->npfns);
 		altmap = & __altmap;
-		altmap->free = __phys_to_pfn(offset - SZ_8K);
+		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
 		altmap->alloc = 0;
 	} else {
 		rc = -ENXIO;
@@ -436,10 +492,12 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	}
 
 	/* establish pfn range for lookup, and switch to direct map */
-	pmem = dev_get_drvdata(dev);
 	q = pmem->pmem_queue;
+	memcpy(&res, &nsio->res, sizeof(res));
+	res.start += start_pad;
+	res.end -= end_trunc;
 	devm_memunmap(dev, (void __force *) pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
+	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
 			&q->q_usage_counter, altmap);
 	pmem->pfn_flags |= PFN_MAP;
 	if (IS_ERR(pmem->virt_addr)) {
@@ -448,7 +506,6 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 	}
 
 	/* attach pmem disk in "pfn-mode" */
-	pmem->data_offset = offset;
 	rc = pmem_attach_disk(dev, ndns, pmem);
 	if (rc)
 		goto err;
@@ -457,6 +514,22 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
 err:
 	nvdimm_namespace_detach_pfn(ndns);
 	return rc;
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return -ENODEV;
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return rc;
+
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_namespace_attach_pfn(nd_pfn);
 }
 
 static int nd_pmem_probe(struct device *dev)
@@ -547,26 +620,13 @@ static struct nd_device_driver nd_pmem_driver = {
 
 static int __init pmem_init(void)
 {
-	int error;
-
-	pmem_major = register_blkdev(0, "pmem");
-	if (pmem_major < 0)
-		return pmem_major;
-
-	error = nd_driver_register(&nd_pmem_driver);
-	if (error) {
-		unregister_blkdev(pmem_major, "pmem");
-		return error;
-	}
-
-	return 0;
+	return nd_driver_register(&nd_pmem_driver);
 }
 module_init(pmem_init);
 
 static void pmem_exit(void)
 {
 	driver_unregister(&nd_pmem_driver.drv);
-	unregister_blkdev(pmem_major, "pmem");
 }
 module_exit(pmem_exit);
......
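
[Editor's note] The altmap handling above is the core of the section-collision fix: memory is hotplugged at section granularity, so the struct page map has to start at the section boundary below the (possibly padded) namespace base, with everything before the data offset marked reserved. Restating the relationships from __nvdimm_namespace_attach_pfn() and pmem_attach_disk():

/*
 *   base             = nsio->res.start + start_pad
 *   altmap->base_pfn = PFN_SECTION_ALIGN_DOWN(PHYS_PFN(base))
 *   altmap->reserve  = PHYS_PFN(SZ_8K) + (PHYS_PFN(base) - altmap->base_pfn)
 *   altmap->free     = PHYS_PFN(data_offset - SZ_8K)
 *   usable capacity  = pmem->size - pmem->pfn_pad - pmem->data_offset
 */
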
...@@ -91,7 +91,7 @@ static int configure_memory(const unsigned char *buf, ...@@ -91,7 +91,7 @@ static int configure_memory(const unsigned char *buf,
for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) { for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
c = get_8(buf+len); c = get_8(buf+len);
if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) { if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
int result; int result;
res->name = name; res->name = name;
...@@ -183,7 +183,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent, ...@@ -183,7 +183,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
for (i=0;i<HPEE_PORT_MAX_ENT;i++) { for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
c = get_8(buf+len); c = get_8(buf+len);
if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) { if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
res->name = board; res->name = board;
res->start = get_16(buf+len+1); res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1; res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
......
...@@ -117,7 +117,7 @@ int rio_request_inb_mbox(struct rio_mport *mport, ...@@ -117,7 +117,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
if (mport->ops->open_inb_mbox == NULL) if (mport->ops->open_inb_mbox == NULL)
goto out; goto out;
res = kmalloc(sizeof(struct resource), GFP_KERNEL); res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) { if (res) {
rio_init_mbox_res(res, mbox, mbox); rio_init_mbox_res(res, mbox, mbox);
...@@ -185,7 +185,7 @@ int rio_request_outb_mbox(struct rio_mport *mport, ...@@ -185,7 +185,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
if (mport->ops->open_outb_mbox == NULL) if (mport->ops->open_outb_mbox == NULL)
goto out; goto out;
res = kmalloc(sizeof(struct resource), GFP_KERNEL); res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) { if (res) {
rio_init_mbox_res(res, mbox, mbox); rio_init_mbox_res(res, mbox, mbox);
...@@ -285,7 +285,7 @@ int rio_request_inb_dbell(struct rio_mport *mport, ...@@ -285,7 +285,7 @@ int rio_request_inb_dbell(struct rio_mport *mport,
{ {
int rc = 0; int rc = 0;
struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) { if (res) {
rio_init_dbell_res(res, start, end); rio_init_dbell_res(res, start, end);
...@@ -360,7 +360,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) ...@@ -360,7 +360,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
u16 end) u16 end)
{ {
struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) { if (res) {
rio_init_dbell_res(res, start, end); rio_init_dbell_res(res, start, end);
......
...@@ -66,7 +66,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev, ...@@ -66,7 +66,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev,
superhyway_read_vcr(dev, base, &dev->vcr); superhyway_read_vcr(dev, base, &dev->vcr);
if (!dev->resource) { if (!dev->resource) {
dev->resource = kmalloc(sizeof(struct resource), GFP_KERNEL); dev->resource = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!dev->resource) { if (!dev->resource) {
kfree(dev); kfree(dev);
return -ENOMEM; return -ENOMEM;
......
...@@ -257,7 +257,7 @@ static struct resource *additional_memory_resource(phys_addr_t size) ...@@ -257,7 +257,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL; return NULL;
res->name = "System RAM"; res->name = "System RAM";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res, ret = allocate_resource(&iomem_resource, res,
size, 0, -1, size, 0, -1,
......
...@@ -20,6 +20,7 @@ struct resource { ...@@ -20,6 +20,7 @@ struct resource {
resource_size_t end; resource_size_t end;
const char *name; const char *name;
unsigned long flags; unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child; struct resource *parent, *sibling, *child;
}; };
...@@ -49,12 +50,19 @@ struct resource { ...@@ -49,12 +50,19 @@ struct resource {
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */ #define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ #define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000 #define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
/* I/O resource extended types */
#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
/* PnP IRQ specific bits (IORESOURCE_BITS) */ /* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0) #define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1) #define IORESOURCE_IRQ_LOWEDGE (1<<1)
...@@ -105,6 +113,22 @@ struct resource { ...@@ -105,6 +113,22 @@ struct resource {
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
/*
* I/O Resource Descriptors
*
* Descriptors are used by walk_iomem_res_desc() and region_intersects()
* for searching a specific resource range in the iomem table. Assign
* a new descriptor when a resource range supports the search interfaces.
* Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
*/
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
};
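A rough usage sketch for the descriptor-based walk (the callback and counter are hypothetical; the prototype of walk_iomem_res_desc() is the one declared further down in this diff):

	static int count_range(u64 start, u64 end, void *arg)
	{
		(*(unsigned int *)arg)++;
		return 0;	/* a nonzero return stops the walk */
	}

	unsigned int nr = 0;

	/* visit every busy persistent-memory range in the iomem tree */
	walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY,
			    IORESOURCE_MEM | IORESOURCE_BUSY,
			    0, -1, &nr, count_range);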
/* helpers to define resources */ /* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
...@@ -113,6 +137,7 @@ struct resource { ...@@ -113,6 +137,7 @@ struct resource {
.end = (_start) + (_size) - 1, \ .end = (_start) + (_size) - 1, \
.name = (_name), \ .name = (_name), \
.flags = (_flags), \ .flags = (_flags), \
.desc = IORES_DESC_NONE, \
} }
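Since DEFINE_RES_NAMED() now fills in .desc itself, statically defined resources keep working without per-site changes. A hedged example with a made-up address range:

	/* hypothetical platform RAM window; .desc defaults to IORES_DESC_NONE */
	static struct resource example_ram =
		DEFINE_RES_NAMED(0x80000000, 0x10000000, "System RAM",
				 IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY);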
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \ #define DEFINE_RES_IO_NAMED(_start, _size, _name) \
...@@ -149,6 +174,7 @@ extern void reserve_region_with_split(struct resource *root, ...@@ -149,6 +174,7 @@ extern void reserve_region_with_split(struct resource *root,
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new); extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail); extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new, extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min, resource_size_t size, resource_size_t min,
...@@ -170,6 +196,10 @@ static inline unsigned long resource_type(const struct resource *res) ...@@ -170,6 +196,10 @@ static inline unsigned long resource_type(const struct resource *res)
{ {
return res->flags & IORESOURCE_TYPE_BITS; return res->flags & IORESOURCE_TYPE_BITS;
} }
static inline unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
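With the extended-type helper in place, a System RAM check can key off the flags rather than comparing res->name against "System RAM"; a sketch, assuming res points to a valid struct resource:

	if (resource_type(res) == IORESOURCE_MEM &&
	    resource_ext_type(res) == IORESOURCE_SYSRAM)
		pr_debug("%pR is System RAM\n", res);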
/* True iff r1 completely contains r2 */ /* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2) static inline bool resource_contains(struct resource *r1, struct resource *r2)
{ {
...@@ -239,8 +269,8 @@ extern int ...@@ -239,8 +269,8 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg, walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(u64, u64, void *)); int (*func)(u64, u64, void *));
extern int extern int
walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg, walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
int (*func)(u64, u64, void *)); void *arg, int (*func)(u64, u64, void *));
/* True if any part of r1 overlaps r2 */ /* True if any part of r1 overlaps r2 */
static inline bool resource_overlaps(struct resource *r1, struct resource *r2) static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
......
...@@ -387,7 +387,8 @@ enum { ...@@ -387,7 +387,8 @@ enum {
REGION_MIXED, REGION_MIXED,
}; };
int region_intersects(resource_size_t offset, size_t size, const char *type); int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
unsigned long desc);
/* Support for virtually mapped pages */ /* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr); struct page *vmalloc_to_page(const void *addr);
......
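Callers of region_intersects() that used to match on the literal resource name now pass a flags/descriptor pair instead; a before/after sketch (offset and size are placeholders):

	/* before */
	ret = region_intersects(offset, size, "System RAM");

	/* after */
	ret = region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE);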
...@@ -66,13 +66,15 @@ struct resource crashk_res = { ...@@ -66,13 +66,15 @@ struct resource crashk_res = {
.name = "Crash kernel", .name = "Crash kernel",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
.desc = IORES_DESC_CRASH_KERNEL
}; };
struct resource crashk_low_res = { struct resource crashk_low_res = {
.name = "Crash kernel", .name = "Crash kernel",
.start = 0, .start = 0,
.end = 0, .end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
.desc = IORES_DESC_CRASH_KERNEL
}; };
int kexec_should_crash(struct task_struct *p) int kexec_should_crash(struct task_struct *p)
...@@ -959,7 +961,7 @@ int crash_shrink_memory(unsigned long new_size) ...@@ -959,7 +961,7 @@ int crash_shrink_memory(unsigned long new_size)
ram_res->start = end; ram_res->start = end;
ram_res->end = crashk_res.end; ram_res->end = crashk_res.end;
ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
ram_res->name = "System RAM"; ram_res->name = "System RAM";
crashk_res.end = end - 1; crashk_res.end = end - 1;
......
...@@ -524,10 +524,10 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, ...@@ -524,10 +524,10 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
/* Walk the RAM ranges and allocate a suitable range for the buffer */ /* Walk the RAM ranges and allocate a suitable range for the buffer */
if (image->type == KEXEC_TYPE_CRASH) if (image->type == KEXEC_TYPE_CRASH)
ret = walk_iomem_res("Crash kernel", ret = walk_iomem_res_desc(crashk_res.desc,
IORESOURCE_MEM | IORESOURCE_BUSY, IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
crashk_res.start, crashk_res.end, kbuf, crashk_res.start, crashk_res.end, kbuf,
locate_mem_hole_callback); locate_mem_hole_callback);
else else
ret = walk_system_ram_res(0, -1, kbuf, ret = walk_system_ram_res(0, -1, kbuf,
locate_mem_hole_callback); locate_mem_hole_callback);
......
...@@ -47,7 +47,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size) ...@@ -47,7 +47,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
* being mapped does not have i/o side effects and the __iomem * being mapped does not have i/o side effects and the __iomem
* annotation is not applicable. * annotation is not applicable.
* *
* MEMREMAP_WB - matches the default mapping for "System RAM" on * MEMREMAP_WB - matches the default mapping for System RAM on
* the architecture. This is usually a read-allocate write-back cache. * the architecture. This is usually a read-allocate write-back cache.
* Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
* memremap() will bypass establishing a new mapping and instead return * memremap() will bypass establishing a new mapping and instead return
...@@ -56,11 +56,12 @@ static void *try_ram_remap(resource_size_t offset, size_t size) ...@@ -56,11 +56,12 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
* MEMREMAP_WT - establish a mapping whereby writes either bypass the * MEMREMAP_WT - establish a mapping whereby writes either bypass the
* cache or are written through to memory and never exist in a * cache or are written through to memory and never exist in a
* cache-dirty state with respect to program visibility. Attempts to * cache-dirty state with respect to program visibility. Attempts to
* map "System RAM" with this mapping type will fail. * map System RAM with this mapping type will fail.
*/ */
void *memremap(resource_size_t offset, size_t size, unsigned long flags) void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{ {
int is_ram = region_intersects(offset, size, "System RAM"); int is_ram = region_intersects(offset, size,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
void *addr = NULL; void *addr = NULL;
if (is_ram == REGION_MIXED) { if (is_ram == REGION_MIXED) {
...@@ -76,7 +77,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags) ...@@ -76,7 +77,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
* MEMREMAP_WB is special in that it can be satisfied * MEMREMAP_WB is special in that it can be satisfied
* from the direct map. Some archs depend on the * from the direct map. Some archs depend on the
* capability of memremap() to autodetect cases where * capability of memremap() to autodetect cases where
* the requested range is potentially in "System RAM" * the requested range is potentially in System RAM.
*/ */
if (is_ram == REGION_INTERSECTS) if (is_ram == REGION_INTERSECTS)
addr = try_ram_remap(offset, size); addr = try_ram_remap(offset, size);
...@@ -88,7 +89,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags) ...@@ -88,7 +89,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
* If we don't have a mapping yet and more request flags are * If we don't have a mapping yet and more request flags are
* pending then we will be attempting to establish a new virtual * pending then we will be attempting to establish a new virtual
* address mapping. Enforce that this mapping is not aliasing * address mapping. Enforce that this mapping is not aliasing
* "System RAM" * System RAM.
*/ */
if (!addr && is_ram == REGION_INTERSECTS && flags) { if (!addr && is_ram == REGION_INTERSECTS && flags) {
WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n", WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
...@@ -271,7 +272,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, ...@@ -271,7 +272,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
struct percpu_ref *ref, struct vmem_altmap *altmap) struct percpu_ref *ref, struct vmem_altmap *altmap)
{ {
int is_ram = region_intersects(res->start, resource_size(res), int is_ram = region_intersects(res->start, resource_size(res),
"System RAM"); IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
resource_size_t key, align_start, align_size, align_end; resource_size_t key, align_start, align_size, align_end;
struct dev_pagemap *pgmap; struct dev_pagemap *pgmap;
struct page_map *page_map; struct page_map *page_map;
......
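For completeness, a minimal sketch of the caller side (address and size are hypothetical): memremap() now uses region_intersects(..., IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) to refuse MEMREMAP_WT on ranges that intersect System RAM, while MEMREMAP_WB on such ranges may be satisfied straight from the direct map:

	/* hypothetical reserved region that is not marked as System RAM */
	void *va = memremap(0x100000000ULL, SZ_2M, MEMREMAP_WB);

	if (!va)
		return -ENOMEM;
	/* ... use the mapping ... */
	memunmap(va);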
...@@ -138,7 +138,7 @@ static struct resource *register_memory_resource(u64 start, u64 size) ...@@ -138,7 +138,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
res->name = "System RAM"; res->name = "System RAM";
res->start = start; res->start = start;
res->end = start + size - 1; res->end = start + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0) { if (request_resource(&iomem_resource, res) < 0) {
pr_debug("System RAM resource %pR cannot be added\n", res); pr_debug("System RAM resource %pR cannot be added\n", res);
kfree(res); kfree(res);
......