Commit 0ddbbb89 authored by Bjorn Helgaas, committed by Michael Ellerman

powerpc: Fix typos

Fix typos, most reported by "codespell arch/powerpc".  Only touches
comments, no code changes.
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240103231605.1801364-8-helgaas@kernel.org
parent 39434af1
@@ -108,8 +108,8 @@ DTC_FLAGS ?= -p 1024
 # these files into the build dir, fix up any includes and ensure that dependent
 # files are copied in the right order.
-# these need to be seperate variables because they are copied out of different
-# directories in the kernel tree. Sure you COULd merge them, but it's a
+# these need to be separate variables because they are copied out of different
+# directories in the kernel tree. Sure you COULD merge them, but it's a
 # cure-is-worse-than-disease situation.
 zlib-decomp-$(CONFIG_KERNEL_GZIP) := decompress_inflate.c
 zlib-$(CONFIG_KERNEL_GZIP) := inffast.c inflate.c inftrees.c
...
@@ -172,7 +172,7 @@ ieee1588@ef602800 {
 	reg = <0xef602800 0x60>;
 	interrupt-parent = <&UIC0>;
 	interrupts = <0x4 0x4>;
-	/* This thing is a bit weird. It has it's own UIC
+	/* This thing is a bit weird. It has its own UIC
 	 * that it uses to generate snapshot triggers. We
 	 * don't really support this device yet, and it needs
 	 * work to figure this out.
...
@@ -188,7 +188,7 @@ static inline void prep_esm_blob(struct addr_range vmlinux, void *chosen) { }
 /* A buffer that may be edited by tools operating on a zImage binary so as to
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
- * The buffer is put in it's own section so that tools may locate it easier.
+ * The buffer is put in its own section so that tools may locate it easier.
  */
 static char cmdline[BOOT_COMMAND_LINE_SIZE]
 	__attribute__((__section__("__builtin_cmdline")));
...
@@ -25,7 +25,7 @@ BSS_STACK(4096);
 /* A buffer that may be edited by tools operating on a zImage binary so as to
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
- * The buffer is put in it's own section so that tools may locate it easier.
+ * The buffer is put in its own section so that tools may locate it easier.
  */
 static char cmdline[BOOT_COMMAND_LINE_SIZE]
...
@@ -982,7 +982,7 @@ static inline phys_addr_t page_to_phys(struct page *page)
 }
 /*
- * 32 bits still uses virt_to_bus() for it's implementation of DMA
+ * 32 bits still uses virt_to_bus() for its implementation of DMA
  * mappings se we have to keep it defined here. We also have some old
  * drivers (shame shame shame) that use bus_to_virt() and haven't been
  * fixed yet so I need to define it here.
...
@@ -1027,10 +1027,10 @@ struct opal_i2c_request {
  * The host will pass on OPAL, a buffer of length OPAL_SYSEPOW_MAX
  * with individual elements being 16 bits wide to fetch the system
  * wide EPOW status. Each element in the buffer will contain the
- * EPOW status in it's bit representation for a particular EPOW sub
+ * EPOW status in its bit representation for a particular EPOW sub
  * class as defined here. So multiple detailed EPOW status bits
  * specific for any sub class can be represented in a single buffer
- * element as it's bit representation.
+ * element as its bit representation.
  */
 /* System EPOW type */
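For readers unfamiliar with the layout this comment describes: the buffer is indexed by EPOW sub-class, and each 16-bit element packs several detailed status bits for that class. A minimal sketch of decoding one element follows; the macro names and bit positions are illustrative stand-ins, not the actual opal-api.h definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative names only: stand-ins for the real OPAL_SYSEPOW_*
     * class indices and OPAL_SYSPOWER_* status bits. */
    #define EPOW_CLASS_POWER  0         /* index into the status buffer */
    #define EPOW_MAX_CLASSES  3         /* buffer length, i.e. OPAL_SYSEPOW_MAX */
    #define EPOW_POWER_UPS    (1 << 0)  /* one detailed status bit */
    #define EPOW_POWER_FAIL   (1 << 1)  /* another detailed status bit */

    static void decode_epow(const uint16_t status[EPOW_MAX_CLASSES])
    {
            uint16_t power = status[EPOW_CLASS_POWER];

            /* Several detailed bits may be set in one element at once. */
            if (power & EPOW_POWER_UPS)
                    printf("running on UPS power\n");
            if (power & EPOW_POWER_FAIL)
                    printf("power failure expected\n");
    }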
...
@@ -192,7 +192,7 @@ static inline long pmac_call_feature(int selector, struct device_node* node,
 /* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
  * enable/disable the bmac (ethernet) cell of a mac-io ASIC, also drive
- * it's reset line
+ * its reset line
  */
 #define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)
...
@@ -144,7 +144,7 @@
 #define UNI_N_HWINIT_STATE_SLEEPING 0x01
 #define UNI_N_HWINIT_STATE_RUNNING 0x02
 /* This last bit appear to be used by the bootROM to know the second
- * CPU has started and will enter it's sleep loop with IP=0
+ * CPU has started and will enter its sleep loop with IP=0
 */
 #define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000
...
@@ -108,7 +108,7 @@ typedef struct boot_infos
 	/* ALL BELOW NEW (vers. 4) */
 	/* This defines the physical memory. Valid with BOOT_ARCH_NUBUS flag
-	   (non-PCI) only. On PCI, memory is contiguous and it's size is in the
+	   (non-PCI) only. On PCI, memory is contiguous and its size is in the
 	   device-tree. */
 	boot_info_map_entry_t
 		physMemoryMap[MAX_MEM_MAP_SIZE]; /* Where the phys memory is */
...
@@ -527,7 +527,7 @@ EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
 * eeh_pe_mark_isolated
 * @pe: EEH PE
 *
- * Record that a PE has been isolated by marking the PE and it's children as
+ * Record that a PE has been isolated by marking the PE and its children as
 * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
 * as pci_channel_io_frozen.
 */
...
@@ -681,7 +681,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
 	 * old_cpu == -1 means this is the first CPU which has come here,
 	 * go ahead and trigger fadump.
 	 *
-	 * old_cpu != -1 means some other CPU has already on it's way
+	 * old_cpu != -1 means some other CPU has already on its way
 	 * to trigger fadump, just keep looping here.
 	 */
 	this_cpu = smp_processor_id();
...
@@ -192,7 +192,7 @@ _GLOBAL(scom970_read)
 	xori	r0,r0,MSR_EE
 	mtmsrd	r0,1
-	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
 	 * (including parity). On current CPUs they must be 0'd,
 	 * and finally or in RW bit
 	 */
@@ -226,7 +226,7 @@ _GLOBAL(scom970_write)
 	xori	r0,r0,MSR_EE
 	mtmsrd	r0,1
-	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
+	/* rotate 24 bits SCOM address 8 bits left and mask out its low 8 bits
 	 * (including parity). On current CPUs they must be 0'd.
 	 */
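The address massaging both comments describe is easy to picture in C. Here is a sketch of the rotate-and-mask's effect, not the actual kernel assembly; the read/write indicator's bit position is assumed purely for illustration.

    #include <stdint.h>

    /* Position of the RW flag is assumed here, for illustration only. */
    #define SCOM_READ_BIT 0x8000ULL

    /* Shift the 24-bit SCOM address up by 8 so its low 8 bits (which carry
     * parity, and must be zero on current CPUs) are clear, then OR in the
     * read/write indicator as the read path's comment describes. */
    static uint64_t scom_frame(uint32_t scom_addr, int is_read)
    {
            uint64_t frame = ((uint64_t)(scom_addr & 0x00ffffff)) << 8;

            if (is_read)
                    frame |= SCOM_READ_BIT;
            return frame;
    }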
...
@@ -1661,7 +1661,7 @@ void arch_setup_new_exec(void)
 * cases will happen:
 *
 * 1. The correct thread is running, the wrong thread is not
- * In this situation, the correct thread is woken and proceeds to pass it's
+ * In this situation, the correct thread is woken and proceeds to pass its
 * condition check.
 *
 * 2. Neither threads are running
@@ -1671,15 +1671,15 @@ void arch_setup_new_exec(void)
 * for the wrong thread, or they will execute the condition check immediately.
 *
 * 3. The wrong thread is running, the correct thread is not
- * The wrong thread will be woken, but will fail it's condition check and
+ * The wrong thread will be woken, but will fail its condition check and
 * re-execute wait. The correct thread, when scheduled, will execute either
- * it's condition check (which will pass), or wait, which returns immediately
- * when called the first time after the thread is scheduled, followed by it's
+ * its condition check (which will pass), or wait, which returns immediately
+ * when called the first time after the thread is scheduled, followed by its
 * condition check (which will pass).
 *
 * 4. Both threads are running
- * Both threads will be woken. The wrong thread will fail it's condition check
- * and execute another wait, while the correct thread will pass it's condition
+ * Both threads will be woken. The wrong thread will fail its condition check
+ * and execute another wait, while the correct thread will pass its condition
 * check.
 *
 * @t: the task to set the thread ID for
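The four cases enumerate the standard wake-then-recheck pattern: a thread woken spuriously simply fails its own condition check and waits again. A generic sketch of the idiom in userspace terms (pthreads here, not the powerpc wait primitive the comment is about):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool my_turn; /* stands in for each thread's private condition */

    static void waiter(void)
    {
            pthread_mutex_lock(&lock);
            /* A wrongly woken thread fails its check and waits again; the
             * correct thread passes its check and proceeds (cases 1-4 above). */
            while (!my_turn)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }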
...
@@ -12,7 +12,7 @@ void flush_tmregs_to_thread(struct task_struct *tsk)
 {
 	/*
 	 * If task is not current, it will have been flushed already to
-	 * it's thread_struct during __switch_to().
+	 * its thread_struct during __switch_to().
 	 *
 	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
 	 * in the appropriate thread structures from live.
...
@@ -1567,7 +1567,7 @@ static void add_cpu_to_masks(int cpu)
 	/*
 	 * This CPU will not be in the online mask yet so we need to manually
-	 * add it to it's own thread sibling mask.
+	 * add it to its own thread sibling mask.
 	 */
 	map_cpu_to_node(cpu, cpu_to_node(cpu));
 	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
...
@@ -139,7 +139,7 @@ static unsigned long dscr_default;
 * @val: Returned cpu specific DSCR default value
 *
 * This function returns the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
+ * for any cpu which is contained in its PACA structure.
 */
 static void read_dscr(void *val)
 {
@@ -152,7 +152,7 @@ static void read_dscr(void *val)
 * @val: New cpu specific DSCR default value to update
 *
 * This function updates the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
+ * for any cpu which is contained in its PACA structure.
 */
 static void write_dscr(void *val)
 {
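Because each callback reads or writes the PACA of whatever CPU it happens to run on, callers typically drive them through a cross-CPU call so they execute on the intended CPU. A sketch of that pattern, assuming read_dscr() stores the value through its void * argument as its kerneldoc above suggests:

    #include <linux/smp.h>

    /* Run read_dscr() on the target CPU so it sees that CPU's own PACA;
     * wait=1 spins until the callback has completed there. */
    static unsigned long get_cpu_dscr_default(int cpu)
    {
            unsigned long val = 0;

            smp_call_function_single(cpu, read_dscr, &val, 1);
            return val;
    }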
...
@@ -531,7 +531,7 @@ static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 	xc->cppr = xive_prio_from_guest(new_cppr);
 	/*
-	 * IPIs are synthetized from MFRR and thus don't need
+	 * IPIs are synthesized from MFRR and thus don't need
 	 * any special EOI handling. The underlying interrupt
 	 * used to signal MFRR changes is EOId when fetched from
 	 * the queue.
...
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(flush_icache_range);
 #ifdef CONFIG_HIGHMEM
 /**
- * flush_dcache_icache_phys() - Flush a page by it's physical address
+ * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
 static void flush_dcache_icache_phys(unsigned long physaddr)
...
@@ -376,7 +376,7 @@ notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
 		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
 	}
-	/* Copy the kernel to it's new location and run */
+	/* Copy the kernel to its new location and run */
 	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
 	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);
...
@@ -279,7 +279,7 @@ static void __init mpc512x_setup_diu(void)
 	 * and so negatively affect boot time. Instead we reserve the
 	 * already configured frame buffer area so that it won't be
 	 * destroyed. The starting address of the area to reserve and
-	 * also it's length is passed to memblock_reserve(). It will be
+	 * also its length is passed to memblock_reserve(). It will be
 	 * freed later on first open of fbdev, when splash image is not
 	 * needed any more.
 	 */
...
@@ -868,7 +868,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 }
 /**
- * spu_deactivate - unbind a context from it's physical spu
+ * spu_deactivate - unbind a context from its physical spu
 * @ctx: spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
...
@@ -595,7 +595,7 @@ void __init maple_pci_init(void)
 	/* Probe root PCI hosts, that is on U3 the AGP host and the
 	 * HyperTransport host. That one is actually "kept" around
-	 * and actually added last as it's resource management relies
+	 * and actually added last as its resource management relies
 	 * on the AGP resources to have been setup first
 	 */
 	root = of_find_node_by_path("/");
...
@@ -2,7 +2,7 @@
 /*
 * Support for the interrupt controllers found on Power Macintosh,
 * currently Apple's "Grand Central" interrupt controller in all
- * it's incarnations. OpenPIC support used on newer machines is
+ * its incarnations. OpenPIC support used on newer machines is
 * in a separate file
 *
 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
...
@@ -176,7 +176,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
 *	memory location containing the PC to resume from
 *	at address 0.
 *  - On Core99, we must store the wakeup vector at
- *	address 0x80 and eventually it's parameters
+ *	address 0x80 and eventually its parameters
 *	at address 0x84. I've have some trouble with those
 *	parameters however and I no longer use them.
 */
...
@@ -238,7 +238,7 @@ void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
 	} else if (pdev->is_physfn) {
 		/*
 		 * For PFs adjust their allocated IOV resources to match what
-		 * the PHB can support using it's M64 BAR table.
+		 * the PHB can support using its M64 BAR table.
 		 */
 		pnv_pci_ioda_fixup_iov_resources(pdev);
 	}
@@ -658,7 +658,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 		list_add_tail(&pe->list, &phb->ioda.pe_list);
 		mutex_unlock(&phb->ioda.pe_list_mutex);
-		/* associate this pe to it's pdn */
+		/* associate this pe to its pdn */
 		list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
 			if (vf_pdn->busno == vf_bus &&
 			    vf_pdn->devfn == vf_devfn) {
...
@@ -1059,7 +1059,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
 		}
 	} else {
 		/*
-		 * Interrupt hanlder or fault window setup failed. Means
+		 * Interrupt handler or fault window setup failed. Means
 		 * NX can not generate fault for page fault. So not
 		 * opening for user space tx window.
 		 */
...
@@ -228,7 +228,7 @@ static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
 	struct pseries_vas_window *txwin = data;
 	/*
-	 * The thread hanlder will process this interrupt if it is
+	 * The thread handler will process this interrupt if it is
 	 * already running.
 	 */
 	atomic_inc(&txwin->pending_faults);
...
@@ -383,7 +383,7 @@ static unsigned int xive_get_irq(void)
 * CPU.
 *
 * If we find that there is indeed more in there, we call
- * force_external_irq_replay() to make Linux synthetize an
+ * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
 static void xive_do_queue_eoi(struct xive_cpu *xc)
@@ -874,7 +874,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
 *
 * This also tells us that it's in flight to a host queue
 * or has already been fetched but hasn't been EOIed yet
- * by the host. This it's potentially using up a host
+ * by the host. Thus it's potentially using up a host
 * queue slot. This is important to know because as long
 * as this is the case, we must not hard-unmask it when
 * "returning" that interrupt to the host.
...
@@ -415,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
 		return;
 	}
-	/* Grab it's CAM value */
+	/* Grab its CAM value */
 	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
 	if (rc) {
 		pr_err("Failed to get pool VP info CPU %d\n", cpu);
...