Commit 09074950 authored by Vineet Gupta

ARC: add/fix some comments in code - no functional change

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent 6de6066c
@@ -72,12 +72,13 @@ arcpct0: pct {
 	};
 
 	/*
-	 * This INTC is actually connected to DW APB GPIO
-	 * which acts as a wire between MB INTC and CPU INTC.
-	 * GPIO INTC is configured in platform init code
-	 * and here we mimic direct connection from MB INTC to
-	 * CPU INTC, thus we set "interrupts = <7>" instead of
-	 * "interrupts = <12>"
+	 * The DW APB ICTL intc on MB is connected to CPU intc via a
+	 * DT "invisible" DW APB GPIO block, configured to simply pass thru
+	 * interrupts - set up accordingly in platform init (plat-axs10x/axs10x.c)
+	 *
+	 * So here we mimic a direct connection between them, ignoring the
+	 * DW APB GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
+	 * instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
 	 *
 	 * This intc actually resides on MB, but we move it here to
 	 * avoid duplicating the MB dtsi file given that IRQ from
...
@@ -110,18 +110,18 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 					 sizeof(*(ptr))))
 
 /*
- * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
- * not require any locking. However there's a quirk.
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
- * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
- * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
- * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
+ * xchg() maps directly to ARC EX instruction which guarantees atomicity.
+ * However in !LLSC config, it also needs to use @atomic_ops_lock spinlock
+ * due to a subtle reason:
+ *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a lot
+ *    of kernel code which calls xchg()/cmpxchg() on the same data (see llist.h)
+ *    Hence xchg() needs to follow the same locking rules.
  *
- * This however is only relevant if SMP and/or ARC lacks LLSC
- *   if (UP or LLSC)
- *      xchg doesn't need serialization
- *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
- *      xchg needs serialization
+ * Technically the lock is also needed for UP (boils down to irq save/restore),
+ * but we can cheat a bit: cmpxchg()'s atomic_ops_lock() disables irqs, so it
+ * can't possibly be interrupted/preempted/clobbered by xchg().
+ * The other way around, xchg() is a single instruction anyway, so it can't be
+ * interrupted as such.
  */
 
 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
...
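The locking rule described in the new comment can be illustrated outside the kernel. Below is a minimal userspace sketch in plain C with pthreads (not the kernel's implementation; the names emu_cmpxchg(), emu_xchg() and ops_lock are made up for illustration): once compare-and-swap is emulated under a lock, a swap on the same variable must take that same lock, or it can slip in between the emulation's read and write and its effect be lost.

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t ops_lock;	/* stand-in for atomic_ops_lock */
static volatile long shared;

/* Emulated compare-and-swap: the read-modify-write is done under the lock */
static long emu_cmpxchg(volatile long *p, long expected, long newval)
{
	long prev;

	pthread_spin_lock(&ops_lock);
	prev = *p;
	if (prev == expected)
		*p = newval;
	pthread_spin_unlock(&ops_lock);

	return prev;
}

/*
 * Swap: even if the store itself could be one atomic instruction (like the
 * ARC EX insn), it must take the same lock, otherwise it could land between
 * the read and the write inside emu_cmpxchg() and be silently overwritten.
 */
static long emu_xchg(volatile long *p, long newval)
{
	long prev;

	pthread_spin_lock(&ops_lock);
	prev = *p;
	*p = newval;
	pthread_spin_unlock(&ops_lock);

	return prev;
}

int main(void)
{
	pthread_spin_init(&ops_lock, PTHREAD_PROCESS_PRIVATE);

	emu_xchg(&shared, 1);
	printf("cmpxchg(1->2) saw %ld, value is now %ld\n",
	       emu_cmpxchg(&shared, 1, 2), shared);
	return 0;
}

This is exactly why, in the !LLSC && SMP case guarded by the #if above, xchg() ends up sharing atomic_ops_lock with the cmpxchg() emulation.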
@@ -95,7 +95,7 @@ static const char * const arc_pmu_ev_hw_map[] = {
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
 	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
...
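This table backs the generic perf hardware event ids; userspace never sees the ARC condition names. As a hedged illustration (generic perf_event_open(2) usage, not part of this commit), counting PERF_COUNT_HW_BRANCH_INSTRUCTIONS, which the table maps to "ijmp" and which per the new comment excludes ZOL jumps, looks roughly like this; the busy loop is just arbitrary branchy work:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;	/* "ijmp" on ARC */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (volatile int i = 0; i < 100000; i++)	/* branchy busy work */
		;

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("branches retired: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}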
@@ -199,8 +199,8 @@ static void arc_pmu_start(struct perf_event *event, int flags)
 	event->hw.state = 0;
 
 	/* enable ARC pmu here */
-	write_aux_reg(ARC_REG_PCT_INDEX, idx);
-	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);
+	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
+	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
 }
 
 static void arc_pmu_stop(struct perf_event *event, int flags)
...
@@ -65,7 +65,7 @@ asmlinkage void ret_from_fork(void);
  * ------------------
  * |     r25          |   <==== top of Stack (thread.ksp)
  * ~                   ~
- * |    --to--         |   (CALLEE Regs of user mode)
+ * |    --to--         |   (CALLEE Regs of kernel mode)
  * |     r13           |
  * ------------------
  * |     fp            |
...
@@ -46,7 +46,7 @@ static void __init axs10x_enable_gpio_intc_wire(void)
  *  -------------------          -------------------
  * | snps,dw-apb-gpio |         | snps,dw-apb-gpio |
  *  -------------------          -------------------
- *        |                          |
+ *        | #12                      |
  *        |                          [ Debug UART on cpu card ]
  *        |
  *  ------------------------
...
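As a rough illustration of the "wire" setup the diagram refers to, here is a sketch of the kind of programming axs10x_enable_gpio_intc_wire() has to do on a DW APB GPIO block. The interrupt register offsets follow the DW APB GPIO programming model as used by the upstream gpio-dwapb driver, but the function name, base address, mapping size and bit position below are placeholders for illustration, not the real platform values:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/bits.h>

/* DW APB GPIO interrupt registers (per the DesignWare GPIO programming model) */
#define GPIO_INTEN		0x30	/* per-bit interrupt enable */
#define GPIO_INTMASK		0x34	/* per-bit interrupt mask */
#define GPIO_INTTYPE_LEVEL	0x38	/* 0: level, 1: edge */
#define GPIO_INT_POLARITY	0x3c	/* 0: active low, 1: active high */

/* Placeholder values only */
#define CPU_GPIO_INTC_BASE	0xf0003000UL
#define MB_TO_CPU_IRQ_BIT	BIT(12)

static void __init gpio_intc_pass_thru_sketch(void)
{
	void __iomem *base = ioremap(CPU_GPIO_INTC_BASE, 0x100);

	if (!base)
		return;

	/*
	 * Level triggered, active high, unmasked, enabled: the GPIO block
	 * now simply forwards the MB intc output line to the core intc.
	 */
	iowrite32(0, base + GPIO_INTTYPE_LEVEL);
	iowrite32(MB_TO_CPU_IRQ_BIT, base + GPIO_INT_POLARITY);
	iowrite32(0, base + GPIO_INTMASK);
	iowrite32(MB_TO_CPU_IRQ_BIT, base + GPIO_INTEN);

	iounmap(base);
}

With the relevant bit enabled, unmasked and set level/active-high, the GPIO block effectively passes the MB intc output straight through to the core intc, which is why the DT in the first hunk can pretend there is a direct connection.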