Commit 06369a1e authored by Masahiro Yamada, committed by Russell King

ARM: 8652/1: cache-uniphier: clean up active way setup code

The active way setup function is now only ever called with zero for its
second argument, so the code can be made simpler.
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
parent 1c63d4c5
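
The reasoning behind the cleanup can be sanity-checked outside the kernel. Below is a minimal userspace sketch (not the kernel code itself): its GENMASK() is a simplified stand-in for the macro pulled in via <linux/bitops.h>, and n_ways is a made-up example value.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's GENMASK(h, l) on 32-bit values. */
#define GENMASK(h, l)	(((~0U) >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	uint32_t n_ways = 8;	/* hypothetical example: an 8-way cache */

	/* Old style: way_present_mask = ((u32)1 << n_ways) - 1 */
	uint32_t way_present_mask = ((uint32_t)1 << n_ways) - 1;

	/* New style: way_mask = GENMASK(n_ways - 1, 0) */
	uint32_t way_mask = GENMASK(n_ways - 1, 0);

	/* Both expressions build the same "all ways present" mask. */
	assert(way_present_mask == way_mask);

	/*
	 * Old call path: __uniphier_cache_set_locked_ways(data, 0)
	 * locked no ways and then wrote ~way_locked_mask & way_present_mask
	 * to the per-CPU way control register.
	 */
	uint32_t way_locked_mask = 0 & way_present_mask;
	uint32_t old_written = ~way_locked_mask & way_present_mask;

	/* New call path writes way_mask directly. */
	uint32_t new_written = way_mask;

	assert(old_written == new_written);
	return 0;
}

With zero for the second argument, the old function always enabled every present way, which is exactly what the new helper does unconditionally.
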
@@ -15,6 +15,7 @@

 #define pr_fmt(fmt)		"uniphier: " fmt

+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/log2.h>
@@ -71,8 +72,7 @@
  * @ctrl_base: virtual base address of control registers
  * @rev_base: virtual base address of revision registers
  * @op_base: virtual base address of operation registers
- * @way_present_mask: each bit specifies if the way is present
- * @way_locked_mask: each bit specifies if the way is locked
+ * @way_mask: each bit specifies if the way is present
  * @nsets: number of associativity sets
  * @line_size: line size in bytes
  * @range_op_max_size: max size that can be handled by a single range operation
@@ -83,8 +83,7 @@ struct uniphier_cache_data {
 	void __iomem *rev_base;
 	void __iomem *op_base;
 	void __iomem *way_ctrl_base;
-	u32 way_present_mask;
-	u32 way_locked_mask;
+	u32 way_mask;
 	u32 nsets;
 	u32 line_size;
 	u32 range_op_max_size;
@@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
 	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
 }

-static void __init __uniphier_cache_set_locked_ways(
-					struct uniphier_cache_data *data,
-					u32 way_mask)
+static void __init __uniphier_cache_set_active_ways(
+					struct uniphier_cache_data *data)
 {
 	unsigned int cpu;

-	data->way_locked_mask = way_mask & data->way_present_mask;
-
 	for_each_possible_cpu(cpu)
-		writel_relaxed(~data->way_locked_mask & data->way_present_mask,
-			       data->way_ctrl_base + 4 * cpu);
+		writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
 }

 static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
@@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void)

 	list_for_each_entry(data, &uniphier_cache_list, list) {
 		__uniphier_cache_enable(data, true);
-		__uniphier_cache_set_locked_ways(data, 0);
+		__uniphier_cache_set_active_ways(data);
 	}
 }

@@ -382,8 +377,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
 		goto err;
 	}

-	data->way_present_mask =
-		((u32)1 << cache_size / data->nsets / data->line_size) - 1;
+	data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
+				 0);

 	data->ctrl_base = of_iomap(np, 0);
 	if (!data->ctrl_base) {