Commit 3d0c872a authored by Arnd Bergmann

Merge branch 'exynos/iommu' into next/soc2

This is a dependency for the following Samsung changes.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents 090a80cb 2a96536e
......@@ -85,10 +85,10 @@ config EXYNOS4_SETUP_FIMD0
help
Common setup code for FIMD0.
config EXYNOS4_DEV_SYSMMU
config EXYNOS_DEV_SYSMMU
bool
help
Common setup code for SYSTEM MMU in EXYNOS4
Common setup code for SYSTEM MMU in EXYNOS platforms
config EXYNOS4_DEV_DWMCI
bool
......@@ -200,12 +200,12 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select SAMSUNG_DEV_BACKLIGHT
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
select EXYNOS4_DEV_DMA
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_KEYPAD
......@@ -224,7 +224,6 @@ config MACH_ARMLEX4210
select S3C_DEV_HSMMC3
select EXYNOS4_DEV_AHCI
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_SDHCI
help
Machine support for Samsung ARMLEX4210 based on EXYNOS4210
......@@ -254,6 +253,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_MFC
select S5P_DEV_ONENAND
select S5P_DEV_TV
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
......@@ -325,6 +325,7 @@ config MACH_ORIGEN
select S5P_DEV_USB_EHCI
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
......@@ -348,6 +349,7 @@ config MACH_SMDK4212
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_I2C3
......
......@@ -50,7 +50,7 @@ obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o
obj-y += dev-uart.o
obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o
obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
......
......@@ -168,7 +168,7 @@ static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
}
static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
}
......@@ -198,6 +198,11 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
}
int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
}
static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
......@@ -678,61 +683,55 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = "SYSMMU_MDMA",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 5),
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_FIMC0",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "SYSMMU_FIMC1",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = "SYSMMU_FIMC2",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = "SYSMMU_FIMC3",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = "SYSMMU_JPEG",
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = "SYSMMU_FIMD0",
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_FIMD1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_PCIe",
.enable = exynos4_clk_ip_fsys_ctrl,
.ctrlbit = (1 << 18),
}, {
.name = "SYSMMU_G2D",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "SYSMMU_ROTATOR",
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_TV",
.enable = exynos4_clk_ip_tv_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "SYSMMU_MFC_L",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = "SYSMMU_MFC_R",
.enable = exynos4_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}
};
......
......@@ -26,5 +26,7 @@ extern struct clk *exynos4_clkset_group_list[];
extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
#endif /* __ASM_ARCH_CLOCK_H */
......@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
......@@ -94,6 +95,16 @@ static struct clk init_clocks_off[] = {
.devname = "exynos4-fb.1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
.enable = exynos4_clk_ip_image_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
},
};
......
......@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
......@@ -39,6 +40,16 @@ static struct sleep_save exynos4212_clock_save[] = {
};
#endif
static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
}
static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
}
static struct clk *clk_src_mpll_user_list[] = {
[0] = &clk_fin_mpll,
[1] = &exynos4_clk_mout_mpll.clk,
......@@ -66,7 +77,22 @@ static struct clksrc_clk clksrcs[] = {
};
static struct clk init_clocks_off[] = {
/* nothing here yet */
{
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
.enable = exynos4_clk_ip_dmc_ctrl,
.ctrlbit = (1 << 24),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
.enable = exynos4212_clk_ip_isp0_ctrl,
.ctrlbit = (7 << 8),
}, {
.name = SYSMMU_CLOCK_NAME2,
.devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
.enable = exynos4212_clk_ip_isp1_ctrl,
.ctrlbit = (1 << 4),
}
};
#ifdef CONFIG_PM_SLEEP
......
......@@ -82,6 +82,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
}
static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
}
static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
......@@ -127,6 +132,21 @@ static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
}
static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
}
static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
}
static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
}
/* Core list of CMU_CPU side */
static struct clksrc_clk exynos5_clk_mout_apll = {
......@@ -630,6 +650,76 @@ static struct clk exynos5_init_clocks_off[] = {
.parent = &exynos5_clk_aclk_66.clk,
.enable = exynos5_clk_ip_peric_ctrl,
.ctrlbit = (1 << 14),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
.enable = &exynos5_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 1),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
.enable = &exynos5_clk_ip_mfc_ctrl,
.ctrlbit = (1 << 2),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
.enable = &exynos5_clk_ip_disp1_ctrl,
.ctrlbit = (1 << 9)
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
.enable = &exynos5_clk_ip_gen_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
.enable = &exynos5_clk_ip_gen_ctrl,
.ctrlbit = (1 << 6)
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 8),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 9),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 10),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
.enable = &exynos5_clk_ip_isp0_ctrl,
.ctrlbit = (0x3F << 8),
}, {
.name = SYSMMU_CLOCK_NAME2,
.devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
.enable = &exynos5_clk_ip_isp1_ctrl,
.ctrlbit = (0xF << 4),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(camif0, 12),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 11),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(camif1, 13),
.enable = &exynos5_clk_ip_gscl_ctrl,
.ctrlbit = (1 << 12),
}, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
.enable = &exynos5_clk_ip_acp_ctrl,
.ctrlbit = (1 << 7)
}
};
......
/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
/* linux/arch/arm/mach-exynos/dev-sysmmu.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - System MMU support
* EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -12,222 +12,263 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <plat/cpu.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/sysmmu.h>
#include <plat/s5p-clock.h>
/* These names must be equal to the clock names in mach-exynos4/clock.c */
const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
"SYSMMU_MDMA" ,
"SYSMMU_SSS" ,
"SYSMMU_FIMC0" ,
"SYSMMU_FIMC1" ,
"SYSMMU_FIMC2" ,
"SYSMMU_FIMC3" ,
"SYSMMU_JPEG" ,
"SYSMMU_FIMD0" ,
"SYSMMU_FIMD1" ,
"SYSMMU_PCIe" ,
"SYSMMU_G2D" ,
"SYSMMU_ROTATOR",
"SYSMMU_MDMA2" ,
"SYSMMU_TV" ,
"SYSMMU_MFC_L" ,
"SYSMMU_MFC_R" ,
};
static struct resource exynos4_sysmmu_resource[] = {
[0] = {
.start = EXYNOS4_PA_SYSMMU_MDMA,
.end = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_SYSMMU_MDMA0_0,
.end = IRQ_SYSMMU_MDMA0_0,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = EXYNOS4_PA_SYSMMU_SSS,
.end = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[3] = {
.start = IRQ_SYSMMU_SSS_0,
.end = IRQ_SYSMMU_SSS_0,
.flags = IORESOURCE_IRQ,
},
[4] = {
.start = EXYNOS4_PA_SYSMMU_FIMC0,
.end = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[5] = {
.start = IRQ_SYSMMU_FIMC0_0,
.end = IRQ_SYSMMU_FIMC0_0,
.flags = IORESOURCE_IRQ,
},
[6] = {
.start = EXYNOS4_PA_SYSMMU_FIMC1,
.end = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[7] = {
.start = IRQ_SYSMMU_FIMC1_0,
.end = IRQ_SYSMMU_FIMC1_0,
.flags = IORESOURCE_IRQ,
},
[8] = {
.start = EXYNOS4_PA_SYSMMU_FIMC2,
.end = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[9] = {
.start = IRQ_SYSMMU_FIMC2_0,
.end = IRQ_SYSMMU_FIMC2_0,
.flags = IORESOURCE_IRQ,
},
[10] = {
.start = EXYNOS4_PA_SYSMMU_FIMC3,
.end = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[11] = {
.start = IRQ_SYSMMU_FIMC3_0,
.end = IRQ_SYSMMU_FIMC3_0,
.flags = IORESOURCE_IRQ,
},
[12] = {
.start = EXYNOS4_PA_SYSMMU_JPEG,
.end = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[13] = {
.start = IRQ_SYSMMU_JPEG_0,
.end = IRQ_SYSMMU_JPEG_0,
.flags = IORESOURCE_IRQ,
},
[14] = {
.start = EXYNOS4_PA_SYSMMU_FIMD0,
.end = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[15] = {
.start = IRQ_SYSMMU_LCD0_M0_0,
.end = IRQ_SYSMMU_LCD0_M0_0,
.flags = IORESOURCE_IRQ,
},
[16] = {
.start = EXYNOS4_PA_SYSMMU_FIMD1,
.end = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[17] = {
.start = IRQ_SYSMMU_LCD1_M1_0,
.end = IRQ_SYSMMU_LCD1_M1_0,
.flags = IORESOURCE_IRQ,
},
[18] = {
.start = EXYNOS4_PA_SYSMMU_PCIe,
.end = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[19] = {
.start = IRQ_SYSMMU_PCIE_0,
.end = IRQ_SYSMMU_PCIE_0,
.flags = IORESOURCE_IRQ,
},
[20] = {
.start = EXYNOS4_PA_SYSMMU_G2D,
.end = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[21] = {
.start = IRQ_SYSMMU_2D_0,
.end = IRQ_SYSMMU_2D_0,
.flags = IORESOURCE_IRQ,
},
[22] = {
.start = EXYNOS4_PA_SYSMMU_ROTATOR,
.end = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[23] = {
.start = IRQ_SYSMMU_ROTATOR_0,
.end = IRQ_SYSMMU_ROTATOR_0,
.flags = IORESOURCE_IRQ,
},
[24] = {
.start = EXYNOS4_PA_SYSMMU_MDMA2,
.end = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[25] = {
.start = IRQ_SYSMMU_MDMA1_0,
.end = IRQ_SYSMMU_MDMA1_0,
.flags = IORESOURCE_IRQ,
},
[26] = {
.start = EXYNOS4_PA_SYSMMU_TV,
.end = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[27] = {
.start = IRQ_SYSMMU_TV_M0_0,
.end = IRQ_SYSMMU_TV_M0_0,
.flags = IORESOURCE_IRQ,
},
[28] = {
.start = EXYNOS4_PA_SYSMMU_MFC_L,
.end = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[29] = {
.start = IRQ_SYSMMU_MFC_M0_0,
.end = IRQ_SYSMMU_MFC_M0_0,
.flags = IORESOURCE_IRQ,
},
[30] = {
.start = EXYNOS4_PA_SYSMMU_MFC_R,
.end = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[31] = {
.start = IRQ_SYSMMU_MFC_M1_0,
.end = IRQ_SYSMMU_MFC_M1_0,
.flags = IORESOURCE_IRQ,
},
};
static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
#define SYSMMU_PLATFORM_DEVICE(ipname, devid) \
static struct sysmmu_platform_data platdata_##ipname = { \
.dbgname = #ipname, \
}; \
struct platform_device SYSMMU_PLATDEV(ipname) = \
{ \
.name = SYSMMU_DEVNAME_BASE, \
.id = devid, \
.dev = { \
.dma_mask = &exynos_sysmmu_dma_mask, \
.coherent_dma_mask = DMA_BIT_MASK(32), \
.platform_data = &platdata_##ipname, \
}, \
}
SYSMMU_PLATFORM_DEVICE(mfc_l, 0);
SYSMMU_PLATFORM_DEVICE(mfc_r, 1);
SYSMMU_PLATFORM_DEVICE(tv, 2);
SYSMMU_PLATFORM_DEVICE(jpeg, 3);
SYSMMU_PLATFORM_DEVICE(rot, 4);
SYSMMU_PLATFORM_DEVICE(fimc0, 5); /* fimc* and gsc* exist exclusively */
SYSMMU_PLATFORM_DEVICE(fimc1, 6);
SYSMMU_PLATFORM_DEVICE(fimc2, 7);
SYSMMU_PLATFORM_DEVICE(fimc3, 8);
SYSMMU_PLATFORM_DEVICE(gsc0, 5);
SYSMMU_PLATFORM_DEVICE(gsc1, 6);
SYSMMU_PLATFORM_DEVICE(gsc2, 7);
SYSMMU_PLATFORM_DEVICE(gsc3, 8);
SYSMMU_PLATFORM_DEVICE(isp, 9);
SYSMMU_PLATFORM_DEVICE(fimd0, 10);
SYSMMU_PLATFORM_DEVICE(fimd1, 11);
SYSMMU_PLATFORM_DEVICE(camif0, 12);
SYSMMU_PLATFORM_DEVICE(camif1, 13);
SYSMMU_PLATFORM_DEVICE(2d, 14);
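/*
 * Illustrative expansion (editorial sketch, not part of this patch):
 * SYSMMU_PLATFORM_DEVICE(mfc_l, 0) above defines, roughly, the following
 * two objects; SYSMMU_PLATDEV(mfc_l) names the platform device
 * exynos_device_sysmmu_mfc_l declared in <mach/sysmmu.h>.
 */
#if 0	/* for reference only; the macro instances above already define these */
static struct sysmmu_platform_data platdata_mfc_l = {
	.dbgname = "mfc_l",
};
struct platform_device exynos_device_sysmmu_mfc_l = {
	.name		= SYSMMU_DEVNAME_BASE,	/* "exynos-sysmmu" */
	.id		= 0,
	.dev		= {
		.dma_mask		= &exynos_sysmmu_dma_mask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &platdata_mfc_l,
	},
};
#endif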
#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
#define SYSMMU_RESOURCE(core, ipname) \
static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
#define DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem), \
DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq) \
SYSMMU_RESOURCE(core, ipname) { \
DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
}
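/*
 * Illustrative expansion (editorial sketch, not part of this patch):
 * SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG) used below becomes,
 * roughly, one MEM/IRQ resource pair named after the IP block.
 */
#if 0	/* for reference only */
static struct resource sysmmures_EXYNOS4_jpeg[] __initdata = {
	DEFINE_RES_MEM_NAMED(EXYNOS4_PA_SYSMMU_JPEG, SZ_4K, "JPEG"),
	DEFINE_RES_IRQ_NAMED(EXYNOS4_IRQ_SYSMMU_JPEG_0, "JPEG")
};
#endif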
struct platform_device exynos4_device_sysmmu = {
.name = "s5p-sysmmu",
.id = 32,
.num_resources = ARRAY_SIZE(exynos4_sysmmu_resource),
.resource = exynos4_sysmmu_resource,
struct sysmmu_resource_map {
struct platform_device *pdev;
struct resource *res;
u32 rnum;
struct device *pdd;
char *clocknames;
};
EXPORT_SYMBOL(exynos4_device_sysmmu);
static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM];
void sysmmu_clk_init(struct device *dev, sysmmu_ips ips)
{
sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]);
if (IS_ERR(sysmmu_clk[ips]))
sysmmu_clk[ips] = NULL;
else
clk_put(sysmmu_clk[ips]);
#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) { \
.pdev = &SYSMMU_PLATDEV(ipname), \
.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
.clocknames = SYSMMU_CLOCK_NAME, \
}
void sysmmu_clk_enable(sysmmu_ips ips)
{
if (sysmmu_clk[ips])
clk_enable(sysmmu_clk[ips]);
#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) { \
.pdev = &SYSMMU_PLATDEV(ipname), \
.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
.clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
}
#ifdef CONFIG_EXYNOS_DEV_PD
#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) { \
.pdev = &SYSMMU_PLATDEV(ipname), \
.res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
.clocknames = SYSMMU_CLOCK_NAME, \
.pdd = &exynos##core##_device_pd[pd].dev, \
}
#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
.pdev = &SYSMMU_PLATDEV(ipname), \
.res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
.rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
.clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
.pdd = &exynos##core##_device_pd[pd].dev, \
}
#else
#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) \
SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
#endif /* CONFIG_EXYNOS_DEV_PD */
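/*
 * Illustrative expansion (editorial sketch, not part of this patch):
 * SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg), as used in sysmmu_resmap5[] below,
 * is equivalent to the following initializer.
 */
#if 0	/* for reference only */
{
	.pdev		= &exynos_device_sysmmu_jpeg,
	.res		= sysmmures_EXYNOS5_jpeg,
	.rnum		= ARRAY_SIZE(sysmmures_EXYNOS5_jpeg),
	.clocknames	= SYSMMU_CLOCK_NAME,	/* "sysmmu" */
}
#endif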
#ifdef CONFIG_ARCH_EXYNOS4
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0, FIMC0);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1, FIMC1);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2, FIMC2);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3, FIMC3);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d, G2D, 2D);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv, TV, TV_M0);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp, 2D_ACP, 2D);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot, ROTATOR, ROTATOR);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0, LCD0_M0);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1, LCD1_M1);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0, FIMC_LITE0, FIMC_LITE0);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1, FIMC_LITE1, FIMC_LITE1);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R, MFC_M0);
SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L, MFC_M1);
SYSMMU_RESOURCE(EXYNOS4, isp) {
DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
};
static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
SYSMMU_RESOURCE_MAPPING_PD(4, fimc0, fimc0, PD_CAM),
SYSMMU_RESOURCE_MAPPING_PD(4, fimc1, fimc1, PD_CAM),
SYSMMU_RESOURCE_MAPPING_PD(4, fimc2, fimc2, PD_CAM),
SYSMMU_RESOURCE_MAPPING_PD(4, fimc3, fimc3, PD_CAM),
SYSMMU_RESOURCE_MAPPING_PD(4, tv, tv, PD_TV),
SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r, mfc_r, PD_MFC),
SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l, mfc_l, PD_MFC),
SYSMMU_RESOURCE_MAPPING_PD(4, rot, rot, PD_LCD0),
SYSMMU_RESOURCE_MAPPING_PD(4, jpeg, jpeg, PD_CAM),
SYSMMU_RESOURCE_MAPPING_PD(4, fimd0, fimd0, PD_LCD0),
};
static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
SYSMMU_RESOURCE_MAPPING_PD(4, 2d, 2d, PD_LCD0),
SYSMMU_RESOURCE_MAPPING_PD(4, fimd1, fimd1, PD_LCD1),
};
static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
SYSMMU_RESOURCE_MAPPING(4, 2d, 2d_acp),
SYSMMU_RESOURCE_MAPPING_PD(4, camif0, flite0, PD_ISP),
SYSMMU_RESOURCE_MAPPING_PD(4, camif1, flite1, PD_ISP),
SYSMMU_RESOURCE_MAPPING_PD(4, isp, isp, PD_ISP),
};
#endif /* CONFIG_ARCH_EXYNOS4 */
void sysmmu_clk_disable(sysmmu_ips ips)
#ifdef CONFIG_ARCH_EXYNOS5
SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg, JPEG, JPEG);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1, FIMD1);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d, 2D, 2D);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot, ROTATOR, ROTATOR);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv, TV, TV);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0, LITE0, LITE0);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1, LITE1, LITE1);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0, GSC0, GSC0);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1, GSC1, GSC1);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2, GSC2, GSC2);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3, GSC3, GSC3);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R, MFC_R);
SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L, MFC_L);
SYSMMU_RESOURCE(EXYNOS5, isp) {
DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
};
static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg),
SYSMMU_RESOURCE_MAPPING(5, fimd1, fimd1),
SYSMMU_RESOURCE_MAPPING(5, 2d, 2d),
SYSMMU_RESOURCE_MAPPING(5, rot, rot),
SYSMMU_RESOURCE_MAPPING_PD(5, tv, tv, PD_DISP1),
SYSMMU_RESOURCE_MAPPING_PD(5, camif0, flite0, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, camif1, flite1, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, gsc0, gsc0, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, gsc1, gsc1, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, gsc2, gsc2, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, gsc3, gsc3, PD_GSCL),
SYSMMU_RESOURCE_MAPPING_PD(5, mfc_r, mfc_r, PD_MFC),
SYSMMU_RESOURCE_MAPPING_PD(5, mfc_l, mfc_l, PD_MFC),
SYSMMU_RESOURCE_MAPPING_MCPD(5, isp, isp, PD_ISP, mc_platdata),
};
#endif /* CONFIG_ARCH_EXYNOS5 */
static int __init init_sysmmu_platform_device(void)
{
if (sysmmu_clk[ips])
clk_disable(sysmmu_clk[ips]);
int i, j;
struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
int nmap[2] = {0, 0};
#ifdef CONFIG_ARCH_EXYNOS5
if (soc_is_exynos5250()) {
resmap[0] = sysmmu_resmap5;
nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
nmap[1] = 0;
}
#endif
#ifdef CONFIG_ARCH_EXYNOS4
if (resmap[0] == NULL) {
resmap[0] = sysmmu_resmap4;
nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
}
if (soc_is_exynos4210()) {
resmap[1] = sysmmu_resmap4210;
nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
}
if (soc_is_exynos4412() || soc_is_exynos4212()) {
resmap[1] = sysmmu_resmap4212;
nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
}
#endif
for (j = 0; j < 2; j++) {
for (i = 0; i < nmap[j]; i++) {
struct sysmmu_resource_map *map;
struct sysmmu_platform_data *platdata;
map = &resmap[j][i];
map->pdev->dev.parent = map->pdd;
platdata = map->pdev->dev.platform_data;
platdata->clockname = map->clocknames;
if (platform_device_add_resources(map->pdev, map->res,
map->rnum)) {
pr_err("%s: Failed to add device resources for "
"%s.%d\n", __func__,
map->pdev->name, map->pdev->id);
continue;
}
if (platform_device_register(map->pdev)) {
pr_err("%s: Failed to register %s.%d\n",
__func__, map->pdev->name,
map->pdev->id);
}
}
}
return 0;
}
arch_initcall(init_sysmmu_platform_device);
......@@ -154,6 +154,13 @@
#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6)
#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7)
#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0 COMBINER_IRQ(16, 0)
#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0 COMBINER_IRQ(16, 1)
#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0 COMBINER_IRQ(16, 2)
#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0 COMBINER_IRQ(16, 3)
#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0 COMBINER_IRQ(16, 4)
#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0 COMBINER_IRQ(16, 5)
#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0)
#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1)
#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2)
......@@ -220,24 +227,6 @@
#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD
#define IRQ_PMU EXYNOS4_IRQ_PMU
#define IRQ_SYSMMU_MDMA0_0 EXYNOS4_IRQ_SYSMMU_MDMA0_0
#define IRQ_SYSMMU_SSS_0 EXYNOS4_IRQ_SYSMMU_SSS_0
#define IRQ_SYSMMU_FIMC0_0 EXYNOS4_IRQ_SYSMMU_FIMC0_0
#define IRQ_SYSMMU_FIMC1_0 EXYNOS4_IRQ_SYSMMU_FIMC1_0
#define IRQ_SYSMMU_FIMC2_0 EXYNOS4_IRQ_SYSMMU_FIMC2_0
#define IRQ_SYSMMU_FIMC3_0 EXYNOS4_IRQ_SYSMMU_FIMC3_0
#define IRQ_SYSMMU_JPEG_0 EXYNOS4_IRQ_SYSMMU_JPEG_0
#define IRQ_SYSMMU_2D_0 EXYNOS4_IRQ_SYSMMU_2D_0
#define IRQ_SYSMMU_ROTATOR_0 EXYNOS4_IRQ_SYSMMU_ROTATOR_0
#define IRQ_SYSMMU_MDMA1_0 EXYNOS4_IRQ_SYSMMU_MDMA1_0
#define IRQ_SYSMMU_LCD0_M0_0 EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
#define IRQ_SYSMMU_LCD1_M1_0 EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
#define IRQ_SYSMMU_TV_M0_0 EXYNOS4_IRQ_SYSMMU_TV_M0_0
#define IRQ_SYSMMU_MFC_M0_0 EXYNOS4_IRQ_SYSMMU_MFC_M0_0
#define IRQ_SYSMMU_MFC_M1_0 EXYNOS4_IRQ_SYSMMU_MFC_M1_0
#define IRQ_SYSMMU_PCIE_0 EXYNOS4_IRQ_SYSMMU_PCIE_0
#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO
#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC
#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM
......
......@@ -95,6 +95,7 @@
#define EXYNOS5_PA_PDMA1 0x121B0000
#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
#define EXYNOS4_PA_SYSMMU_2D_ACP 0x10A40000
#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000
#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000
......@@ -103,6 +104,12 @@
#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000
#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000
#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000
#define EXYNOS4_PA_SYSMMU_FIMC_ISP 0x12260000
#define EXYNOS4_PA_SYSMMU_FIMC_DRC 0x12270000
#define EXYNOS4_PA_SYSMMU_FIMC_FD 0x122A0000
#define EXYNOS4_PA_SYSMMU_ISPCPU 0x122B0000
#define EXYNOS4_PA_SYSMMU_FIMC_LITE0 0x123B0000
#define EXYNOS4_PA_SYSMMU_FIMC_LITE1 0x123C0000
#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000
#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000
#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000
......@@ -110,6 +117,37 @@
#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
#define EXYNOS5_PA_SYSMMU_MDMA1 0x10A40000
#define EXYNOS5_PA_SYSMMU_SSS 0x10A50000
#define EXYNOS5_PA_SYSMMU_2D 0x10A60000
#define EXYNOS5_PA_SYSMMU_MFC_L 0x11200000
#define EXYNOS5_PA_SYSMMU_MFC_R 0x11210000
#define EXYNOS5_PA_SYSMMU_ROTATOR 0x11D40000
#define EXYNOS5_PA_SYSMMU_MDMA2 0x11D50000
#define EXYNOS5_PA_SYSMMU_JPEG 0x11F20000
#define EXYNOS5_PA_SYSMMU_IOP 0x12360000
#define EXYNOS5_PA_SYSMMU_RTIC 0x12370000
#define EXYNOS5_PA_SYSMMU_GPS 0x12630000
#define EXYNOS5_PA_SYSMMU_ISP 0x13260000
#define EXYNOS5_PA_SYSMMU_DRC 0x12370000
#define EXYNOS5_PA_SYSMMU_SCALERC 0x13280000
#define EXYNOS5_PA_SYSMMU_SCALERP 0x13290000
#define EXYNOS5_PA_SYSMMU_FD 0x132A0000
#define EXYNOS5_PA_SYSMMU_ISPCPU 0x132B0000
#define EXYNOS5_PA_SYSMMU_ODC 0x132C0000
#define EXYNOS5_PA_SYSMMU_DIS0 0x132D0000
#define EXYNOS5_PA_SYSMMU_DIS1 0x132E0000
#define EXYNOS5_PA_SYSMMU_3DNR 0x132F0000
#define EXYNOS5_PA_SYSMMU_LITE0 0x13C40000
#define EXYNOS5_PA_SYSMMU_LITE1 0x13C50000
#define EXYNOS5_PA_SYSMMU_GSC0 0x13E80000
#define EXYNOS5_PA_SYSMMU_GSC1 0x13E90000
#define EXYNOS5_PA_SYSMMU_GSC2 0x13EA0000
#define EXYNOS5_PA_SYSMMU_GSC3 0x13EB0000
#define EXYNOS5_PA_SYSMMU_FIMD1 0x14640000
#define EXYNOS5_PA_SYSMMU_TV 0x14650000
#define EXYNOS4_PA_SPI0 0x13920000
#define EXYNOS4_PA_SPI1 0x13930000
#define EXYNOS4_PA_SPI2 0x13940000
......
......@@ -135,6 +135,9 @@
#define EXYNOS4_CLKGATE_SCLKCPU EXYNOS_CLKREG(0x14800)
#define EXYNOS4_CLKGATE_IP_CPU EXYNOS_CLKREG(0x14900)
#define EXYNOS4_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x18800)
#define EXYNOS4_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x18804)
#define EXYNOS4_APLL_LOCKTIME (0x1C20) /* 300us */
#define EXYNOS4_APLLCON0_ENABLE_SHIFT (31)
......@@ -303,6 +306,8 @@
#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
#define EXYNOS5_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x0C804)
#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
......
/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - System MMU register
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_REGS_SYSMMU_H
#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
#define S5P_MMU_CTRL 0x000
#define S5P_MMU_CFG 0x004
#define S5P_MMU_STATUS 0x008
#define S5P_MMU_FLUSH 0x00C
#define S5P_PT_BASE_ADDR 0x014
#define S5P_INT_STATUS 0x018
#define S5P_INT_CLEAR 0x01C
#define S5P_PAGE_FAULT_ADDR 0x024
#define S5P_AW_FAULT_ADDR 0x028
#define S5P_AR_FAULT_ADDR 0x02C
#define S5P_DEFAULT_SLAVE_ADDR 0x030
#endif /* __ASM_ARCH_REGS_SYSMMU_H */
/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
/*
* Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung sysmmu driver for EXYNOS4
* EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_ARCH_SYSMMU_H
#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
enum exynos4_sysmmu_ips {
SYSMMU_MDMA,
SYSMMU_SSS,
SYSMMU_FIMC0,
SYSMMU_FIMC1,
SYSMMU_FIMC2,
SYSMMU_FIMC3,
SYSMMU_JPEG,
SYSMMU_FIMD0,
SYSMMU_FIMD1,
SYSMMU_PCIe,
SYSMMU_G2D,
SYSMMU_ROTATOR,
SYSMMU_MDMA2,
SYSMMU_TV,
SYSMMU_MFC_L,
SYSMMU_MFC_R,
EXYNOS4_SYSMMU_TOTAL_IPNUM,
*/
#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
#define _ARM_MACH_EXYNOS_SYSMMU_H_
struct sysmmu_platform_data {
char *dbgname;
/* comma-separated list of clock names for clock gating */
char *clockname;
};
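/*
 * Example (editorial sketch, not part of this patch): the platform data that
 * the ISP System MMU ends up with, since its mapping in dev-sysmmu.c joins
 * SYSMMU_CLOCK_NAME and SYSMMU_CLOCK_NAME2 with a comma. The variable name is
 * hypothetical; __maybe_unused only keeps the sketch warning-free.
 */
static struct sysmmu_platform_data example_isp_platdata __maybe_unused = {
	.dbgname	= "isp",
	.clockname	= "sysmmu,sysmmu_mc",
};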
#define S5P_SYSMMU_TOTAL_IPNUM EXYNOS4_SYSMMU_TOTAL_IPNUM
#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
#define SYSMMU_CLOCK_NAME "sysmmu"
#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
#ifdef CONFIG_EXYNOS_DEV_SYSMMU
#include <linux/device.h>
struct platform_device;
#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
extern struct platform_device SYSMMU_PLATDEV(mfc_l);
extern struct platform_device SYSMMU_PLATDEV(mfc_r);
extern struct platform_device SYSMMU_PLATDEV(tv);
extern struct platform_device SYSMMU_PLATDEV(jpeg);
extern struct platform_device SYSMMU_PLATDEV(rot);
extern struct platform_device SYSMMU_PLATDEV(fimc0);
extern struct platform_device SYSMMU_PLATDEV(fimc1);
extern struct platform_device SYSMMU_PLATDEV(fimc2);
extern struct platform_device SYSMMU_PLATDEV(fimc3);
extern struct platform_device SYSMMU_PLATDEV(gsc0);
extern struct platform_device SYSMMU_PLATDEV(gsc1);
extern struct platform_device SYSMMU_PLATDEV(gsc2);
extern struct platform_device SYSMMU_PLATDEV(gsc3);
extern struct platform_device SYSMMU_PLATDEV(isp);
extern struct platform_device SYSMMU_PLATDEV(fimd0);
extern struct platform_device SYSMMU_PLATDEV(fimd1);
extern struct platform_device SYSMMU_PLATDEV(camif0);
extern struct platform_device SYSMMU_PLATDEV(camif1);
extern struct platform_device SYSMMU_PLATDEV(2d);
extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM];
#ifdef CONFIG_IOMMU_API
static inline void platform_set_sysmmu(
struct device *sysmmu, struct device *dev)
{
dev->archdata.iommu = sysmmu;
}
#endif
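/*
 * Illustrative usage (editorial sketch, not part of this patch): board setup
 * code could bind a master device to its System MMU so the IOMMU driver can
 * reach it through dev->archdata.iommu. s5p_device_mfc_l comes from
 * <plat/devs.h>; this particular call site is hypothetical.
 */
static inline void example_bind_mfc_l_sysmmu(void)
{
	platform_set_sysmmu(&SYSMMU_PLATDEV(mfc_l).dev, &s5p_device_mfc_l.dev);
}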
typedef enum exynos4_sysmmu_ips sysmmu_ips;
#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
#define platform_set_sysmmu(dev, sysmmu) do { } while (0)
#endif
void sysmmu_clk_init(struct device *dev, sysmmu_ips ips);
void sysmmu_clk_enable(sysmmu_ips ips);
void sysmmu_clk_disable(sysmmu_ips ips);
#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
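/*
 * Illustrative sketch (editorial, not part of this patch): how the clock
 * naming scheme above ties together. SYSMMU_CLOCK_DEVNAME(mfc_l, 0) expands
 * to "exynos-sysmmu.0", which is exactly dev_name() of SYSMMU_PLATDEV(mfc_l),
 * so a System MMU instance can look up its gate clock with a plain clk_get()
 * using the "sysmmu" connection id. Assumes <linux/clk.h> is available.
 */
static inline struct clk *example_sysmmu_clk_get(struct device *sysmmu_dev)
{
	return clk_get(sysmmu_dev, SYSMMU_CLOCK_NAME);
}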
#endif /* __ASM_ARM_ARCH_SYSMMU_H */
#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
......@@ -157,7 +157,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
&s3c_device_hsmmc3,
&s3c_device_rtc,
&s3c_device_wdt,
&exynos4_device_sysmmu,
&samsung_asoc_dma,
&armlex4210_smsc911x,
&exynos4_device_ahci,
......
......@@ -281,7 +281,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_spdif,
&exynos4_device_sysmmu,
&samsung_asoc_dma,
&samsung_asoc_idma,
&s5p_device_fimd0,
......
......@@ -50,14 +50,6 @@ config S5P_PM
Common code for power management support on S5P and newer SoCs
Note: Do not select this for S5P6440 and S5P6450.
comment "System MMU"
config S5P_SYSTEM_MMU
bool "S5P SYSTEM MMU"
depends on ARCH_EXYNOS4
help
Say Y here if you want to enable System MMU
config S5P_SLEEP
bool
help
......
......@@ -16,7 +16,6 @@ obj-y += clock.o
obj-y += irq.o
obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
obj-$(CONFIG_S5P_PM) += pm.o irq-pm.o
obj-$(CONFIG_S5P_SLEEP) += sleep.o
obj-$(CONFIG_S5P_HRT) += s5p-time.o
......
/* linux/arch/arm/plat-s5p/sysmmu.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <asm/pgtable.h>
#include <mach/map.h>
#include <mach/regs-sysmmu.h>
#include <plat/sysmmu.h>
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
static struct device *dev;
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
S5P_PAGE_FAULT_ADDR,
S5P_AR_FAULT_ADDR,
S5P_AW_FAULT_ADDR,
S5P_DEFAULT_SLAVE_ADDR,
S5P_AR_FAULT_ADDR,
S5P_AR_FAULT_ADDR,
S5P_AW_FAULT_ADDR,
S5P_AW_FAULT_ADDR
};
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
"PAGE FAULT",
"AR MULTI-HIT FAULT",
"AW MULTI-HIT FAULT",
"BUS ERROR",
"AR SECURITY PROTECTION FAULT",
"AR ACCESS PROTECTION FAULT",
"AW SECURITY PROTECTION FAULT",
"AW ACCESS PROTECTION FAULT"
};
static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
enum S5P_SYSMMU_INTERRUPT_TYPE itype,
unsigned long pgtable_base,
unsigned long fault_addr);
/*
* If the two adjacent bits for an IP are set, its System MMU is enabled;
* otherwise it is disabled.
*/
static unsigned long sysmmu_states;
static inline void set_sysmmu_active(sysmmu_ips ips)
{
sysmmu_states |= 3 << (ips * 2);
}
static inline void set_sysmmu_inactive(sysmmu_ips ips)
{
sysmmu_states &= ~(3 << (ips * 2));
}
static inline int is_sysmmu_active(sysmmu_ips ips)
{
return sysmmu_states & (3 << (ips * 2));
}
static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
static inline void sysmmu_block(sysmmu_ips ips)
{
__raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
}
static inline void sysmmu_unblock(sysmmu_ips ips)
{
__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
}
static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
{
__raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
}
static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
{
if (unlikely(pgd == 0)) {
pgd = (unsigned long)ZERO_PAGE(0);
__raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
} else {
__raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
}
__raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
sysmmu_ips_name[ips], pgd);
__sysmmu_tlb_invalidate(ips);
}
void sysmmu_set_fault_handler(sysmmu_ips ips,
int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
unsigned long pgtable_base,
unsigned long fault_addr))
{
BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
fault_handlers[ips] = handler;
}
static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
{
/* The System MMU is in a blocked state when a fault interrupt occurs. */
unsigned long base = 0;
sysmmu_ips ips = (sysmmu_ips)dev_id;
enum S5P_SYSMMU_INTERRUPT_TYPE itype;
itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
__ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
BUG_ON(!((itype >= 0) && (itype < 8)));
dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
sysmmu_ips_name[ips]);
if (fault_handlers[ips]) {
unsigned long addr;
base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
if (fault_handlers[ips](itype, base, addr)) {
__raw_writel(1 << itype,
sysmmusfrs[ips] + S5P_INT_CLEAR);
dev_notice(dev, "%s from %s is resolved."
" Retrying translation.\n",
sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
} else {
base = 0;
}
}
sysmmu_unblock(ips);
if (!base)
dev_notice(dev, "%s from %s is not handled.\n",
sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
return IRQ_HANDLED;
}
void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
{
if (is_sysmmu_active(ips)) {
sysmmu_block(ips);
__sysmmu_set_ptbase(ips, pgd);
sysmmu_unblock(ips);
} else {
dev_dbg(dev, "%s is disabled. "
"Skipping initializing page table base.\n",
sysmmu_ips_name[ips]);
}
}
void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
{
if (!is_sysmmu_active(ips)) {
sysmmu_clk_enable(ips);
__sysmmu_set_ptbase(ips, pgd);
__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
set_sysmmu_active(ips);
dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
} else {
dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
}
}
void s5p_sysmmu_disable(sysmmu_ips ips)
{
if (is_sysmmu_active(ips)) {
__raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
set_sysmmu_inactive(ips);
sysmmu_clk_disable(ips);
dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
} else {
dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
}
}
void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
{
if (is_sysmmu_active(ips)) {
sysmmu_block(ips);
__sysmmu_tlb_invalidate(ips);
sysmmu_unblock(ips);
} else {
dev_dbg(dev, "%s is disabled. "
"Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
}
}
static int s5p_sysmmu_probe(struct platform_device *pdev)
{
int i, ret;
struct resource *res, *mem;
dev = &pdev->dev;
for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
int irq;
sysmmu_clk_init(dev, i);
sysmmu_clk_disable(i);
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res) {
dev_err(dev, "Failed to get the resource of %s.\n",
sysmmu_ips_name[i]);
ret = -ENODEV;
goto err_res;
}
mem = request_mem_region(res->start, resource_size(res),
pdev->name);
if (!mem) {
dev_err(dev, "Failed to request the memory region of %s.\n",
sysmmu_ips_name[i]);
ret = -EBUSY;
goto err_res;
}
sysmmusfrs[i] = ioremap(res->start, resource_size(res));
if (!sysmmusfrs[i]) {
dev_err(dev, "Failed to ioremap() for %s.\n",
sysmmu_ips_name[i]);
ret = -ENXIO;
goto err_reg;
}
irq = platform_get_irq(pdev, i);
if (irq <= 0) {
dev_err(dev, "Failed to get the IRQ resource of %s.\n",
sysmmu_ips_name[i]);
ret = -ENOENT;
goto err_map;
}
if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
pdev->name, (void *)i)) {
dev_err(dev, "Failed to request IRQ for %s.\n",
sysmmu_ips_name[i]);
ret = -ENOENT;
goto err_map;
}
}
return 0;
err_map:
iounmap(sysmmusfrs[i]);
err_reg:
release_mem_region(mem->start, resource_size(mem));
err_res:
return ret;
}
static int s5p_sysmmu_remove(struct platform_device *pdev)
{
return 0;
}
int s5p_sysmmu_runtime_suspend(struct device *dev)
{
return 0;
}
int s5p_sysmmu_runtime_resume(struct device *dev)
{
return 0;
}
const struct dev_pm_ops s5p_sysmmu_pm_ops = {
.runtime_suspend = s5p_sysmmu_runtime_suspend,
.runtime_resume = s5p_sysmmu_runtime_resume,
};
static struct platform_driver s5p_sysmmu_driver = {
.probe = s5p_sysmmu_probe,
.remove = s5p_sysmmu_remove,
.driver = {
.owner = THIS_MODULE,
.name = "s5p-sysmmu",
.pm = &s5p_sysmmu_pm_ops,
}
};
static int __init s5p_sysmmu_init(void)
{
return platform_driver_register(&s5p_sysmmu_driver);
}
arch_initcall(s5p_sysmmu_init);
......@@ -133,7 +133,6 @@ extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_spdif;
extern struct platform_device exynos4_device_sysmmu;
extern struct platform_device samsung_asoc_dma;
extern struct platform_device samsung_asoc_idma;
......
/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung System MMU driver for S5P platform
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __PLAT_SAMSUNG_SYSMMU_H
#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
enum S5P_SYSMMU_INTERRUPT_TYPE {
SYSMMU_PAGEFAULT,
SYSMMU_AR_MULTIHIT,
SYSMMU_AW_MULTIHIT,
SYSMMU_BUSERROR,
SYSMMU_AR_SECURITY,
SYSMMU_AR_ACCESS,
SYSMMU_AW_SECURITY,
SYSMMU_AW_PROTECTION, /* 7 */
SYSMMU_FAULTS_NUM
};
#ifdef CONFIG_S5P_SYSTEM_MMU
#include <mach/sysmmu.h>
/**
* s5p_sysmmu_enable() - enable the System MMU of an IP block
* @ips: The IP connected to the System MMU.
* @pgd: Base physical address of the 1st level page table
*
* This function enables the System MMU to translate virtual addresses
* into physical addresses.
*/
void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
/**
* s5p_sysmmu_disable() - disable the System MMU of an IP block
* @ips: The IP connected to the System MMU.
*
* This function disables the System MMU so it no longer translates
* virtual addresses into physical addresses.
*/
void s5p_sysmmu_disable(sysmmu_ips ips);
/**
* s5p_sysmmu_set_tablebase_pgd() - set the page table base address
* @ips: The IP connected to the System MMU.
* @pgd: The page table base address.
*
* This function sets the page table base address. When the System MMU
* translates a virtual address into a physical address, it looks the
* translation up in this page table.
*/
void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
/**
* s5p_sysmmu_tlb_invalidate() - flush all TLB entries in the System MMU
* @ips: The IP connected to the System MMU.
*
* This function flushes all TLB entries in the System MMU.
*/
void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
/** s5p_sysmmu_set_fault_handler() - install a fault handler for a System MMU
* @itype: type of fault.
* @pgtable_base: the physical address of the page table base. This is 0 if
* @itype is SYSMMU_BUSERROR.
* @fault_addr: the device (virtual) address that the System MMU tried to
* translate. This is 0 if @itype is SYSMMU_BUSERROR.
* Called when a System MMU raises an interrupt.
* Device drivers of peripheral devices that have a System MMU can implement
* a fault handler to resolve address translation faults caused by the System MMU.
* The meanings of the return value and parameters are described below.
* Return value: non-zero if the fault is correctly resolved,
* zero if the fault is not handled.
*/
void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
unsigned long pgtable_base,
unsigned long fault_addr));
#else
#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
#define s5p_sysmmu_disable(ips) do { } while (0)
#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
#endif
#endif /* __ASM_PLAT_SYSMMU_H */
......@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
space through the SMMU (System Memory Management Unit)
hardware included on Tegra SoCs.
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
select IOMMU_API
help
Support for the IOMMU (System MMU) of the Samsung Exynos application
processor family. This enables H/W multimedia accelerators to see
non-linear physical memory chunks as linear memory in their
address spaces.
If unsure, say N here.
config EXYNOS_IOMMU_DEBUG
bool "Debugging log for Exynos IOMMU"
depends on EXYNOS_IOMMU
help
Select this to see the detailed log messages that show what
happens in the IOMMU driver.
Say N unless you need kernel log messages for IOMMU debugging.
endif # IOMMU_SUPPORT
......@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
/* linux/drivers/iommu/exynos_iommu.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <mach/sysmmu.h>
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12
#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)
#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) ((iova) & 0xFFFFF)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) ((iova) & 0xFFFF)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0xFFF)
#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES 256
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
#define REG_INT_STATUS 0x018
#define REG_INT_CLEAR 0x01C
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR 0x028
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
#define REG_MMU_VERSION 0x034
#define REG_PB0_SADDR 0x04C
#define REG_PB0_EADDR 0x050
#define REG_PB1_SADDR 0x054
#define REG_PB1_EADDR 0x058
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
return pgtable + lv1ent_offset(iova);
}
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
}
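/*
 * Illustrative sketch (editorial, not part of this patch): how an IOVA is
 * resolved through the two-level page table with the helpers and bit masks
 * defined above. Error handling is omitted; a fault entry yields 0.
 */
static phys_addr_t example_iova_to_phys(unsigned long *pgtable,
					unsigned long iova)
{
	unsigned long *sent = section_entry(pgtable, iova);

	if (lv1ent_section(sent))
		return section_phys(sent) + section_offs(iova);

	if (lv1ent_page(sent)) {
		unsigned long *pent = page_entry(sent, iova);

		if (lv2ent_large(pent))
			return lpage_phys(pent) + lpage_offs(iova);
		if (lv2ent_small(pent))
			return spage_phys(pent) + spage_offs(iova);
	}

	return 0;	/* lv1/lv2 fault entry: no mapping */
}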
enum exynos_sysmmu_inttype {
SYSMMU_PAGEFAULT,
SYSMMU_AR_MULTIHIT,
SYSMMU_AW_MULTIHIT,
SYSMMU_BUSERROR,
SYSMMU_AR_SECURITY,
SYSMMU_AR_ACCESS,
SYSMMU_AW_SECURITY,
SYSMMU_AW_PROTECTION, /* 7 */
SYSMMU_FAULT_UNKNOWN,
SYSMMU_FAULTS_NUM
};
/*
* @itype: type of fault.
* @pgtable_base: the physical address of the page table base. This is 0 if @itype
* is SYSMMU_BUSERROR.
* @fault_addr: the device (virtual) address that the System MMU tried to
* translate. This is 0 if @itype is SYSMMU_BUSERROR.
*/
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
unsigned long pgtable_base, unsigned long fault_addr);
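/*
 * Illustrative sketch (editorial, not part of this patch): a handler of this
 * type that just logs the fault. In exynos_sysmmu_irq() below, the interrupt
 * is cleared and translation resumes only when the handler returns 0, so a
 * real handler would repair the faulting mapping before doing so.
 */
static int example_log_fault(enum exynos_sysmmu_inttype itype,
			     unsigned long pgtable_base,
			     unsigned long fault_addr)
{
	pr_err("System MMU fault %d at %#lx (lv1 table at %#lx)\n",
	       itype, fault_addr, pgtable_base);
	return 0;	/* claim resolved so the interrupt is cleared */
}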
static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
REG_PAGE_FAULT_ADDR,
REG_AR_FAULT_ADDR,
REG_AW_FAULT_ADDR,
REG_DEFAULT_SLAVE_ADDR,
REG_AR_FAULT_ADDR,
REG_AR_FAULT_ADDR,
REG_AW_FAULT_ADDR,
REG_AW_FAULT_ADDR
};
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
"PAGE FAULT",
"AR MULTI-HIT FAULT",
"AW MULTI-HIT FAULT",
"BUS ERROR",
"AR SECURITY PROTECTION FAULT",
"AR ACCESS PROTECTION FAULT",
"AW SECURITY PROTECTION FAULT",
"AW ACCESS PROTECTION FAULT",
"UNKNOWN FAULT"
};
struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.node */
unsigned long *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
struct sysmmu_drvdata {
struct list_head node; /* entry of exynos_iommu_domain.clients */
struct device *sysmmu; /* System MMU's device descriptor */
struct device *dev; /* Owner of system MMU */
char *dbgname;
int nsfrs;
void __iomem **sfrbases;
struct clk *clk[2];
int activations;
rwlock_t lock;
struct iommu_domain *domain;
sysmmu_fault_handler_t fault_handler;
unsigned long pgtable;
};
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
/* return true if the System MMU was not active previously
and it needs to be initialized */
return ++data->activations == 1;
}
static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
/* return true if the System MMU needs to be disabled */
BUG_ON(data->activations < 1);
return --data->activations == 0;
}
static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
return data->activations > 0;
}
static void sysmmu_unblock(void __iomem *sfrbase)
{
__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
static bool sysmmu_block(void __iomem *sfrbase)
{
int i = 120;
__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
--i;
if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
sysmmu_unblock(sfrbase);
return false;
}
return true;
}
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
unsigned long iova)
{
__raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
}
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
unsigned long pgd)
{
__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
__sysmmu_tlb_invalidate(sfrbase);
}
static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
unsigned long size, int idx)
{
__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
__raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
}
void exynos_sysmmu_set_prefbuf(struct device *dev,
unsigned long base0, unsigned long size0,
unsigned long base1, unsigned long size1)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
unsigned long flags;
int i;
BUG_ON((base0 + size0) <= base0);
BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
read_lock_irqsave(&data->lock, flags);
if (!is_sysmmu_active(data))
goto finish;
for (i = 0; i < data->nsfrs; i++) {
if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
if (!sysmmu_block(data->sfrbases[i]))
continue;
if (size1 == 0) {
if (size0 <= SZ_128K) {
base1 = base0;
size1 = size0;
} else {
size1 = size0 -
ALIGN(size0 / 2, SZ_64K);
size0 = size0 - size1;
base1 = base0 + size0;
}
}
__sysmmu_set_prefbuf(
data->sfrbases[i], base0, size0, 0);
__sysmmu_set_prefbuf(
data->sfrbases[i], base1, size1, 1);
sysmmu_unblock(data->sfrbases[i]);
}
}
finish:
read_unlock_irqrestore(&data->lock, flags);
}
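/*
 * Illustrative usage (editorial sketch, not part of this patch): a master
 * driver with distinct source and destination DMA windows could dedicate one
 * prefetch buffer to each; this only takes effect on System MMU v3 instances,
 * as checked above. dev is the master device whose archdata.iommu points at
 * its System MMU; the window values are hypothetical.
 */
static void example_split_prefbuf(struct device *dev,
				  dma_addr_t src, size_t src_len,
				  dma_addr_t dst, size_t dst_len)
{
	exynos_sysmmu_set_prefbuf(dev, src, src_len, dst, dst_len);
}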
static void __set_fault_handler(struct sysmmu_drvdata *data,
sysmmu_fault_handler_t handler)
{
unsigned long flags;
write_lock_irqsave(&data->lock, flags);
data->fault_handler = handler;
write_unlock_irqrestore(&data->lock, flags);
}
void exynos_sysmmu_set_fault_handler(struct device *dev,
sysmmu_fault_handler_t handler)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
__set_fault_handler(data, handler);
}
static int default_fault_handler(enum exynos_sysmmu_inttype itype,
unsigned long pgtable_base, unsigned long fault_addr)
{
unsigned long *ent;
if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
itype = SYSMMU_FAULT_UNKNOWN;
pr_err("%s occurred at 0x%lx (Page table base: 0x%lx)\n",
sysmmu_fault_name[itype], fault_addr, pgtable_base);
ent = section_entry(__va(pgtable_base), fault_addr);
pr_err("\tLv1 entry: 0x%lx\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
pr_err("\t Lv2 entry: 0x%lx\n", *ent);
}
pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
BUG();
return 0;
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
/* The System MMU is in a blocked state when a fault interrupt occurs. */
struct sysmmu_drvdata *data = dev_id;
struct resource *irqres;
struct platform_device *pdev;
enum exynos_sysmmu_inttype itype;
unsigned long addr = -1;
int i, ret = -ENOSYS;
read_lock(&data->lock);
WARN_ON(!is_sysmmu_active(data));
pdev = to_platform_device(data->sysmmu);
for (i = 0; i < (pdev->num_resources / 2); i++) {
irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (irqres && ((int)irqres->start == irq))
break;
}
if (i == pdev->num_resources) {
itype = SYSMMU_FAULT_UNKNOWN;
} else {
itype = (enum exynos_sysmmu_inttype)
__ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
itype = SYSMMU_FAULT_UNKNOWN;
else
addr = __raw_readl(
data->sfrbases[i] + fault_reg_offset[itype]);
}
if (data->domain)
ret = report_iommu_fault(data->domain, data->dev,
addr, itype);
if ((ret == -ENOSYS) && data->fault_handler) {
unsigned long base = data->pgtable;
if (itype != SYSMMU_FAULT_UNKNOWN)
base = __raw_readl(
data->sfrbases[i] + REG_PT_BASE_ADDR);
ret = data->fault_handler(itype, base, addr);
}
if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
__raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
else
dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
data->dbgname, sysmmu_fault_name[itype]);
if (itype != SYSMMU_FAULT_UNKNOWN)
sysmmu_unblock(data->sfrbases[i]);
read_unlock(&data->lock);
return IRQ_HANDLED;
}
static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
unsigned long flags;
bool disabled = false;
int i;
write_lock_irqsave(&data->lock, flags);
if (!set_sysmmu_inactive(data))
goto finish;
for (i = 0; i < data->nsfrs; i++)
__raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
if (data->clk[1])
clk_disable(data->clk[1]);
if (data->clk[0])
clk_disable(data->clk[0]);
disabled = true;
data->pgtable = 0;
data->domain = NULL;
finish:
write_unlock_irqrestore(&data->lock, flags);
if (disabled)
dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
else
dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
data->dbgname, data->activations);
return disabled;
}
/* __exynos_sysmmu_enable: Enables System MMU
*
* Returns a negative error value if an error occurred and the System MMU is
* not enabled, 0 if the System MMU has just been enabled, and 1 if it was
* already enabled before.
*/
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
unsigned long pgtable, struct iommu_domain *domain)
{
int i, ret = 0;
unsigned long flags;
write_lock_irqsave(&data->lock, flags);
if (!set_sysmmu_active(data)) {
if (WARN_ON(pgtable != data->pgtable)) {
ret = -EBUSY;
set_sysmmu_inactive(data);
} else {
ret = 1;
}
dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
goto finish;
}
if (data->clk[0])
clk_enable(data->clk[0]);
if (data->clk[1])
clk_enable(data->clk[1]);
data->pgtable = pgtable;
for (i = 0; i < data->nsfrs; i++) {
__sysmmu_set_ptbase(data->sfrbases[i], pgtable);
if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
/* System MMU version is 3.x */
__raw_writel((1 << 12) | (2 << 28),
data->sfrbases[i] + REG_MMU_CFG);
__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
__sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
}
__raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
}
data->domain = domain;
dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
write_unlock_irqrestore(&data->lock, flags);
return ret;
}
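/*
 * exynos_sysmmu_enable()/exynos_sysmmu_disable() are the entry points for
 * client drivers that drive a System MMU with their own page table instead
 * of going through the IOMMU API.  A minimal caller sketch, where the client
 * device 'dev' and the physical page-table address 'pgd_pa' are hypothetical
 * names used only to show the expected pairing:
 *
 *	ret = exynos_sysmmu_enable(dev, pgd_pa);
 *	if (ret < 0)
 *		return ret;
 *	... the device now performs DMA through the System MMU ...
 *	exynos_sysmmu_disable(dev);
 *
 * A negative return means the System MMU was left disabled; 0 or 1 mean it
 * is enabled and each such call must later be balanced with
 * exynos_sysmmu_disable().
 */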
int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
int ret;
BUG_ON(!memblock_is_memory(pgtable));
ret = pm_runtime_get_sync(data->sysmmu);
if (ret < 0) {
dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
return ret;
}
ret = __exynos_sysmmu_enable(data, pgtable, NULL);
if (WARN_ON(ret < 0)) {
pm_runtime_put(data->sysmmu);
dev_err(data->sysmmu,
"(%s) Already enabled with page table %#lx\n",
data->dbgname, data->pgtable);
} else {
data->dev = dev;
}
return ret;
}
bool exynos_sysmmu_disable(struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
bool disabled;
disabled = __exynos_sysmmu_disable(data);
pm_runtime_put(data->sysmmu);
return disabled;
}
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
{
unsigned long flags;
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
read_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
int i;
for (i = 0; i < data->nsfrs; i++) {
if (sysmmu_block(data->sfrbases[i])) {
__sysmmu_tlb_invalidate_entry(
data->sfrbases[i], iova);
sysmmu_unblock(data->sfrbases[i]);
}
}
} else {
dev_dbg(data->sysmmu,
"(%s) Disabled. Skipping invalidating TLB.\n",
data->dbgname);
}
read_unlock_irqrestore(&data->lock, flags);
}
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
unsigned long flags;
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
read_lock_irqsave(&data->lock, flags);
if (is_sysmmu_active(data)) {
int i;
for (i = 0; i < data->nsfrs; i++) {
if (sysmmu_block(data->sfrbases[i])) {
__sysmmu_tlb_invalidate(data->sfrbases[i]);
sysmmu_unblock(data->sfrbases[i]);
}
}
} else {
dev_dbg(data->sysmmu,
"(%s) Disabled. Skipping invalidating TLB.\n",
data->dbgname);
}
read_unlock_irqrestore(&data->lock, flags);
}
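/*
 * Probe expects the platform device to carry one IORESOURCE_MEM and one
 * IORESOURCE_IRQ per System MMU instance it controls (hence the
 * num_resources / 2 instances below) and, optionally, platform data with a
 * debug name and up to two comma-separated clock names.
 */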
static int exynos_sysmmu_probe(struct platform_device *pdev)
{
int i, ret;
struct device *dev;
struct sysmmu_drvdata *data;
dev = &pdev->dev;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
dev_dbg(dev, "Not enough memory\n");
ret = -ENOMEM;
goto err_alloc;
}
ret = dev_set_drvdata(dev, data);
if (ret) {
dev_dbg(dev, "Unabled to initialize driver data\n");
goto err_init;
}
data->nsfrs = pdev->num_resources / 2;
data->sfrbases = kzalloc(sizeof(*data->sfrbases) * data->nsfrs,
GFP_KERNEL);
if (data->sfrbases == NULL) {
dev_dbg(dev, "Not enough memory\n");
ret = -ENOMEM;
goto err_init;
}
for (i = 0; i < data->nsfrs; i++) {
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res) {
dev_dbg(dev, "Unable to find IOMEM region\n");
ret = -ENOENT;
goto err_res;
}
data->sfrbases[i] = ioremap(res->start, resource_size(res));
if (!data->sfrbases[i]) {
dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
res->start);
ret = -ENOENT;
goto err_res;
}
}
for (i = 0; i < data->nsfrs; i++) {
ret = platform_get_irq(pdev, i);
if (ret <= 0) {
dev_dbg(dev, "Unable to find IRQ resource\n");
goto err_irq;
}
ret = request_irq(ret, exynos_sysmmu_irq, 0,
dev_name(dev), data);
if (ret) {
dev_dbg(dev, "Unabled to register interrupt handler\n");
goto err_irq;
}
}
if (dev_get_platdata(dev)) {
char *deli, *beg;
struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
beg = platdata->clockname;
for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
/* NOTHING */;
if (*deli == '\0')
deli = NULL;
else
*deli = '\0';
data->clk[0] = clk_get(dev, beg);
if (IS_ERR(data->clk[0])) {
data->clk[0] = NULL;
dev_dbg(dev, "No clock descriptor registered\n");
}
if (data->clk[0] && deli) {
*deli = ',';
data->clk[1] = clk_get(dev, deli + 1);
if (IS_ERR(data->clk[1]))
data->clk[1] = NULL;
}
data->dbgname = platdata->dbgname;
}
data->sysmmu = dev;
rwlock_init(&data->lock);
INIT_LIST_HEAD(&data->node);
__set_fault_handler(data, &default_fault_handler);
if (dev->parent)
pm_runtime_enable(dev);
dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
return 0;
err_irq:
while (i-- > 0) {
int irq;
irq = platform_get_irq(pdev, i);
free_irq(irq, data);
}
err_res:
while (data->nsfrs-- > 0)
if (data->sfrbases[data->nsfrs])
iounmap(data->sfrbases[data->nsfrs]);
kfree(data->sfrbases);
err_init:
kfree(data);
err_alloc:
dev_err(dev, "Failed to initialize\n");
return ret;
}
static struct platform_driver exynos_sysmmu_driver = {
.probe = exynos_sysmmu_probe,
.driver = {
.owner = THIS_MODULE,
.name = "exynos-sysmmu",
}
};
static inline void pgtable_flush(void *vastart, void *vaend)
{
dmac_flush_range(vastart, vaend);
outer_flush_range(virt_to_phys(vastart),
virt_to_phys(vaend));
}
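/*
 * Each iommu_domain owns a two-level page table: a first-level (Lv1) table
 * whose entries either map a 1MiB section directly or point to a
 * second-level (Lv2) table holding 4KiB small-page and 64KiB large-page
 * entries.  The lv2entcnt[] array tracks, per Lv1 slot, how many Lv2 entries
 * are still unused so that an entirely unused Lv2 table can be reclaimed
 * when its slot is converted back to a section mapping.
 */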
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->pgtable = (unsigned long *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO, 2);
if (!priv->pgtable)
goto err_pgtable;
priv->lv2entcnt = (short *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO, 1);
if (!priv->lv2entcnt)
goto err_counter;
pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
domain->priv = priv;
return 0;
err_counter:
free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
kfree(priv);
return -ENOMEM;
}
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv = domain->priv;
struct sysmmu_drvdata *data;
unsigned long flags;
int i;
WARN_ON(!list_empty(&priv->clients));
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(data, &priv->clients, node) {
while (!exynos_sysmmu_disable(data->dev))
; /* until System MMU is actually disabled */
}
spin_unlock_irqrestore(&priv->lock, flags);
for (i = 0; i < NUM_LV1ENTRIES; i++)
if (lv1ent_page(priv->pgtable + i))
kfree(__va(lv2table_base(priv->pgtable + i)));
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
kfree(domain->priv);
domain->priv = NULL;
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct exynos_iommu_domain *priv = domain->priv;
unsigned long flags;
int ret;
ret = pm_runtime_get_sync(data->sysmmu);
if (ret < 0)
return ret;
ret = 0;
spin_lock_irqsave(&priv->lock, flags);
ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
if (ret == 0) {
/* 'data->node' must not already be in priv->clients */
BUG_ON(!list_empty(&data->node));
data->dev = dev;
list_add_tail(&data->node, &priv->clients);
}
spin_unlock_irqrestore(&priv->lock, flags);
if (ret < 0) {
dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
__func__, __pa(priv->pgtable));
pm_runtime_put(data->sysmmu);
} else if (ret > 0) {
dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
__func__, __pa(priv->pgtable));
} else {
dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
__func__, __pa(priv->pgtable));
}
return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
struct exynos_iommu_domain *priv = domain->priv;
struct list_head *pos;
unsigned long flags;
bool found = false;
spin_lock_irqsave(&priv->lock, flags);
list_for_each(pos, &priv->clients) {
if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
found = true;
break;
}
}
if (!found)
goto finish;
if (__exynos_sysmmu_disable(data)) {
dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
__func__, __pa(priv->pgtable));
list_del(&data->node);
INIT_LIST_HEAD(&data->node);
} else {
dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
__func__, __pa(priv->pgtable));
}
finish:
spin_unlock_irqrestore(&priv->lock, flags);
if (found)
pm_runtime_put(data->sysmmu);
}
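/*
 * Installs a second-level table in an Lv1 slot on first use of the covering
 * 1MiB region and returns the Lv2 entry for @iova.  The BUG_ON checks that
 * the allocated table is aligned to LV2TABLE_SIZE, as required by the Lv1
 * descriptor format.
 */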
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
short *pgcounter)
{
if (lv1ent_fault(sent)) {
unsigned long *pent;
pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
if (!pent)
return NULL;
*sent = mk_lv1ent_page(__pa(pent));
*pgcounter = NUM_LV2ENTRIES;
pgtable_flush(pent, pent + NUM_LV2ENTRIES);
pgtable_flush(sent, sent + 1);
}
return page_entry(sent, iova);
}
static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
{
if (lv1ent_section(sent))
return -EADDRINUSE;
if (lv1ent_page(sent)) {
if (*pgcnt != NUM_LV2ENTRIES)
return -EADDRINUSE;
kfree(page_entry(sent, 0));
*pgcnt = 0;
}
*sent = mk_lv1ent_sect(paddr);
pgtable_flush(sent, sent + 1);
return 0;
}
static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
short *pgcnt)
{
if (size == SPAGE_SIZE) {
if (!lv2ent_fault(pent))
return -EADDRINUSE;
*pent = mk_lv2ent_spage(paddr);
pgtable_flush(pent, pent + 1);
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
if (!lv2ent_fault(pent)) {
memset(pent, 0, sizeof(*pent) * i);
return -EADDRINUSE;
}
*pent = mk_lv2ent_lpage(paddr);
}
pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
*pgcnt -= SPAGES_PER_LPAGE;
}
return 0;
}
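/*
 * Maps exactly one of the sizes advertised in pgsize_bitmap: a 1MiB section
 * in the Lv1 table, or a 4KiB small page / 64KiB large page in an Lv2 table
 * allocated on demand.  -EADDRINUSE is returned if any part of the requested
 * range is already mapped.
 */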
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct exynos_iommu_domain *priv = domain->priv;
unsigned long *entry;
unsigned long flags;
int ret = -ENOMEM;
BUG_ON(priv->pgtable == NULL);
spin_lock_irqsave(&priv->pgtablelock, flags);
entry = section_entry(priv->pgtable, iova);
if (size == SECT_SIZE) {
ret = lv1set_section(entry, paddr,
&priv->lv2entcnt[lv1ent_offset(iova)]);
} else {
unsigned long *pent;
pent = alloc_lv2entry(entry, iova,
&priv->lv2entcnt[lv1ent_offset(iova)]);
if (!pent)
ret = -ENOMEM;
else
ret = lv2set_page(pent, paddr, size,
&priv->lv2entcnt[lv1ent_offset(iova)]);
}
if (ret) {
pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
__func__, iova, size);
}
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return ret;
}
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct exynos_iommu_domain *priv = domain->priv;
struct sysmmu_drvdata *data;
unsigned long flags;
unsigned long *ent;
BUG_ON(priv->pgtable == NULL);
spin_lock_irqsave(&priv->pgtablelock, flags);
ent = section_entry(priv->pgtable, iova);
if (lv1ent_section(ent)) {
BUG_ON(size < SECT_SIZE);
*ent = 0;
pgtable_flush(ent, ent + 1);
size = SECT_SIZE;
goto done;
}
if (unlikely(lv1ent_fault(ent))) {
if (size > SECT_SIZE)
size = SECT_SIZE;
goto done;
}
/* lv1ent_page(ent) == true here */
ent = page_entry(ent, iova);
if (unlikely(lv2ent_fault(ent))) {
size = SPAGE_SIZE;
goto done;
}
if (lv2ent_small(ent)) {
*ent = 0;
size = SPAGE_SIZE;
priv->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
}
/* lv2ent_large(ent) == true here */
BUG_ON(size < LPAGE_SIZE);
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
size = LPAGE_SIZE;
priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
spin_unlock_irqrestore(&priv->pgtablelock, flags);
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry(data, &priv->clients, node)
sysmmu_tlb_invalidate_entry(data->dev, iova);
spin_unlock_irqrestore(&priv->lock, flags);
return size;
}
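/*
 * Walks the domain's page table to translate @iova; returns 0 when no
 * section, large-page or small-page mapping covers it.
 */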
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
struct exynos_iommu_domain *priv = domain->priv;
unsigned long *entry;
unsigned long flags;
phys_addr_t phys = 0;
spin_lock_irqsave(&priv->pgtablelock, flags);
entry = section_entry(priv->pgtable, iova);
if (lv1ent_section(entry)) {
phys = section_phys(entry) + section_offs(iova);
} else if (lv1ent_page(entry)) {
entry = page_entry(entry, iova);
if (lv2ent_large(entry))
phys = lpage_phys(entry) + lpage_offs(iova);
else if (lv2ent_small(entry))
phys = spage_phys(entry) + spage_offs(iova);
}
spin_unlock_irqrestore(&priv->pgtablelock, flags);
return phys;
}
static struct iommu_ops exynos_iommu_ops = {
.domain_init = &exynos_iommu_domain_init,
.domain_destroy = &exynos_iommu_domain_destroy,
.attach_dev = &exynos_iommu_attach_device,
.detach_dev = &exynos_iommu_detach_device,
.map = &exynos_iommu_map,
.unmap = &exynos_iommu_unmap,
.iova_to_phys = &exynos_iommu_iova_to_phys,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
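/*
 * Registers the System MMU platform driver and, on success, publishes the
 * IOMMU operations for all devices on the platform bus.
 */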
static int __init exynos_iommu_init(void)
{
int ret;
ret = platform_driver_register(&exynos_sysmmu_driver);
if (ret == 0)
bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
return ret;
}
subsys_initcall(exynos_iommu_init);