aboutsummaryrefslogtreecommitdiff
path: root/drivers/misc
diff options
context:
space:
mode:
authorfire855 <thefire855@gmail.com>2017-02-24 17:48:24 +0100
committerMister Oyster <oysterized@gmail.com>2017-04-11 10:59:46 +0200
commit504261abd2b3a69cb609ef9ccf4e58ae9ccad566 (patch)
treeeb5caf24cfcb12a7dbb2dbe38eaa1e68d4c26a3c /drivers/misc
parentd547e0f39015f8e8ec1dba8bd9d66c1beb24eb41 (diff)
Update m4u, smi and gud drivers
Backported from 3.18 MM kernel
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/mediatek/Kconfig1
-rw-r--r--drivers/misc/mediatek/Makefile2
-rw-r--r--drivers/misc/mediatek/Makefile.mt67352
-rw-r--r--drivers/misc/mediatek/cmdq/mt6735/cmdq_platform.c6
-rw-r--r--drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/mali_kbase_jm.c5
-rw-r--r--drivers/misc/mediatek/gud/302a/Makefile (renamed from drivers/misc/mediatek/gud/mt6735/Makefile)0
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/gud/302a/gud/Kconfig (renamed from drivers/misc/mediatek/gud/mt6735/gud/Kconfig)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/Makefile57
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/api.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/api.c)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/arm.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/arm.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/debug.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/debug.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/fastcall.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/fastcall.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.c)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.c)11
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.c)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.c)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/platform.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/platform.h)2
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.c)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_kernel_api.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_kernel_api.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_linux.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_linux.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/version.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/version.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/clientlib.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/clientlib.c)28
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/common.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/common.h)3
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.c)50
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.h)2
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.c)89
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.h)4
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcinq.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcinq.h)7
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcuuid.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcuuid.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/main.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/main.c)4
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_api.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_api.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.c (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.c)31
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.h)2
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/wsm.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/wsm.h)0
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/Out/Public/tui_ioctl.h48
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/build_tag.h (renamed from drivers/misc/mediatek/gud/mt6735/gud/build_tag.h)2
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/dciTui.h108
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/t-base-tui.h38
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/main.c164
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/public/tui_ioctl.h48
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.c380
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.h22
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/trustedui.c131
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal.h28
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal_mt6735.c344
-rw-r--r--drivers/misc/mediatek/gud/302a/gud/build_tag.h15
-rw-r--r--drivers/misc/mediatek/gud/302c/Makefile7
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/Kconfig42
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/gud/302c/gud/Makefile (renamed from drivers/misc/mediatek/gud/mt6735/gud/Makefile)13
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/api.c118
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/arm.h87
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/debug.h64
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/fastcall.h258
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.c384
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.h30
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.c1733
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.h155
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.c813
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.h148
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.c421
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.h37
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/platform.h52
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.c307
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.h47
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_kernel_api.h88
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_linux.h217
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/version.h20
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/clientlib.c1079
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/common.h83
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.c199
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.h61
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.c259
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.h67
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcinq.h110
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcuuid.h24
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/main.c215
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_api.h399
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h250
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.c225
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.h148
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/wsm.h30
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/Out/Public/tui_ioctl.h48
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/build_tag.h15
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/dciTui.h108
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/t-base-tui.h38
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/main.c173
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/public/tui_ioctl.h48
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.c380
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.h23
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/trustedui.c131
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal.h28
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal_mt6735.c414
-rw-r--r--drivers/misc/mediatek/gud/302c/gud/build_tag.h15
-rw-r--r--drivers/misc/mediatek/gud/Kconfig68
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/gud/Makefile15
-rw-r--r--drivers/misc/mediatek/gud/Makefile.include17
-rw-r--r--drivers/misc/mediatek/m4u/2.0/Makefile17
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u.c2670
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_debug.c1457
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_debug.h15
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_mva.c392
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_mva.h (renamed from drivers/misc/mediatek/m4u/mt6735/m4u_mva.h)0
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_pgtable.c1008
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_pgtable.h144
-rw-r--r--drivers/misc/mediatek/m4u/2.0/m4u_v2.h161
-rw-r--r--drivers/misc/mediatek/m4u/Kconfig6
-rw-r--r--drivers/misc/mediatek/m4u/Makefile7
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/m4u/mt6735/Makefile14
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u.c2677
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u.h6
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u.mk9
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_debug.c883
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_hw.c3816
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_hw.h223
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_mva.c417
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.c1062
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.h148
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_port.h16
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/m4u_priv.h361
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/m4u/mt6735/mt6735/Makefile6
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.c112
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.h14
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_port.h118
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_reg.h214
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/m4u/mt6735/mt6735m/Makefile5
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.c87
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.h12
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_port.h77
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_reg.h212
-rw-r--r--[-rwxr-xr-x]drivers/misc/mediatek/m4u/mt6735/mt6753/Makefile5
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.c117
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.h14
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_port.h123
-rw-r--r--drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_reg.h212
-rw-r--r--drivers/misc/mediatek/mach/mt6735/include/mach/m4u.h196
-rw-r--r--drivers/misc/mediatek/mach/mt6735/include/mach/mt_smi.h229
-rw-r--r--drivers/misc/mediatek/smi/Kconfig16
-rw-r--r--drivers/misc/mediatek/smi/Makefile43
-rw-r--r--drivers/misc/mediatek/smi/mmdvfs_mgr.c708
-rw-r--r--drivers/misc/mediatek/smi/mmdvfs_mgr.h147
-rw-r--r--drivers/misc/mediatek/smi/mmdvfs_mgr_v2.c1019
-rwxr-xr-xdrivers/misc/mediatek/smi/mt6735/Makefile22
-rw-r--r--drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.c410
-rw-r--r--drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.h27
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_common.h69
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_common_d1.c2112
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_common_d2.c1970
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_common_d3.c2077
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_debug.c153
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_debug.h24
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_reg_d1.h467
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_reg_d2.h464
-rw-r--r--drivers/misc/mediatek/smi/mt6735/smi_reg_d3.h467
-rw-r--r--drivers/misc/mediatek/smi/smi_common.c2003
-rw-r--r--drivers/misc/mediatek/smi/smi_common.h70
-rw-r--r--drivers/misc/mediatek/smi/smi_config_util.c49
-rw-r--r--drivers/misc/mediatek/smi/smi_config_util.h8
-rw-r--r--drivers/misc/mediatek/smi/smi_configuration.c1307
-rw-r--r--drivers/misc/mediatek/smi/smi_configuration.h53
-rw-r--r--drivers/misc/mediatek/smi/smi_debug.c348
-rw-r--r--drivers/misc/mediatek/smi/smi_debug.h34
-rw-r--r--drivers/misc/mediatek/smi/smi_info_util.c86
-rw-r--r--drivers/misc/mediatek/smi/smi_info_util.h13
-rw-r--r--drivers/misc/mediatek/smi/smi_internal.c61
-rw-r--r--drivers/misc/mediatek/smi/smi_reg.h449
-rw-r--r--drivers/misc/mediatek/smi/variant/Makefile15
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_common.h55
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_debug.c136
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_debug.h23
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_priv.h36
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_reg.h536
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_variant.c1760
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_variant_config_8127.c220
-rw-r--r--drivers/misc/mediatek/smi/variant/smi_variant_config_8173.c258
180 files changed, 29653 insertions, 16669 deletions
diff --git a/drivers/misc/mediatek/Kconfig b/drivers/misc/mediatek/Kconfig
index 00abc771d..769d15b02 100644
--- a/drivers/misc/mediatek/Kconfig
+++ b/drivers/misc/mediatek/Kconfig
@@ -493,5 +493,6 @@ source "drivers/misc/mediatek/connectivity/Kconfig"
source "drivers/misc/mediatek/gps/Kconfig"
source "drivers/misc/mediatek/hall/Kconfig"
source "drivers/misc/mediatek/multibridge/Kconfig"
+source "drivers/misc/mediatek/m4u/Kconfig"
endif
diff --git a/drivers/misc/mediatek/Makefile b/drivers/misc/mediatek/Makefile
index 404516ca7..b3d1fb412 100644
--- a/drivers/misc/mediatek/Makefile
+++ b/drivers/misc/mediatek/Makefile
@@ -238,7 +238,7 @@ endif
obj-y += power/
obj-y += i2c/
obj-y += pwm/
-obj-$(CONFIG_MTK_SMI) += smi/
+obj-$(CONFIG_MTK_SMI_EXT) += smi/
obj-$(CONFIG_MTK_BTCVSD) += btcvsd/
obj-$(CONFIG_MTK_SOUND) += sound/
obj-$(CONFIG_MTK_SPI) += spi/
diff --git a/drivers/misc/mediatek/Makefile.mt6735 b/drivers/misc/mediatek/Makefile.mt6735
index 857a9ca67..72c75a9d2 100644
--- a/drivers/misc/mediatek/Makefile.mt6735
+++ b/drivers/misc/mediatek/Makefile.mt6735
@@ -159,7 +159,7 @@ obj-$(CONFIG_MTK_MMC) += pmt/
obj-y += power/
obj-y += i2c/
obj-y += pwm/
-obj-$(CONFIG_MTK_SMI) += smi/
+obj-$(CONFIG_MTK_SMI_EXT) += smi/
obj-$(CONFIG_MTK_BTCVSD) += btcvsd/
obj-$(CONFIG_MTK_SOUND) += sound/
obj-$(CONFIG_MTK_SPI) += spi/
diff --git a/drivers/misc/mediatek/cmdq/mt6735/cmdq_platform.c b/drivers/misc/mediatek/cmdq/mt6735/cmdq_platform.c
index 85d83659f..39cf9286b 100644
--- a/drivers/misc/mediatek/cmdq/mt6735/cmdq_platform.c
+++ b/drivers/misc/mediatek/cmdq/mt6735/cmdq_platform.c
@@ -6,7 +6,7 @@
#include <mach/mt_clkmgr.h>
#include <linux/seq_file.h>
-#include "smi_debug.h"
+#include <../../smi/smi_debug.h>
#define MMSYS_CONFIG_BASE cmdq_dev_get_module_base_VA_MMSYS_CONFIG()
@@ -667,9 +667,9 @@ int cmdq_core_dump_smi(const int showSmiDump)
/* isSMIHang = smi_debug_bus_hanging_detect(
SMI_DBG_DISPSYS | SMI_DBG_VDEC | SMI_DBG_IMGSYS | SMI_DBG_VENC | SMI_DBG_MJC,
showSmiDump); */
- isSMIHang = smi_debug_bus_hanging_detect_ext(
+ /* isSMIHang = smi_debug_bus_hanging_detect_ext(
SMI_DBG_DISPSYS | SMI_DBG_VDEC | SMI_DBG_IMGSYS | SMI_DBG_VENC | SMI_DBG_MJC,
- showSmiDump, 1);
+ showSmiDump, 1); */
CMDQ_ERR("SMI Hang? = %d\n", isSMIHang);
#endif
diff --git a/drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/mali_kbase_jm.c
index 68f2dd955..19bbf1a89 100644
--- a/drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/mali_kbase_jm.c
+++ b/drivers/misc/mediatek/gpu/mt6735/mali-EAC/drivers/gpu/arm/midgard/mali_kbase_jm.c
@@ -31,8 +31,6 @@
#include "mali_kbase_jm.h"
-extern void smi_dumpDebugMsg(void);
-
#define beenthere(kctx, f, a...) dev_dbg(kctx->kbdev->dev, "%s:" f, __func__, ##a)
#ifdef CONFIG_MALI_DEBUG_SHADER_SPLIT_FS
@@ -1225,8 +1223,6 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
dev_err(kbdev->dev, "Resetting GPU (allowing up to %d ms)", RESET_TIMEOUT);
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
-
- smi_dumpDebugMsg();
if (kbdev->hwcnt.state == KBASE_INSTR_STATE_RESETTING) { /*the same interrupt handler preempted itself */
/* GPU is being reset */
@@ -1248,7 +1244,6 @@ void kbasep_reset_timeout_worker(struct work_struct *data)
/* Output the state of some interesting registers to help in the
* debugging of GPU resets */
- smi_dumpDebugMsg();
kbase_debug_dump_registers(kbdev);
bckp_state = kbdev->hwcnt.state;
diff --git a/drivers/misc/mediatek/gud/mt6735/Makefile b/drivers/misc/mediatek/gud/302a/Makefile
index 4937fb49c..4937fb49c 100644
--- a/drivers/misc/mediatek/gud/mt6735/Makefile
+++ b/drivers/misc/mediatek/gud/302a/Makefile
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/Kconfig b/drivers/misc/mediatek/gud/302a/gud/Kconfig
index 9d210a7a4..9d210a7a4 100755..100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/Kconfig
+++ b/drivers/misc/mediatek/gud/302a/gud/Kconfig
diff --git a/drivers/misc/mediatek/gud/302a/gud/Makefile b/drivers/misc/mediatek/gud/302a/gud/Makefile
new file mode 100644
index 000000000..e752c0e54
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/Makefile
@@ -0,0 +1,57 @@
+#
+# Makefile for the kernel mobicore drivers
+#
+
+ifneq ($(MTK_ROOT_BUILD),)
+include $(MTK_ROOT_BUILD)/Makefile
+endif
+
+GUD_ROOT_FOLDER := $(dir $(lastword $(MAKEFILE_LIST)))
+# add our modules to kernel.
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += mcKernelApi.o
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += mcDrvModule.o
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui.o
+
+mcDrvModule-objs := MobiCoreDriver/logging.o \
+ MobiCoreDriver/ops.o \
+ MobiCoreDriver/mem.o \
+ MobiCoreDriver/api.o \
+ MobiCoreDriver/pm.o \
+ MobiCoreDriver/main.o
+
+mcKernelApi-objs := MobiCoreKernelApi/main.o \
+ MobiCoreKernelApi/clientlib.o \
+ MobiCoreKernelApi/device.o \
+ MobiCoreKernelApi/session.o \
+ MobiCoreKernelApi/connection.o
+
+TlcTui-objs := TlcTui/main.o \
+ TlcTui/tlcTui.o \
+ TlcTui/trustedui.o \
+ TlcTui/tui-hal_$(MTK_PLATFORM).o
+
+# Release mode by default
+ccflags-y := -DNDEBUG
+ccflags-y += -Wno-declaration-after-statement
+#ccflags-y += -Wno-error=date-time
+
+ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
+ccflags-$(CONFIG_MOBICORE_VERBOSE) += -DDEBUG_VERBOSE
+
+# Choose one platform from the folder
+#MOBICORE_PLATFORM := $(shell (ls -1 $(GUD_ROOT_FOLDER)MobiCoreDriver/platforms | tail -1) )
+ccflags-y += -DMC_NETLINK_COMPAT_V37
+
+# Use the available platform folder
+#ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreDriver/public
+# MobiCore KernelApi required incldes
+ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreKernelApi/include \
+ -I$(GUD_ROOT_FOLDER)MobiCoreKernelApi/public
+
+# MobiCore TlcTui required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/TlcTui \
+ -I$(GUD_ROOT_FOLDER)/TlcTui/inc \
+ -I$(GUD_ROOT_FOLDER)/TlcTui/public \
+ include
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/api.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/api.c
index 354f5ddff..354f5ddff 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/api.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/api.c
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/arm.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/arm.h
index 8c9fc37ee..8c9fc37ee 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/arm.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/arm.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/debug.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/debug.h
index 52362b346..52362b346 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/debug.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/debug.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/fastcall.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/fastcall.h
index b438d7244..b438d7244 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/fastcall.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/fastcall.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.c
index 044e297df..044e297df 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.c
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.h
index a3cbca21c..a3cbca21c 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/logging.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/logging.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.c
index 74ddcee71..6c51fafe5 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.c
@@ -112,9 +112,11 @@ static struct mc_instance *get_instance(struct file *file)
uint32_t mc_get_new_handle(void)
{
+ static DEFINE_MUTEX(local_mutex);
uint32_t handle;
struct mc_buffer *buffer;
- /* assumption ctx.bufs_lock mutex is locked */
+
+ mutex_lock(&local_mutex);
retry:
handle = atomic_inc_return(&ctx.handle_counter);
/* The handle must leave 12 bits (PAGE_SHIFT) for the 12 LSBs to be
@@ -130,6 +132,7 @@ retry:
if (buffer->handle == handle)
goto retry;
}
+ mutex_unlock(&local_mutex);
return handle;
}
@@ -1509,7 +1512,7 @@ out:
* This device is installed and registered as cdev, then interrupt and
* queue handling is set up
*/
-static unsigned int mobicore_irq_id = MC_INTR_SSIQ;
+static unsigned int mobicore_irq_id = MC_INTR_SSIQ;
static int __init mobicore_init(void)
{
int ret = 0;
@@ -1524,7 +1527,7 @@ static int __init mobicore_init(void)
/* Do not remove or change the following trace.
* The string "MobiCore" is used to detect if <t-base is in of the image
*/
- dev_info(mcd, "MobiCore Driver, Build: " __TIMESTAMP__ "\n");
+ dev_info(mcd, "MobiCore Driver, Build: " "\n");
dev_info(mcd, "MobiCore mcDrvModuleApi version is %i.%i\n",
MCDRVMODULEAPI_VERSION_MAJOR,
MCDRVMODULEAPI_VERSION_MINOR);
@@ -1615,8 +1618,8 @@ free_pm:
#ifdef MC_PM_RUNTIME
mc_pm_free();
free_isr:
- free_irq(mobicore_irq_id, &ctx);
#endif
+ free_irq(MC_INTR_SSIQ, &ctx);
err_req_irq:
mc_fastcall_destroy();
error:
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.h
index 32ffb95e1..32ffb95e1 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/main.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/main.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.c
index d65a91fee..d65a91fee 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.c
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.h
index c4b6715f2..c4b6715f2 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/mem.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/mem.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.c
index ad9e9e243..ad9e9e243 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.c
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.h
index 30458a37d..30458a37d 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/ops.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/ops.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/platform.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/platform.h
index 9f59b380d..e08a84215 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/platform.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/platform.h
@@ -34,7 +34,7 @@
#define MC_PM_RUNTIME
#endif
-// #define TBASE_CORE_SWITCHER
+#define TBASE_CORE_SWITCHER
/* Values of MPIDR regs in cpu0, cpu1, cpu2, cpu3*/
#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003, 0x0100, 0x0101, 0x0102, 0x0103}
#define COUNT_OF_CPUS 8
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.c
index e3ea6b530..e3ea6b530 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.c
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.h
index 6581425a7..6581425a7 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/pm.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/pm.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_kernel_api.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_kernel_api.h
index 96805fda1..96805fda1 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_kernel_api.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_kernel_api.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_linux.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_linux.h
index b9c4934d5..b9c4934d5 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/mc_linux.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/mc_linux.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/version.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/version.h
index 8db48a09b..8db48a09b 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreDriver/public/version.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreDriver/public/version.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/clientlib.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/clientlib.c
index a951e696f..39f81d3dc 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/clientlib.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/clientlib.c
@@ -29,6 +29,8 @@
/* device list */
LIST_HEAD(devices);
+/* lock used to prevent concurrent add/delete action on the device list */
+struct mutex device_mutex;
atomic_t device_usage = ATOMIC_INIT(0);
static struct mcore_device_t *resolve_device_id(uint32_t device_id)
@@ -37,33 +39,45 @@ static struct mcore_device_t *resolve_device_id(uint32_t device_id)
struct list_head *pos;
/* Get mcore_device_t for device_id */
+ mutex_lock(&device_mutex);
list_for_each(pos, &devices) {
tmp = list_entry(pos, struct mcore_device_t, list);
- if (tmp->device_id == device_id)
+ if (tmp->device_id == device_id) {
+ mutex_unlock(&device_mutex);
return tmp;
+ }
}
+ mutex_unlock(&device_mutex);
return NULL;
}
static void add_device(struct mcore_device_t *device)
{
+ mutex_lock(&device_mutex);
list_add_tail(&(device->list), &devices);
+ mutex_unlock(&device_mutex);
}
static bool remove_device(uint32_t device_id)
{
- struct mcore_device_t *tmp;
+ struct mcore_device_t *device, *candidate = NULL;
struct list_head *pos, *q;
+ bool found = false;
+ mutex_lock(&device_mutex);
list_for_each_safe(pos, q, &devices) {
- tmp = list_entry(pos, struct mcore_device_t, list);
- if (tmp->device_id == device_id) {
+ device = list_entry(pos, struct mcore_device_t, list);
+ if (device->device_id == device_id) {
list_del(pos);
- mcore_device_cleanup(tmp);
- return true;
+ candidate = device;
+ found = true;
+ break;
}
}
- return false;
+ mutex_unlock(&device_mutex);
+ if (!candidate)
+ mcore_device_cleanup(candidate);
+ return found;
}
enum mc_result mc_open_device(uint32_t device_id)
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/common.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/common.h
index b6c404b8b..63431142b 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/common.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/common.h
@@ -33,6 +33,9 @@ unsigned int mcapi_unique_id(void);
/* Found in main.c */
extern struct device *mc_kapi;
+/* Found in clientlib.c */
+extern struct mutex device_mutex;
+
#define MCDRV_ERROR(dev, txt, ...) \
dev_err(dev, "%s() ### ERROR: " txt, __func__, ##__VA_ARGS__)
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.c
index 43dddd35a..18dc5e720 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.c
@@ -93,7 +93,7 @@ size_t connection_read_data_msg(struct connection *conn, void *buffer,
size_t connection_read_datablock(struct connection *conn, void *buffer,
uint32_t len)
{
- return connection_read_data(conn, buffer, len, 2000);
+ return connection_read_data(conn, buffer, len, -1);
}
size_t connection_read_data(struct connection *conn, void *buffer, uint32_t len,
@@ -141,38 +141,34 @@ size_t connection_read_data(struct connection *conn, void *buffer, uint32_t len,
return ret;
}
-size_t connection_write_data(struct connection *conn, void *buffer,
+int connection_write_data(struct connection *conn, void *buffer,
uint32_t len)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
- int ret = 0;
-
- MCDRV_DBG_VERBOSE(mc_kapi, "buffer length %u from pid %u\n",
- len, conn->sequence_magic);
- do {
- skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
- if (!skb) {
- ret = -1;
- break;
- }
-
- nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2,
- NLMSG_LENGTH(len), NLM_F_REQUEST);
- if (!nlh) {
- ret = -1;
- kfree_skb(skb);
- break;
- }
- memcpy(NLMSG_DATA(nlh), buffer, len);
+ int ret;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "buffer length %u from pid %u\n", len,
+ conn->sequence_magic);
+ skb = nlmsg_new(NLMSG_SPACE(len), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2, NLMSG_LENGTH(len),
+ NLM_F_REQUEST);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
- /* netlink_unicast frees skb */
- netlink_unicast(conn->socket_descriptor, skb,
- conn->peer_pid, MSG_DONTWAIT);
- ret = len;
- } while (0);
+ /* netlink_unicast frees skb */
+ memcpy(NLMSG_DATA(nlh), buffer, len);
+ ret = netlink_unicast(conn->socket_descriptor, skb, conn->peer_pid,
+ MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
- return ret;
+ return len;
}
int connection_process(struct connection *conn, struct sk_buff *skb)
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.h
index 1b7436635..5a0249941 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/connection.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/connection.h
@@ -54,7 +54,7 @@ size_t connection_read_datablock(struct connection *conn, void *buffer,
uint32_t len);
size_t connection_read_data(struct connection *conn, void *buffer,
uint32_t len, int32_t timeout);
-size_t connection_write_data(struct connection *conn, void *buffer,
+int connection_write_data(struct connection *conn, void *buffer,
uint32_t len);
int connection_process(struct connection *conn, struct sk_buff *skb);
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.c
index e3d54e68c..021dc3d2f 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.c
@@ -55,13 +55,15 @@ struct mcore_device_t *mcore_device_create(uint32_t device_id,
INIT_LIST_HEAD(&dev->session_vector);
INIT_LIST_HEAD(&dev->wsm_mmu_vector);
+ mutex_init(&(dev->session_vector_lock));
+ mutex_init(&(dev->wsm_mmu_vector_lock));
return dev;
}
void mcore_device_cleanup(struct mcore_device_t *dev)
{
- struct session *tmp;
+ struct session *session = NULL;
struct wsm *wsm;
struct list_head *pos, *q;
@@ -69,18 +71,29 @@ void mcore_device_cleanup(struct mcore_device_t *dev)
* Delete all session objects. Usually this should not be needed
* as close_device() requires that all sessions have been closed before.
*/
- list_for_each_safe(pos, q, &dev->session_vector) {
- tmp = list_entry(pos, struct session, list);
- list_del(pos);
- session_cleanup(tmp);
- }
+ do {
+ session = NULL;
+ mutex_lock(&(dev->session_vector_lock));
+ if (!list_empty(&(dev->session_vector))) {
+ session = list_first_entry(&(dev->session_vector),
+ struct session,
+ list);
+ list_del(&(session->list));
+ }
+ mutex_unlock(&(dev->session_vector_lock));
+ if (!session)
+ break;
+ session_cleanup(session);
+ } while (true);
/* Free all allocated WSM descriptors */
- list_for_each_safe(pos, q, &dev->wsm_mmu_vector) {
+ mutex_lock(&(dev->wsm_mmu_vector_lock));
+ list_for_each_safe(pos, q, &(dev->wsm_mmu_vector)) {
wsm = list_entry(pos, struct wsm, list);
list_del(pos);
kfree(wsm);
}
+ mutex_unlock(&(dev->wsm_mmu_vector_lock));
connection_cleanup(dev->connection);
mcore_device_close(dev);
@@ -100,7 +113,11 @@ void mcore_device_close(struct mcore_device_t *dev)
bool mcore_device_has_sessions(struct mcore_device_t *dev)
{
- return !list_empty(&dev->session_vector);
+ int ret = 0;
+ mutex_lock(&(dev->session_vector_lock));
+ ret = !list_empty(&dev->session_vector);
+ mutex_unlock(&(dev->session_vector_lock));
+ return ret;
}
bool mcore_device_create_new_session(struct mcore_device_t *dev,
@@ -117,44 +134,49 @@ bool mcore_device_create_new_session(struct mcore_device_t *dev,
session_create(session_id, dev->instance, connection);
if (session == NULL)
return false;
+ mutex_lock(&(dev->session_vector_lock));
list_add_tail(&(session->list), &(dev->session_vector));
+ mutex_unlock(&(dev->session_vector_lock));
return true;
}
bool mcore_device_remove_session(struct mcore_device_t *dev,
uint32_t session_id)
{
- bool ret = false;
- struct session *tmp;
- struct list_head *pos, *q;
+ bool found = false;
+ struct session *session = NULL;
+ struct list_head *pos;
- list_for_each_safe(pos, q, &dev->session_vector) {
- tmp = list_entry(pos, struct session, list);
- if (tmp->session_id == session_id) {
+ mutex_lock(&(dev->session_vector_lock));
+ list_for_each(pos, &dev->session_vector) {
+ session = list_entry(pos, struct session, list);
+ if (session->session_id == session_id) {
list_del(pos);
- session_cleanup(tmp);
- ret = true;
+ found = true;
break;
}
}
- return ret;
+ mutex_unlock(&(dev->session_vector_lock));
+ if (found)
+ session_cleanup(session);
+ return found;
}
struct session *mcore_device_resolve_session_id(struct mcore_device_t *dev,
uint32_t session_id)
{
struct session *ret = NULL;
- struct session *tmp;
- struct list_head *pos;
+ struct session *session;
/* Get session for session_id */
- list_for_each(pos, &dev->session_vector) {
- tmp = list_entry(pos, struct session, list);
- if (tmp->session_id == session_id) {
- ret = tmp;
+ mutex_lock(&(dev->session_vector_lock));
+ list_for_each_entry(session, &dev->session_vector, list) {
+ if (session->session_id == session_id) {
+ ret = session;
break;
}
}
+ mutex_unlock(&(dev->session_vector_lock));
return ret;
}
@@ -181,7 +203,9 @@ struct wsm *mcore_device_allocate_contiguous_wsm(struct mcore_device_t *dev,
break;
}
+ mutex_lock(&(dev->wsm_mmu_vector_lock));
list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
+ mutex_unlock(&(dev->wsm_mmu_vector_lock));
} while (0);
@@ -195,6 +219,7 @@ bool mcore_device_free_contiguous_wsm(struct mcore_device_t *dev,
struct wsm *tmp;
struct list_head *pos;
+ mutex_lock(&(dev->wsm_mmu_vector_lock));
list_for_each(pos, &dev->wsm_mmu_vector) {
tmp = list_entry(pos, struct wsm, list);
if (tmp == wsm) {
@@ -202,7 +227,7 @@ bool mcore_device_free_contiguous_wsm(struct mcore_device_t *dev,
break;
}
}
-
+ mutex_unlock(&(dev->wsm_mmu_vector_lock));
if (ret) {
MCDRV_DBG_VERBOSE(mc_kapi,
"freeWsm virt_addr=0x%p, handle=%d",
@@ -220,14 +245,16 @@ bool mcore_device_free_contiguous_wsm(struct mcore_device_t *dev,
struct wsm *mcore_device_find_contiguous_wsm(struct mcore_device_t *dev,
void *virt_addr)
{
- struct wsm *wsm;
- struct list_head *pos;
+ struct wsm *wsm, *candidate = NULL;
- list_for_each(pos, &dev->wsm_mmu_vector) {
- wsm = list_entry(pos, struct wsm, list);
- if (virt_addr == wsm->virt_addr)
- return wsm;
+ mutex_lock(&(dev->wsm_mmu_vector_lock));
+ list_for_each_entry(wsm, &dev->wsm_mmu_vector, list) {
+ if (virt_addr == wsm->virt_addr) {
+ candidate = wsm;
+ break;
+ }
}
+ mutex_unlock(&(dev->wsm_mmu_vector_lock));
- return NULL;
+ return candidate;
}
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.h
index e73042848..9b564d0b9 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/device.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/device.h
@@ -27,7 +27,11 @@
struct mcore_device_t {
/* MobiCore Trustlet session associated with the device */
+ /* lock used to prevent concurrent add/del action on the session list */
+ struct mutex session_vector_lock;
struct list_head session_vector;
+ /* lock used to prevent concurrent add/del action on the mmu list */
+ struct mutex wsm_mmu_vector_lock;
struct list_head wsm_mmu_vector; /* WSM L2 or L3 Table */
uint32_t device_id; /* Device identifier */
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcinq.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcinq.h
index 30444993b..d84a28e30 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcinq.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcinq.h
@@ -38,11 +38,14 @@
#define MIN_NQ_ELEM 1 /* Minimum notification queue elements. */
#define MAX_NQ_ELEM 64 /* Maximum notification queue elements. */
+/* Compute notification queue size in bytes from its number of elements */
+#define QUEUE_SIZE(a) (2 * (sizeof(notification_queue_header) + (a) * sizeof(notification)))
+
/* Minimum notification length (in bytes). */
-#define MIN_NQ_LEN (MIN_NQ_ELEM * sizeof(notification))
+#define MIN_NQ_LEN QUEUE_SIZE(MIN_NQ_ELEM)
/* Maximum notification length (in bytes). */
-#define MAX_NQ_LEN (MAX_NQ_ELEM * sizeof(notification))
+#define MAX_NQ_LEN QUEUE_SIZE(MAX_NQ_ELEM)
/*
* MCP session ID is used when directly communicating with the MobiCore
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcuuid.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcuuid.h
index eca5191ed..eca5191ed 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/include/mcuuid.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include/mcuuid.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/main.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/main.c
index 5695b3638..83d675bc1 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/main.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/main.c
@@ -91,12 +91,11 @@ void mcapi_remove_connection(uint32_t seq)
struct connection *tmp;
struct list_head *pos, *q;
- mutex_lock(&(mod_ctx->peers_lock));
-
/*
* Delete all session objects. Usually this should not be needed as
* closeDevice() requires that all sessions have been closed before.
*/
+ mutex_lock(&(mod_ctx->peers_lock));
list_for_each_safe(pos, q, &mod_ctx->peers) {
tmp = list_entry(pos, struct connection, list);
if (tmp->sequence_magic == seq) {
@@ -191,6 +190,7 @@ static int __init mcapi_init(void)
INIT_LIST_HEAD(&mod_ctx->peers);
mutex_init(&mod_ctx->peers_lock);
+ mutex_init(&device_mutex);
return 0;
}
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_api.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
index 7bf2a2f66..7bf2a2f66 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
index 4e6ba0ddf..4e6ba0ddf 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.c b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.c
index 4f14ce904..29d909bba 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.c
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.c
@@ -53,6 +53,7 @@ struct session *session_create(
session->session_info.state = SESSION_STATE_INITIAL;
INIT_LIST_HEAD(&(session->bulk_buffer_descriptors));
+ mutex_init(&(session->bulk_buffer_descriptors_lock));
return session;
}
@@ -62,6 +63,7 @@ void session_cleanup(struct session *session)
struct list_head *pos, *q;
/* Unmap still mapped buffers */
+ mutex_lock(&(session->bulk_buffer_descriptors_lock));
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
bulk_buf_descr =
list_entry(pos, struct bulk_buffer_descriptor, list);
@@ -80,6 +82,7 @@ void session_cleanup(struct session *session)
list_del(pos);
kfree(bulk_buf_descr);
}
+ mutex_unlock(&(session->bulk_buffer_descriptors_lock));
/* Finally delete notification connection */
connection_cleanup(session->notification_connection);
@@ -102,17 +105,23 @@ struct bulk_buffer_descriptor *session_add_bulk_buf(struct session *session,
struct bulk_buffer_descriptor *bulk_buf_descr = NULL;
struct bulk_buffer_descriptor *tmp;
struct list_head *pos;
+ int ret = 0;
/*
* Search bulk buffer descriptors for existing vAddr
* At the moment a virtual address can only be added one time
*/
+ mutex_lock(&(session->bulk_buffer_descriptors_lock));
list_for_each(pos, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
- if (tmp->virt_addr == buf)
- return NULL;
+ if (tmp->virt_addr == buf) {
+ ret = -1;
+ break;
+ }
}
-
+ mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+ if (ret == -1)
+ return NULL;
do {
/*
* Prepare the interface structure for memory registration in
@@ -142,8 +151,10 @@ struct bulk_buffer_descriptor *session_add_bulk_buf(struct session *session,
}
/* Add to vector of descriptors */
+ mutex_lock(&(session->bulk_buffer_descriptors_lock));
list_add_tail(&(bulk_buf_descr->list),
&(session->bulk_buffer_descriptors));
+ mutex_unlock(&(session->bulk_buffer_descriptors_lock));
} while (0);
return bulk_buf_descr;
@@ -160,6 +171,7 @@ bool session_remove_bulk_buf(struct session *session, void *virt_addr)
virt_addr);
/* Search and remove bulk buffer descriptor */
+ mutex_lock(&(session->bulk_buffer_descriptors_lock));
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
if (tmp->virt_addr == virt_addr) {
@@ -168,6 +180,7 @@ bool session_remove_bulk_buf(struct session *session, void *virt_addr)
break;
}
}
+ mutex_unlock(&(session->bulk_buffer_descriptors_lock));
if (bulk_buf == NULL) {
MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
@@ -193,16 +206,20 @@ uint32_t session_find_bulk_buf(struct session *session, void *virt_addr)
{
struct bulk_buffer_descriptor *tmp;
struct list_head *pos, *q;
+ uint32_t handle = 0;
MCDRV_DBG_VERBOSE(mc_kapi, "Virtual Address = 0x%p",
virt_addr);
/* Search and return buffer descriptor handle */
+ mutex_lock(&(session->bulk_buffer_descriptors_lock));
list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
- if (tmp->virt_addr == virt_addr)
- return tmp->handle;
+ if (tmp->virt_addr == virt_addr) {
+ handle = tmp->handle;
+ break;
+ }
}
-
- return 0;
+ mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+ return handle;
}
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.h
index 2f7d5a9a0..37c3d6f5f 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/session.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/session.h
@@ -64,6 +64,8 @@ struct session {
/* Descriptors of additional bulk buffer of a session */
struct list_head bulk_buffer_descriptors;
+ /* lock used to prevent concurrent add/del on the descriptor list */
+ struct mutex bulk_buffer_descriptors_lock;
/* Information about session */
struct session_information session_info;
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/wsm.h b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/wsm.h
index b8d4b26c6..b8d4b26c6 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/MobiCoreKernelApi/wsm.h
+++ b/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/wsm.h
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/Out/Public/tui_ioctl.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/Out/Public/tui_ioctl.h
new file mode 100644
index 000000000..def13393d
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/Out/Public/tui_ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+
+
+/* Response header */
+struct tlc_tui_response_t {
+ uint32_t id;
+ uint32_t return_code;
+};
+
+/* Command IDs */
+#define TLC_TUI_CMD_NONE 0
+#define TLC_TUI_CMD_START_ACTIVITY 1
+#define TLC_TUI_CMD_STOP_ACTIVITY 2
+
+/* Return codes */
+#define TLC_TUI_OK 0
+#define TLC_TUI_ERROR 1
+#define TLC_TUI_ERR_UNKNOWN_CMD 2
+
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME "t-base-tui"
+
+#define TUI_IO_MAGIC 't'
+
+#define TUI_IO_NOTIFY _IOW(TUI_IO_MAGIC, 1, uint32_t)
+#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, uint32_t)
+#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/build_tag.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/build_tag.h
index fc11448b3..b7a78a7d4 100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/build_tag.h
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/build_tag.h
@@ -12,4 +12,4 @@
* GNU General Public License for more details.
*/
#define MOBICORE_COMPONENT_BUILD_TAG \
- "t-base-Mediatek-MT6752-Android-302A-V006-39_39"
+ "t-base-Mediatek-Armv8-Android-302A-V010-20150908_113718_68"
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/dciTui.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/dciTui.h
new file mode 100644
index 000000000..5bee85cad
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/dciTui.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DCITUI_H__
+#define __DCITUI_H__
+
+/**< Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmd_id) (((uint32_t)(cmd_id)) | RSP_ID_MASK)
+#define IS_CMD(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == 0)
+#define IS_RSP(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == RSP_ID_MASK)
+#define CMD_ID_FROM_RSP(rsp_id) (rsp_id & (~RSP_ID_MASK))
+
+/**
+ * Return codes of driver commands.
+ */
+#define TUI_DCI_OK 0x00030000
+#define TUI_DCI_ERR_UNKNOWN_CMD 0x00030001
+#define TUI_DCI_ERR_NOT_SUPPORTED 0x00030002
+#define TUI_DCI_ERR_INTERNAL_ERROR 0x00030003
+#define TUI_DCI_ERR_NO_RESPONSE 0x00030004
+#define TUI_DCI_ERR_BAD_PARAMETERS 0x00030005
+#define TUI_DCI_ERR_NO_EVENT 0x00030006
+#define TUI_DCI_ERR_OUT_OF_DISPLAY 0x00030007
+/* ... add more error codes when needed */
+
+
+/**
+ * Notification ID's for communication Trustlet Connector -> Driver.
+ */
+#define NOT_TUI_NONE 0
+/* NWd system event that closes the current TUI session*/
+#define NOT_TUI_CANCEL_EVENT 1
+
+
+/**
+ * Command ID's for communication Driver -> Trustlet Connector.
+ */
+#define CMD_TUI_SW_NONE 0
+/* SWd request to NWd to start the TUI session */
+#define CMD_TUI_SW_OPEN_SESSION 1
+/* SWd request to NWd to close the TUI session */
+#define CMD_TUI_SW_CLOSE_SESSION 2
+/* SWd request to NWd stop accessing display controller */
+#define CMD_TUI_SW_STOP_DISPLAY 3
+
+
+/**
+ * Maximum data length.
+ */
+#define MAX_DCI_DATA_LEN (1024*100)
+
+/* Command payload */
+struct tui_alloc_data_t {
+ uint32_t alloc_size;
+ uint32_t num_of_buff;
+};
+
+union dci_cmd_payload_t {
+ struct tui_alloc_data_t alloc_data;
+};
+
+/* Command */
+struct dci_command_t {
+ volatile uint32_t id;
+ union dci_cmd_payload_t payload;
+};
+
+/* TUI frame buffer (output from NWd) */
+typedef struct {
+ uint64_t pa;
+} tuiAllocBuffer_t;
+
+#define MAX_DCI_BUFFER_NUMBER 4
+
+/* Response */
+struct dci_response_t {
+ volatile uint32_t id; /* must be command ID | RSP_ID_MASK */
+ uint32_t return_code;
+ union {
+ tuiAllocBuffer_t alloc_buffer[MAX_DCI_BUFFER_NUMBER];
+ };
+};
+
+/* DCI buffer */
+struct tui_dci_msg_t {
+ volatile uint32_t nwd_notif; /* Notification from TlcTui to DrTui */
+ struct dci_command_t cmd_nwd; /* Command from DrTui to TlcTui */
+ struct dci_response_t nwd_rsp; /* Response from TlcTui to DrTui */
+};
+
+/**
+ * Driver UUID. Update accordingly after reserving UUID
+ */
+#define DR_TUI_UUID { { 7, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
+
+#endif /* __DCITUI_H__ */
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/t-base-tui.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/t-base-tui.h
new file mode 100644
index 000000000..4f34a286e
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/inc/t-base-tui.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TBASE_TUI_H__
+#define __TBASE_TUI_H__
+
+#define TRUSTEDUI_MODE_OFF 0x00
+#define TRUSTEDUI_MODE_ALL 0xff
+#define TRUSTEDUI_MODE_TUI_SESSION 0x01
+#define TRUSTEDUI_MODE_VIDEO_SECURED 0x02
+#define TRUSTEDUI_MODE_INPUT_SECURED 0x04
+
+#ifdef CONFIG_TRUSTONIC_TRUSTED_UI
+
+int trustedui_blank_inc(void);
+int trustedui_blank_dec(void);
+int trustedui_blank_get_counter(void);
+void trustedui_blank_set_counter(int counter);
+
+int trustedui_get_current_mode(void);
+void trustedui_set_mode(int mode);
+int trustedui_set_mask(int mask);
+int trustedui_clear_mask(int mask);
+
+#endif /* CONFIG_TRUSTONIC_TRUSTED_UI */
+
+#endif /* __TBASE_TUI_H__ */
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/main.c b/drivers/misc/mediatek/gud/302a/gud/TlcTui/main.c
new file mode 100644
index 000000000..91ec18f2e
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/main.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "mobicore_driver_api.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+#include "build_tag.h"
+
+/*static int tui_dev_major_number = 122; */
+
+/*module_param(tui_dev_major_number, int, 0000); */
+/*MODULE_PARM_DESC(major, */
+/* "The device major number used to register a unique char device driver"); */
+
+/* Static variables */
+static struct cdev tui_cdev;
+
+static long tui_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int __user *uarg = (int __user *)arg;
+
+ if (_IOC_TYPE(cmd) != TUI_IO_MAGIC)
+ return -EINVAL;
+
+ pr_info("t-base-tui module: ioctl 0x%x ", cmd);
+
+ switch (cmd) {
+ case TUI_IO_NOTIFY:
+ pr_info("TUI_IO_NOTIFY\n");
+
+ if (tlc_notify_event(arg))
+ ret = 0;
+ else
+ ret = -EFAULT;
+ break;
+
+ case TUI_IO_WAITCMD: {
+ uint32_t cmd_id;
+
+ pr_info("TUI_IO_WAITCMD\n");
+
+ ret = tlc_wait_cmd(&cmd_id);
+ if (ret)
+ return ret;
+
+ /* Write command id to user */
+ pr_debug("IOCTL: sending command %d to user.\n", cmd_id);
+
+ if (copy_to_user(uarg, &cmd_id, sizeof(cmd_id)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+ }
+
+ case TUI_IO_ACK: {
+ struct tlc_tui_response_t rsp_id;
+
+ pr_info("TUI_IO_ACK\n");
+
+ /* Read user response */
+ if (copy_from_user(&rsp_id, uarg, sizeof(rsp_id)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+ pr_debug("IOCTL: User completed command %d.\n", rsp_id.id);
+ ret = tlc_ack_cmd(&rsp_id);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ default:
+ pr_info("undefined!\n");
+ return -ENOTTY;
+ }
+
+ return ret;
+}
+
+static const struct file_operations tui_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tui_ioctl,
+};
+
+/*--------------------------------------------------------------------------- */
+static int __init tlc_tui_init(void)
+{
+	dev_t devno;
+	int err;
+	static struct class *tui_class;
+
+	pr_info("Loading t-base-tui module.\n");
+	pr_debug("\n=============== Running TUI Kernel TLC ===============\n");
+	pr_info("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
+
+	err = alloc_chrdev_region(&devno, 0, 1, TUI_DEV_NAME);
+	if (err) {
+		pr_err("Unable to allocate Trusted UI device number\n");
+		return err;
+	}
+
+ cdev_init(&tui_cdev, &tui_fops);
+ tui_cdev.owner = THIS_MODULE;
+ /* tui_cdev.ops = &tui_fops; */
+
+ err = cdev_add(&tui_cdev, devno, 1);
+ if (err) {
+		pr_err("Unable to add Trusted UI char device\n");
+ unregister_chrdev_region(devno, 1);
+ return err;
+ }
+
+ tui_class = class_create(THIS_MODULE, "tui_cls");
+ device_create(tui_class, NULL, devno, NULL, TUI_DEV_NAME);
+
+ if (!hal_tui_init())
+ return -1;
+
+ return 0;
+}
+
+static void __exit tlc_tui_exit(void)
+{
+ pr_info("Unloading t-base-tui module.\n");
+
+	cdev_del(&tui_cdev);
+	unregister_chrdev_region(tui_cdev.dev, 1);
+
+ hal_tui_exit();
+}
+
+module_init(tlc_tui_init);
+module_exit(tlc_tui_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("<t-base TUI");
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/public/tui_ioctl.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/public/tui_ioctl.h
new file mode 100644
index 000000000..def13393d
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/public/tui_ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+
+
+/* Response header */
+struct tlc_tui_response_t {
+ uint32_t id;
+ uint32_t return_code;
+};
+
+/* Command IDs */
+#define TLC_TUI_CMD_NONE 0
+#define TLC_TUI_CMD_START_ACTIVITY 1
+#define TLC_TUI_CMD_STOP_ACTIVITY 2
+
+/* Return codes */
+#define TLC_TUI_OK 0
+#define TLC_TUI_ERROR 1
+#define TLC_TUI_ERR_UNKNOWN_CMD 2
+
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME "t-base-tui"
+
+#define TUI_IO_MAGIC 't'
+
+#define TUI_IO_NOTIFY _IOW(TUI_IO_MAGIC, 1, uint32_t)
+#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, uint32_t)
+#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.c b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.c
new file mode 100644
index 000000000..8c096c1eb
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "mobicore_driver_api.h"
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+
+
+/* ------------------------------------------------------------- */
+/* Globals */
+struct tui_dci_msg_t *dci;
+DECLARE_COMPLETION(dci_comp);
+DECLARE_COMPLETION(io_comp);
+
+/* ------------------------------------------------------------- */
+/* Static */
+static const uint32_t DEVICE_ID = MC_DEVICE_ID_DEFAULT;
+static struct task_struct *thread_id;
+static uint32_t g_cmd_id = TLC_TUI_CMD_NONE;
+static struct mc_session_handle dr_session_handle = {0, 0};
+static struct tlc_tui_response_t g_user_rsp = {
+ TLC_TUI_CMD_NONE, TLC_TUI_ERR_UNKNOWN_CMD};
+/* Functions */
+
+/* ------------------------------------------------------------- */
+static bool tlc_open_driver(void)
+{
+ bool ret = false;
+ enum mc_result mc_ret;
+ struct mc_uuid_t dr_uuid = DR_TUI_UUID;
+
+ /* Allocate WSM buffer for the DCI */
+ mc_ret = mc_malloc_wsm(DEVICE_ID, 0, sizeof(struct tui_dci_msg_t),
+ (uint8_t **)&dci, 0);
+ if (MC_DRV_OK != mc_ret) {
+ pr_debug("ERROR %s: Allocation of DCI WSM failed: %d\n",
+ __func__, mc_ret);
+ return false;
+ }
+
+ /* Clear the session handle */
+ memset(&dr_session_handle, 0, sizeof(dr_session_handle));
+ /* The device ID (default device is used */
+ dr_session_handle.device_id = DEVICE_ID;
+ /* Open session with the Driver */
+ mc_ret = mc_open_session(&dr_session_handle, &dr_uuid, (uint8_t *)dci,
+ (uint32_t)sizeof(struct tui_dci_msg_t));
+ if (MC_DRV_OK != mc_ret) {
+ pr_debug("ERROR %s: Open driver session failed: %d\n",
+ __func__, mc_ret);
+ ret = false;
+ } else {
+ ret = true;
+ }
+
+ return ret;
+}
+
+
+/* ------------------------------------------------------------- */
+static bool tlc_open(void)
+{
+ bool ret = false;
+ enum mc_result mc_ret;
+
+ /* Open the tbase device */
+ pr_debug("%s: Opening tbase device\n", __func__);
+ mc_ret = mc_open_device(DEVICE_ID);
+
+ /* In case the device is already open, mc_open_device will return an
+ * error (MC_DRV_ERR_INVALID_OPERATION). But in this case, we can
+ * continue, even though mc_open_device returned an error. Stop in all
+ * other case of error
+ */
+ if (MC_DRV_OK != mc_ret && MC_DRV_ERR_INVALID_OPERATION != mc_ret) {
+ pr_debug("ERROR %s: Error %d opening device\n", __func__,
+ mc_ret);
+ return false;
+ }
+
+ pr_debug("%s: Opening driver session\n", __func__);
+ ret = tlc_open_driver();
+
+ return ret;
+}
+
+
+/* ------------------------------------------------------------- */
+static void tlc_wait_cmd_from_driver(void)
+{
+ uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
+
+ /* Wait for a command from secure driver */
+ ret = mc_wait_notification(&dr_session_handle, -1);
+ if (MC_DRV_OK == ret)
+ pr_debug("tlc_wait_cmd_from_driver: Got a command\n");
+ else
+ pr_debug("ERROR %s: mc_wait_notification() failed: %d\n",
+ __func__, ret);
+}
+
+
+static uint32_t send_cmd_to_user(uint32_t command_id)
+{
+ uint32_t ret = TUI_DCI_ERR_NO_RESPONSE;
+
+ /* Init shared variables */
+ g_cmd_id = command_id;
+ g_user_rsp.id = TLC_TUI_CMD_NONE;
+ g_user_rsp.return_code = TLC_TUI_ERR_UNKNOWN_CMD;
+
+ /* Give way to ioctl thread */
+ complete(&dci_comp);
+ pr_debug("send_cmd_to_user: give way to ioctl thread\n");
+
+ /* Wait for ioctl thread to complete */
+ wait_for_completion(&io_comp);
+ pr_debug("send_cmd_to_user: Got an answer from ioctl thread.\n");
+ INIT_COMPLETION(io_comp);
+
+ /* Check id of the cmd processed by ioctl thread (paranoia) */
+ if (g_user_rsp.id != command_id) {
+ pr_debug("ERROR %s: Wrong response id 0x%08x iso 0x%08x\n",
+ __func__, dci->nwd_rsp.id, RSP_ID(command_id));
+ ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ } else {
+ /* retrieve return code */
+ switch (g_user_rsp.return_code) {
+ case TLC_TUI_OK:
+ ret = TUI_DCI_OK;
+ break;
+ case TLC_TUI_ERROR:
+ ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ break;
+ case TLC_TUI_ERR_UNKNOWN_CMD:
+ ret = TUI_DCI_ERR_UNKNOWN_CMD;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* ------------------------------------------------------------- */
+static void tlc_process_cmd(void)
+{
+ uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ uint32_t command_id = CMD_TUI_SW_NONE;
+
+ if (NULL == dci) {
+ pr_debug("ERROR %s: DCI has not been set up properly - exiting"\
+ "\n", __func__);
+ return;
+ } else {
+ command_id = dci->cmd_nwd.id;
+ }
+
+ /* Warn if previous response was not acknowledged */
+ if (CMD_TUI_SW_NONE == command_id) {
+ pr_debug("ERROR %s: Notified without command\n", __func__);
+ return;
+ } else {
+ if (dci->nwd_rsp.id != CMD_TUI_SW_NONE)
+ pr_debug("%s: Warning, previous response not ack\n",
+ __func__);
+ }
+
+ /* Handle command */
+ switch (command_id) {
+ case CMD_TUI_SW_OPEN_SESSION:
+ pr_debug("%s: CMD_TUI_SW_OPEN_SESSION.\n", __func__);
+
+ /* Start android TUI activity */
+ ret = send_cmd_to_user(TLC_TUI_CMD_START_ACTIVITY);
+ if (TUI_DCI_OK != ret)
+ break;
+
+ /* allocate TUI frame buffer */
+ ret = hal_tui_alloc(dci->nwd_rsp.alloc_buffer,
+ dci->cmd_nwd.payload.alloc_data.alloc_size,
+ dci->cmd_nwd.payload.alloc_data.num_of_buff);
+
+ if (TUI_DCI_OK != ret)
+ break;
+
+ /* Deactivate linux UI drivers */
+ ret = hal_tui_deactivate();
+
+ if (TUI_DCI_OK != ret) {
+ hal_tui_free();
+ send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
+ break;
+ }
+
+ break;
+
+ case CMD_TUI_SW_CLOSE_SESSION:
+ pr_debug("%s: CMD_TUI_SW_CLOSE_SESSION.\n", __func__);
+
+ /* Activate linux UI drivers */
+ ret = hal_tui_activate();
+
+ hal_tui_free();
+
+ /* Stop android TUI activity */
+ ret = send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
+ break;
+
+ default:
+ pr_debug("ERROR %s: Unknown command %d\n",
+ __func__, command_id);
+ break;
+ }
+
+ /* Fill in response to SWd, fill ID LAST */
+ pr_debug("%s: return 0x%08x to cmd 0x%08x\n",
+ __func__, ret, command_id);
+ dci->nwd_rsp.return_code = ret;
+ dci->nwd_rsp.id = RSP_ID(command_id);
+
+ /* Acknowledge command */
+ dci->cmd_nwd.id = CMD_TUI_SW_NONE;
+
+ /* Notify SWd */
+ pr_debug("DCI RSP NOTIFY CORE\n");
+ ret = mc_notify(&dr_session_handle);
+ if (MC_DRV_OK != ret)
+ pr_debug("ERROR %s: Notify failed: %d\n", __func__, ret);
+}
+
+
+/* ------------------------------------------------------------- */
+static void tlc_close_driver(void)
+{
+ enum mc_result ret;
+
+ /* Close session with the Driver */
+ ret = mc_close_session(&dr_session_handle);
+ if (MC_DRV_OK != ret) {
+ pr_debug("ERROR %s: Closing driver session failed: %d\n",
+ __func__, ret);
+ }
+}
+
+
+/* ------------------------------------------------------------- */
+static void tlc_close(void)
+{
+ enum mc_result ret;
+
+ pr_debug("%s: Closing driver session\n", __func__);
+ tlc_close_driver();
+
+ pr_debug("%s: Closing tbase\n", __func__);
+ /* Close the tbase device */
+ ret = mc_close_device(DEVICE_ID);
+ if (MC_DRV_OK != ret) {
+ pr_debug("ERROR %s: Closing tbase device failed: %d\n",
+ __func__, ret);
+ }
+}
+
+/* ------------------------------------------------------------- */
+bool tlc_notify_event(uint32_t event_type)
+{
+ bool ret = false;
+ enum mc_result result;
+
+ if (NULL == dci) {
+ pr_debug("ERROR tlc_notify_event: DCI has not been set up "\
+ "properly - exiting\n");
+ return false;
+ }
+
+ /* Wait for previous notification to be acknowledged */
+ while (dci->nwd_notif != NOT_TUI_NONE) {
+ pr_debug("TLC waiting for previous notification ack\n");
+ usleep_range(10000, 10000);
+ };
+
+ /* Prepare notification message in DCI */
+ pr_debug("tlc_notify_event: event_type = %d\n", event_type);
+ dci->nwd_notif = event_type;
+
+ /* Signal the Driver */
+ pr_debug("DCI EVENT NOTIFY CORE\n");
+ result = mc_notify(&dr_session_handle);
+ if (MC_DRV_OK != result) {
+ pr_debug("ERROR tlc_notify_event: mc_notify failed: %d\n",
+ result);
+ ret = false;
+ } else {
+ ret = true;
+ }
+
+ return ret;
+}
+
+/* ------------------------------------------------------------- */
+/**
+ */
+int main_thread(void *uarg)
+{
+ pr_debug("main_thread: TlcTui start!\n");
+
+ /* Open session on the driver */
+ if (!tlc_open()) {
+ pr_debug("ERROR main_thread: open driver failed!\n");
+ return 1;
+ }
+
+ /* TlcTui main thread loop */
+ for (;;) {
+ /* Wait for a command from the DrTui on DCI*/
+ tlc_wait_cmd_from_driver();
+ /* Something has been received, process it. */
+ tlc_process_cmd();
+ }
+
+ /* Close tlc. Note that this frees the DCI pointer.
+ * Do not use this pointer after tlc_close().*/
+ tlc_close();
+
+ return 0;
+}
+
+int tlc_wait_cmd(uint32_t *cmd_id)
+{
+ /* Create the TlcTui Main thread and start secure driver (only
+ 1st time) */
+ if (dr_session_handle.session_id == 0) {
+ thread_id = kthread_run(main_thread, NULL, "dci_thread");
+ if (!thread_id) {
+ pr_debug(KERN_ERR "Unable to start Trusted UI main thread\n");
+ return -EFAULT;
+ }
+ }
+
+ /* Wait for signal from DCI handler */
+ if (wait_for_completion_interruptible(&dci_comp)) {
+ pr_debug("interrupted by system\n");
+ return -ERESTARTSYS;
+ }
+ INIT_COMPLETION(dci_comp);
+
+ *cmd_id = g_cmd_id;
+ return 0;
+}
+
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id)
+{
+ g_user_rsp = *rsp_id;
+
+ /* Send signal to DCI */
+ complete(&io_comp);
+
+ return 0;
+}
+
+/** @} */
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.h
new file mode 100644
index 000000000..eae4ffa77
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tlcTui.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TLCTUI_H_
+#define TLCTUI_H_
+
+int tlc_wait_cmd(uint32_t *cmd_id);
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id);
+bool tlc_notify_event(uint32_t event_type);
+
+#endif /* TLCTUI_H_ */
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/trustedui.c b/drivers/misc/mediatek/gud/302a/gud/TlcTui/trustedui.c
new file mode 100644
index 000000000..91e27ac26
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/trustedui.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * File : trustedui.c
+ * Created : 26-02-2010
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+//#include <linux/t-base-tui.h>
+#include <t-base-tui.h>
+
+static int trustedui_mode = TRUSTEDUI_MODE_OFF;
+static int trustedui_blank_counter;
+
+static DEFINE_SPINLOCK(trustedui_lock);
+
+int trustedui_blank_inc(void)
+{
+ unsigned long flags;
+ int newvalue;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ newvalue = ++trustedui_blank_counter;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_inc);
+
+int trustedui_blank_dec(void)
+{
+ unsigned long flags;
+ int newvalue;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ newvalue = --trustedui_blank_counter;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_dec);
+
+int trustedui_blank_get_counter(void)
+{
+ unsigned long flags;
+ int newvalue;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ newvalue = trustedui_blank_counter;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return newvalue;
+}
+EXPORT_SYMBOL(trustedui_blank_get_counter);
+
+void trustedui_blank_set_counter(int counter)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ trustedui_blank_counter = counter;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_blank_set_counter);
+
+int trustedui_get_current_mode(void)
+{
+ unsigned long flags;
+ int mode;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ mode = trustedui_mode;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return mode;
+}
+EXPORT_SYMBOL(trustedui_get_current_mode);
+
+void trustedui_set_mode(int mode)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ trustedui_mode = mode;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+}
+EXPORT_SYMBOL(trustedui_set_mode);
+
+
+int trustedui_set_mask(int mask)
+{
+ unsigned long flags;
+ int mode;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ mode = trustedui_mode |= mask;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return mode;
+}
+EXPORT_SYMBOL(trustedui_set_mask);
+
+int trustedui_clear_mask(int mask)
+{
+ unsigned long flags;
+ int mode;
+
+ spin_lock_irqsave(&trustedui_lock, flags);
+ mode = trustedui_mode &= ~mask;
+ spin_unlock_irqrestore(&trustedui_lock, flags);
+
+ return mode;
+}
+EXPORT_SYMBOL(trustedui_clear_mask);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("<t-base TUI");
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal.h b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal.h
new file mode 100644
index 000000000..778b49338
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TUI_HAL_H_
+#define _TUI_HAL_H_
+
+#include <linux/types.h>
+
+uint32_t hal_tui_init(void);
+void hal_tui_exit(void);
+uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+ size_t allocsize, uint32_t number);
+void hal_tui_free(void);
+uint32_t hal_tui_deactivate(void);
+uint32_t hal_tui_activate(void);
+
+#endif
diff --git a/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal_mt6735.c b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal_mt6735.c
new file mode 100644
index 000000000..0428344f4
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/TlcTui/tui-hal_mt6735.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fb.h>
+
+#define CONFIG_TRUSTONIC_TRUSTED_UI
+#include <t-base-tui.h>
+
+#include "tui_ioctl.h"
+#include "dciTui.h"
+#include "tlcTui.h"
+#include "tui-hal.h"
+#include <linux/delay.h>
+
+#include <mach/mt_clkmgr.h>
+
+
+#define TUI_MEMPOOL_SIZE 0
+
+/* Extrac memory size required for TUI driver */
+#define TUI_EXTRA_MEM_SIZE (0x200000)
+
+struct tui_mempool {
+ void *va;
+ unsigned long pa;
+ size_t size;
+};
+
+/* for TUI EINT mepping to Security World */
+extern void gt1x_power_reset(void);
+extern int mt_eint_set_deint(int eint_num, int irq_num);
+extern int mt_eint_clr_deint(int eint_num);
+extern int tpd_reregister_from_tui(void);
+extern int tpd_enter_tui(void);
+extern int tpd_exit_tui(void);
+extern int secmem_api_alloc(u32 alignment, u32 size, u32 *refcount, u32 *sec_handle,
+ uint8_t *owner, uint32_t id);
+extern int secmem_api_unref(u32 sec_handle, uint8_t *owner, uint32_t id);
+extern int tui_region_offline(phys_addr_t *pa, unsigned long *size);
+extern int tui_region_online(void);
+static struct tui_mempool g_tui_mem_pool;
+static int g_tui_secmem_handle;
+
+/* basic implementation of a memory pool for TUI framebuffer. This
+ * implementation is using kmalloc, for the purpose of demonstration only.
+ * A real implementation might prefer using more advanced allocator, like ION,
+ * in order not to exhaust memory available to kmalloc
+ */
+static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
+{
+ bool ret = false;
+ void *tui_mem_pool = NULL;
+
+ pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
+ if (!size) {
+ pr_debug("TUI frame buffer: nothing to allocate.");
+ return true;
+ }
+
+ tui_mem_pool = kmalloc(size, GFP_KERNEL);
+ if (!tui_mem_pool) {
+ pr_debug("ERROR Could not allocate TUI memory pool");
+ } else if (ksize(tui_mem_pool) < size) {
+ pr_debug("ERROR TUI memory pool allocated size is too small."\
+ " required=%zd allocated=%zd",
+ size, ksize(tui_mem_pool));
+ kfree(tui_mem_pool);
+ } else {
+ pool->va = tui_mem_pool;
+ pool->pa = virt_to_phys(tui_mem_pool);
+ pool->size = ksize(tui_mem_pool);
+ ret = true;
+ }
+ return ret;
+}
+
+static void free_tui_memory_pool(struct tui_mempool *pool)
+{
+ kfree(pool->va);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+uint32_t hal_tui_init(void)
+{
+ /* Allocate memory pool for the framebuffer
+ */
+ if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+
+ return TUI_DCI_OK;
+}
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exit. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void)
+{
+ /* delete memory pool if any */
+ if (g_tui_mem_pool.va)
+ free_tui_memory_pool(&g_tui_mem_pool);
+}
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer: putput parameter that the allocator fills with the physical
+ * addresses of the allocated buffers
+ * @allocsize: size of the buffer to allocate. All the buffer are of the
+ * same size
+ * @number: Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver. The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes. The physical address of each buffer must be
+ * stored in the array of structure 'allocbuffer' which is provided as
+ * arguments.
+ *
+ * Physical address of the first buffer must be put in allocate[0].pa , the
+ * second one on allocbuffer[1].pa, and so on. The function must return 0 on
+ * success, non-zero on error. For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ */
+uint32_t hal_tui_alloc(
+ tuiAllocBuffer_t *allocbuffer, size_t allocsize, uint32_t number)
+{
+ uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ phys_addr_t pa;
+ u32 sec_handle = 0;
+ u32 refcount = 0;
+ unsigned long size = 0;
+
+ if (!allocbuffer) {
+ pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ pr_debug("%s(%d): Requested size=0x%zx x %u chunks\n",
+ __func__, __LINE__, allocsize, number);
+
+ if ((size_t)allocsize == 0) {
+ pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
+ return TUI_DCI_OK;
+ }
+
+ if (number != 2) {
+ pr_debug("%s(%d): Unexpected number of buffers requested\n",
+ __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ /*ret = secmem_api_alloc(4096, allocsize*number+TUI_EXTRA_MEM_SIZE, &refcount,
+ &sec_handle, __func__, __LINE__);*/
+ ret = tui_region_offline(&pa, &size);
+ if (ret) {
+ pr_err("%s(%d): tui_region_offline failed!\n",
+ __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ if (ret == 0) {
+ g_tui_secmem_handle = pa;
+ allocbuffer[0].pa = (uint64_t) pa;
+ allocbuffer[1].pa = (uint64_t) (pa + allocsize);
+ } else {
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+ pr_debug("tui pa=0x%x, size=0x%lx", (uint32_t)pa, size);
+
+ pr_debug("tui-hal allocasize=%ld number=%d, extra=%d\n", allocsize, number, TUI_EXTRA_MEM_SIZE);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[0].pa);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[1].pa);
+
+ pr_debug("%s: sec_handle=%x ret=%d", __func__, sec_handle, (int)ret);
+ return TUI_DCI_OK;
+
+#if 0
+ if ((size_t)(allocsize*number) <= g_tui_mem_pool.size) {
+ /* requested buffer fits in the memory pool */
+ allocbuffer[0].pa = (uint64_t) g_tui_mem_pool.pa;
+ allocbuffer[1].pa = (uint64_t) (g_tui_mem_pool.pa +
+ g_tui_mem_pool.size/2);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[0].pa);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[1].pa);
+ ret = TUI_DCI_OK;
+ } else {
+ /* requested buffer is bigger than the memory pool, return an
+ error */
+ pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
+ ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ return ret;
+#endif
+}
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void)
+{
+ pr_info("[TUI-HAL] hal_tui_free()\n");
+ if (g_tui_secmem_handle) {
+ //secmem_api_unref(g_tui_secmem_handle, __func__, __LINE__);
+ tui_region_online();
+ g_tui_secmem_handle = 0;
+ }
+}
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+
+extern int display_enter_tui();
+extern int display_exit_tui();
+
+uint32_t hal_tui_deactivate(void)
+{
+ int ret = TUI_DCI_OK, tmp;
+ pr_info("[TUI-HAL] hal_tui_deactivate()\n");
+ /* Set linux TUI flag */
+ trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+ pr_info("TDDP/[TUI-HAL] %s()\n", __func__);
+ /*
+ * Stop NWd display here. After this function returns, SWd will take
+ * control of the display and input. Therefore the NWd should no longer
+ * access it
+ * This can be done by calling the fb_blank(FB_BLANK_POWERDOWN) function
+ * on the appropriate framebuffer device
+ */
+ tpd_enter_tui();
+ mt_eint_set_deint(10, 187);
+ enable_clock(MT_CG_PERI_I2C0, "i2c");
+ enable_clock(MT_CG_PERI_I2C1, "i2c");
+ enable_clock(MT_CG_PERI_I2C2, "i2c");
+ enable_clock(MT_CG_PERI_I2C3, "i2c");
+ enable_clock(MT_CG_PERI_APDMA, "i2c");
+
+ //gt1x_power_reset();
+
+ tmp = display_enter_tui();
+ if(tmp) {
+ pr_debug("TDDP/[TUI-HAL] %s() failed because display\n", __func__);
+ ret = TUI_DCI_ERR_OUT_OF_DISPLAY;
+ }
+
+
+ trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+ TRUSTEDUI_MODE_INPUT_SECURED);
+
+ pr_info("TDDP/[TUI-HAL] %s()\n", __func__);
+
+ return ret;
+}
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_activate(void)
+{
+ pr_info("[TUI-HAL] hal_tui_activate()\n");
+ /* Protect NWd */
+ trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+ TRUSTEDUI_MODE_INPUT_SECURED);
+
+ pr_info("TDDP %s()\n", __func__);
+
+ /*
+ * Restart NWd display here. TUI session has ended, and therefore the
+ * SWd will no longer use display and input.
+ * This can be done by calling the fb_blank(FB_BLANK_UNBLANK) function
+ * on the appropriate framebuffer device
+ */
+ /* Clear linux TUI flag */
+ mt_eint_clr_deint(10);
+ tpd_exit_tui();
+ tpd_reregister_from_tui();
+ //gt1x_power_reset();
+
+ disable_clock(MT_CG_PERI_I2C0, "i2c");
+ disable_clock(MT_CG_PERI_I2C1, "i2c");
+ disable_clock(MT_CG_PERI_I2C2, "i2c");
+ disable_clock(MT_CG_PERI_I2C3, "i2c");
+ disable_clock(MT_CG_PERI_APDMA, "i2c");
+
+ display_exit_tui();
+
+
+ trustedui_set_mode(TRUSTEDUI_MODE_OFF);
+
+ return TUI_DCI_OK;
+}
+
diff --git a/drivers/misc/mediatek/gud/302a/gud/build_tag.h b/drivers/misc/mediatek/gud/302a/gud/build_tag.h
new file mode 100644
index 000000000..b7a78a7d4
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302a/gud/build_tag.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define MOBICORE_COMPONENT_BUILD_TAG \
+ "t-base-Mediatek-Armv8-Android-302A-V010-20150908_113718_68"
diff --git a/drivers/misc/mediatek/gud/302c/Makefile b/drivers/misc/mediatek/gud/302c/Makefile
new file mode 100644
index 000000000..4937fb49c
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/Makefile
@@ -0,0 +1,7 @@
+#ccflags-y += -Werror
+
+ifeq ($(CONFIG_ARM64), y)
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += gud/
+else
+obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += gud/
+endif
diff --git a/drivers/misc/mediatek/gud/302c/gud/Kconfig b/drivers/misc/mediatek/gud/302c/gud/Kconfig
new file mode 100644
index 000000000..164a34f36
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/Kconfig
@@ -0,0 +1,42 @@
+#
+# MobiCore configuration
+#
+config MOBICORE_DRIVER
+ tristate "MobiCore Driver"
+ depends on ARM
+ ---help---
+ Enable Linux Kernel MobiCore Support
+
+config MOBICORE_DEBUG
+ bool "MobiCore Module debug mode"
+ depends on MOBICORE_DRIVER
+ ---help---
+ Enable Debug mode in the MobiCore Driver.
+ It enables printing information about mobicore operations
+
+config MOBICORE_VERBOSE
+ bool "MobiCore Module verbose debug mode"
+ depends on MOBICORE_DEBUG
+ ---help---
+ Enable Verbose Debug mode in the MobiCore Driver.
+ It enables printing extra information about mobicore operations
+ Beware: this is only useful for debuging deep in the driver because
+ it prints too much logs
+
+config MOBICORE_API
+ tristate "Linux MobiCore API"
+ depends on MOBICORE_DRIVER
+ ---help---
+ Enable Linux Kernel MobiCore API
+
+config TRUSTONIC_TRUSTED_UI
+ tristate "<t-base TUI"
+ depends on MOBICORE_API
+ ---help---
+ Enable <t-base Trusted User Interface
+
+config TRUSTONIC_TRUSTED_UI_FB_BLANK
+ bool "<t-base TUI with fb_blank"
+ depends on TRUSTONIC_TRUSTED_UI
+ ---help---
+ Blank the framebuffer before starting a TUI session
diff --git a/drivers/misc/mediatek/gud/mt6735/gud/Makefile b/drivers/misc/mediatek/gud/302c/gud/Makefile
index c5711bed8..4480019b5 100755..100644
--- a/drivers/misc/mediatek/gud/mt6735/gud/Makefile
+++ b/drivers/misc/mediatek/gud/302c/gud/Makefile
@@ -10,6 +10,7 @@ GUD_ROOT_FOLDER := $(dir $(lastword $(MAKEFILE_LIST)))
# add our modules to kernel.
obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += mcKernelApi.o
obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += mcDrvModule.o
+obj-$(CONFIG_TRUSTONIC_TRUSTED_UI) += TlcTui.o
mcDrvModule-objs := MobiCoreDriver/logging.o \
MobiCoreDriver/ops.o \
@@ -24,6 +25,11 @@ mcKernelApi-objs := MobiCoreKernelApi/main.o \
MobiCoreKernelApi/session.o \
MobiCoreKernelApi/connection.o
+TlcTui-objs := TlcTui/main.o \
+ TlcTui/tlcTui.o \
+ TlcTui/trustedui.o \
+ TlcTui/tui-hal_$(MTK_PLATFORM).o
+
# Release mode by default
ccflags-y := -DNDEBUG
ccflags-y += -Wno-declaration-after-statement
@@ -39,6 +45,11 @@ ccflags-y += -DMC_NETLINK_COMPAT_V37
#ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
# MobiCore Driver includes
ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreDriver/public
-# MobiCore KernelApi required incldes
+# MobiCore KernelApi required includes
ccflags-y += -I$(GUD_ROOT_FOLDER)MobiCoreKernelApi/include \
-I$(GUD_ROOT_FOLDER)MobiCoreKernelApi/public
+
+# MobiCore TlcTui required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/TlcTui \
+ -I$(GUD_ROOT_FOLDER)/TlcTui/inc \
+ -I$(GUD_ROOT_FOLDER)/TlcTui/public
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/api.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/api.c
new file mode 100644
index 000000000..354f5ddff
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/api.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+
+#include "main.h"
+#include "mem.h"
+#include "debug.h"
+
+int mobicore_map_vmem(struct mc_instance *instance, void *addr,
+ uint32_t len, uint32_t *handle)
+{
+ phys_addr_t phys;
+ return mc_register_wsm_mmu(instance, addr, len,
+ handle, &phys);
+}
+EXPORT_SYMBOL(mobicore_map_vmem);
+
+/*
+ * Unmap a virtual memory buffer from mobicore
+ * @param instance
+ * @param handle
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle)
+{
+ return mc_unregister_wsm_mmu(instance, handle);
+}
+EXPORT_SYMBOL(mobicore_unmap_vmem);
+
+/*
+ * Free a WSM buffer allocated with mobicore_allocate_wsm
+ * @param instance
+ * @param handle handle of the buffer
+ *
+ * @return 0 if no error
+ *
+ */
+int mobicore_free_wsm(struct mc_instance *instance, uint32_t handle)
+{
+ return mc_free_buffer(instance, handle);
+}
+EXPORT_SYMBOL(mobicore_free_wsm);
+
+
+/*
+ * Allocate WSM for given instance
+ *
+ * @param instance instance
+ * @param requested_size size of the WSM
+ * @param handle pointer where the handle will be saved
+ * @param virt_kernel_addr pointer for the kernel virtual address
+ *
+ * @return error code or 0 for success
+ */
+int mobicore_allocate_wsm(struct mc_instance *instance,
+ unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr)
+{
+ struct mc_buffer *buffer = NULL;
+
+ /* Setup the WSM buffer structure! */
+ if (mc_get_buffer(instance, &buffer, requested_size))
+ return -EFAULT;
+
+ *handle = buffer->handle;
+ *virt_kernel_addr = buffer->addr;
+ return 0;
+}
+EXPORT_SYMBOL(mobicore_allocate_wsm);
+
+/*
+ * Initialize a new mobicore API instance object
+ *
+ * @return Instance or NULL if no allocation was possible.
+ */
+struct mc_instance *mobicore_open(void)
+{
+ struct mc_instance *instance = mc_alloc_instance();
+ if (instance)
+ instance->admin = true;
+ return instance;
+}
+EXPORT_SYMBOL(mobicore_open);
+
+/*
+ * Release a mobicore instance object and all objects related to it
+ * @param instance instance
+ * @return 0 if Ok or -E ERROR
+ */
+int mobicore_release(struct mc_instance *instance)
+{
+ return mc_release_instance(instance);
+}
+EXPORT_SYMBOL(mobicore_release);
+
+/*
+ * Test if mobicore can sleep
+ *
+ * @return true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void)
+{
+ return mc_sleep_ready();
+}
+EXPORT_SYMBOL(mobicore_sleep_ready);
+
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/arm.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/arm.h
new file mode 100644
index 000000000..8c9fc37ee
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/arm.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_ARM_H_
+#define _MC_ARM_H_
+
+#include "debug.h"
+
+#ifdef CONFIG_ARM64
+inline bool has_security_extensions(void)
+{
+ return true;
+}
+
+inline bool is_secure_mode(void)
+{
+ return false;
+}
+#else
+/*
+ * ARM Trustzone specific masks and modes
+ * Vanilla Linux is unaware of TrustZone extension.
+ * I.e. arch/arm/include/asm/ptrace.h does not define monitor mode.
+ * Also TZ bits in cpuid are not defined, ARM port uses magic numbers,
+ * see arch/arm/kernel/setup.c
+ */
+#define ARM_MONITOR_MODE (0x16) /*(0b10110)*/
+#define ARM_SECURITY_EXTENSION_MASK (0x30)
+
+/* check if CPU supports the ARM TrustZone Security Extensions */
+inline bool has_security_extensions(void)
+{
+ u32 fea = 0;
+ asm volatile(
+ "mrc p15, 0, %[fea], cr0, cr1, 0" :
+ [fea]"=r" (fea));
+
+ MCDRV_DBG_VERBOSE(mcd, "CPU Features: 0x%X", fea);
+
+ /*
+ * If the CPU features ID has 0 for security features then the CPU
+ * doesn't support TrustZone at all!
+ */
+ if ((fea & ARM_SECURITY_EXTENSION_MASK) == 0)
+ return false;
+
+ return true;
+}
+
+/* check if running in secure mode */
+inline bool is_secure_mode(void)
+{
+ u32 cpsr = 0;
+ u32 nsacr = 0;
+
+ asm volatile(
+ "mrc p15, 0, %[nsacr], cr1, cr1, 2\n"
+ "mrs %[cpsr], cpsr\n" :
+ [nsacr]"=r" (nsacr),
+ [cpsr]"=r"(cpsr));
+
+ MCDRV_DBG_VERBOSE(mcd, "CPRS.M = set to 0x%X\n", cpsr & MODE_MASK);
+ MCDRV_DBG_VERBOSE(mcd, "SCR.NS = set to 0x%X\n", nsacr);
+
+ /*
+ * If the NSACR contains the reset value(=0) then most likely we are
+ * running in Secure MODE.
+ * If the cpsr mode is set to monitor mode then we cannot load!
+ */
+ if (nsacr == 0 || ((cpsr & MODE_MASK) == ARM_MONITOR_MODE))
+ return true;
+
+ return false;
+}
+#endif
+
+#endif /* _MC_ARM_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/debug.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/debug.h
new file mode 100644
index 000000000..52362b346
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/debug.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_DEBUG_H_
+#define _MC_DEBUG_H_
+/* Found in main.c */
+extern struct device *mcd;
+
+#define MCDRV_DBG_ERROR(dev, txt, ...) \
+ dev_err(dev, "MobiCore %s() ### ERROR: " txt "\n", \
+ __func__, \
+ ##__VA_ARGS__)
+
+/* dummy function helper macro. */
+#define DUMMY_FUNCTION() do {} while (0)
+
+#if defined(DEBUG)
+
+/* #define DEBUG_VERBOSE */
+#if defined(DEBUG_VERBOSE)
+#define MCDRV_DBG_VERBOSE MCDRV_DBG
+#else
+#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
+#endif
+
+#define MCDRV_DBG(dev, txt, ...) \
+ dev_info(dev, "MobiCore %s(): " txt "\n", \
+ __func__, \
+ ##__VA_ARGS__)
+
+#define MCDRV_DBG_WARN(dev, txt, ...) \
+ dev_warn(dev, "MobiCore %s() WARNING: " txt "\n", \
+ __func__, \
+ ##__VA_ARGS__)
+
+#define MCDRV_ASSERT(cond) \
+ do { \
+ if (unlikely(!(cond))) { \
+ panic("Assertion failed: %s:%d\n", \
+ __FILE__, __LINE__); \
+ } \
+ } while (0)
+
+#else
+
+#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
+#define MCDRV_DBG(...) DUMMY_FUNCTION()
+#define MCDRV_DBG_WARN(...) DUMMY_FUNCTION()
+
+#define MCDRV_ASSERT(...) DUMMY_FUNCTION()
+
+#endif /* [not] defined(DEBUG) */
+
+#endif /* _MC_DEBUG_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/fastcall.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/fastcall.h
new file mode 100644
index 000000000..b438d7244
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/fastcall.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_FASTCALL_H_
+#define _MC_FASTCALL_H_
+
+#include "debug.h"
+#include "platform.h"
+
+/* Use the arch_extension sec pseudo op before switching to secure world */
+#if defined(__GNUC__) && \
+ defined(__GNUC_MINOR__) && \
+ defined(__GNUC_PATCHLEVEL__) && \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)) \
+ >= 40502
+#ifndef CONFIG_ARM64
+#define MC_ARCH_EXTENSION_SEC
+#endif
+#endif
+
+/*
+ * MobiCore SMCs
+ */
+#define MC_SMC_N_YIELD 0x3 /* Yield to switch from NWd to SWd. */
+#define MC_SMC_N_SIQ 0x4 /* SIQ to switch from NWd to SWd. */
+
+/*
+ * MobiCore fast calls. See MCI documentation
+ */
+#ifdef MC_AARCH32_FC
+
+#define MC_FC_STD64_BASE ((uint32_t)0xFF000000)
+/**< Initializing FastCall. */
+#define MC_FC_INIT (MC_FC_STD64_BASE+1)
+/**< Info FastCall. */
+#define MC_FC_INFO (MC_FC_STD64_BASE+2)
+/**< Enable SWd tracing via memory */
+#define MC_FC_NWD_TRACE (MC_FC_STD64_BASE+10)
+#ifdef TBASE_CORE_SWITCHER
+/**< Core switching fastcall */
+#define MC_FC_SWITCH_CORE (MC_FC_STD64_BASE+54)
+#endif
+
+#else
+
+#define MC_FC_INIT -1
+#define MC_FC_INFO -2
+#define MC_FC_NWD_TRACE -31
+#ifdef TBASE_CORE_SWITCHER
+#define MC_FC_SWITCH_CORE 0x84000005
+#endif
+#endif
+
+/*
+ * return code for fast calls
+ */
+#define MC_FC_RET_OK 0
+#define MC_FC_RET_ERR_INVALID 1
+#define MC_FC_RET_ERR_ALREADY_INITIALIZED 5
+
+
+/* structure wrappers for specific fastcalls */
+
+/* generic fast call parameters */
+union fc_generic {
+ struct {
+ uint32_t cmd;
+ uint32_t param[3];
+ } as_in;
+ struct {
+ uint32_t resp;
+ uint32_t ret;
+ uint32_t param[2];
+ } as_out;
+};
+
+/* fast call init */
+union mc_fc_init {
+ union fc_generic as_generic;
+ struct {
+ uint32_t cmd;
+ uint32_t base;
+ uint32_t nq_info;
+ uint32_t mcp_info;
+ } as_in;
+ struct {
+ uint32_t resp;
+ uint32_t ret;
+ uint32_t rfu[2];
+ } as_out;
+};
+
+/* fast call info parameters */
+union mc_fc_info {
+ union fc_generic as_generic;
+ struct {
+ uint32_t cmd;
+ uint32_t ext_info_id;
+ uint32_t rfu[2];
+ } as_in;
+ struct {
+ uint32_t resp;
+ uint32_t ret;
+ uint32_t state;
+ uint32_t ext_info;
+ } as_out;
+};
+
+#ifdef TBASE_CORE_SWITCHER
+/* fast call switch Core parameters */
+union mc_fc_swich_core {
+ union fc_generic as_generic;
+ struct {
+ uint32_t cmd;
+ uint32_t core_id;
+ uint32_t rfu[2];
+ } as_in;
+ struct {
+ uint32_t resp;
+ uint32_t ret;
+ uint32_t state;
+ uint32_t ext_info;
+ } as_out;
+};
+#endif
+/*
+ * _smc() - fast call to MobiCore
+ *
+ * @data: pointer to fast call data
+ */
+#ifdef CONFIG_ARM64
+static inline long _smc(void *data)
+{
+ int ret = 0;
+
+ if (data == NULL)
+ return -EPERM;
+
+ {
+ union fc_generic *fc_generic = data;
+ /* SMC expect values in x0-x3 */
+ register u64 reg0 __asm__("x0") = fc_generic->as_in.cmd;
+ register u64 reg1 __asm__("x1") = fc_generic->as_in.param[0];
+ register u64 reg2 __asm__("x2") = fc_generic->as_in.param[1];
+ register u64 reg3 __asm__("x3") = fc_generic->as_in.param[2];
+
+ /* According to AARCH64 SMC Calling Convention (ARM DEN 0028A),
+ section 3.1 : registers x4-x17 are unpredictable/scratch
+ registers. So we have to make sure that the compiler does not
+ allocate any of those registers by letting him know that the
+ asm code might clobber them */
+ __asm__ volatile (
+ "smc #0\n"
+ : "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3) : :
+ "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12",
+ "x13", "x14", "x15", "x16", "x17"
+ );
+
+
+ /* set response */
+ fc_generic->as_out.resp = reg0;
+ fc_generic->as_out.ret = reg1;
+ fc_generic->as_out.param[0] = reg2;
+ fc_generic->as_out.param[1] = reg3;
+ }
+
+ return ret;
+}
+
+#else
+static inline long _smc(void *data)
+{
+ int ret = 0;
+
+ if (data == NULL)
+ return -EPERM;
+
+ #ifdef MC_SMC_FASTCALL
+ {
+ ret = smc_fastcall(data, sizeof(union fc_generic));
+ }
+ #else
+ {
+ union fc_generic *fc_generic = data;
+ /* SMC expect values in r0-r3 */
+ register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
+ register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
+ register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
+ register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
+
+ __asm__ volatile (
+#ifdef MC_ARCH_EXTENSION_SEC
+ /* This pseudo op is supported and required from
+ * binutils 2.21 on */
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
+ );
+
+
+#if defined(__ARM_VE_A9X4_QEMU__) || defined(__ARM_GOLDFISH_QEMU__)
+ /* Qemu does not return to the address following the SMC
+ * instruction so we have to insert several nop instructions to
+ * workaround this Qemu bug. */
+ __asm__ volatile (
+ "nop\n"
+ "nop\n"
+ "nop\n"
+ "nop"
+ );
+#endif
+
+ /* set response */
+ fc_generic->as_out.resp = reg0;
+ fc_generic->as_out.ret = reg1;
+ fc_generic->as_out.param[0] = reg2;
+ fc_generic->as_out.param[1] = reg3;
+ }
+ #endif
+ return ret;
+}
+#endif
+
+/*
+ * convert fast call return code to linux driver module error code
+ */
+static inline int convert_fc_ret(uint32_t sret)
+{
+ int ret = -EFAULT;
+
+ switch (sret) {
+ case MC_FC_RET_OK:
+ ret = 0;
+ break;
+ case MC_FC_RET_ERR_INVALID:
+ ret = -EINVAL;
+ break;
+ case MC_FC_RET_ERR_ALREADY_INITIALIZED:
+ ret = -EBUSY;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+#endif /* _MC_FASTCALL_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.c
new file mode 100644
index 000000000..6091a4d51
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver Logging Subsystem.
+ *
+ * The logging subsystem provides the interface between the Mobicore trace
+ * buffer and the Linux log
+ */
+#include <linux/miscdevice.h>
+#include <linux/moduleparam.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+
+#include "main.h"
+#include "debug.h"
+#include "ops.h"
+#include "logging.h"
+
+/* Default length of the log ring buffer 256KB*/
+#define LOG_BUF_SIZE (64 * PAGE_SIZE)
+
+/* Max Len of a log line for printing */
+#define LOG_LINE_SIZE 256
+
+static uint32_t log_size = LOG_BUF_SIZE;
+
+module_param(log_size, uint, 0);
+MODULE_PARM_DESC(log_size, "Size of the MobiCore log ringbuffer(256KB def)");
+
+/* Definitions for log version 2 */
+#define LOG_TYPE_MASK (0x0007)
+#define LOG_TYPE_CHAR 0
+#define LOG_TYPE_INTEGER 1
+/* Field length */
+#define LOG_LENGTH_MASK (0x00F8)
+#define LOG_LENGTH_SHIFT 3
+/* Extra attributes */
+#define LOG_EOL (0x0100)
+#define LOG_INTEGER_DECIMAL (0x0200)
+#define LOG_INTEGER_SIGNED (0x0400)
+
+struct logmsg_struct {
+ uint16_t ctrl; /* Type and format of data */
+ uint16_t source; /* Unique value for each event source */
+ uint32_t log_data; /* Value, if any */
+};
+
+static uint16_t prev_source; /* Previous Log source */
+static uint32_t log_pos; /* MobiCore log previous position */
+static struct mc_trace_buf *log_buf; /* MobiCore log buffer structure */
+struct task_struct *log_thread; /* Log Thread task structure */
+static char *log_line; /* Log Line buffer */
+static uint32_t log_line_len; /* Log Line buffer current length */
+static int thread_err;
+
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+extern uint8_t trustonic_swd_debug;
+#endif
+static void log_eol(uint16_t source)
+{
+ if (!strnlen(log_line, LOG_LINE_SIZE)) {
+ /* In case a TA tries to print a 0x0 */
+ log_line_len = 0;
+ return;
+ }
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+ if (trustonic_swd_debug) {
+#endif
+ /* MobiCore Userspace */
+ if (prev_source)
+ pr_debug("%03x|%s\n", prev_source, log_line);
+ /* MobiCore kernel */
+ else
+ pr_debug("%s\n", log_line);
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+ }
+#endif
+
+ log_line_len = 0;
+ log_line[0] = 0;
+}
+
+/*
+ * Collect chars in log_line buffer and output the buffer when it is full.
+ * No locking needed because only "mobicore_log" thread updates this buffer.
+ */
+static void log_char(char ch, uint16_t source)
+{
+ if (ch == '\n' || ch == '\r') {
+ log_eol(source);
+ return;
+ }
+
+ if (log_line_len >= LOG_LINE_SIZE - 1 || source != prev_source)
+ log_eol(source);
+
+
+ log_line[log_line_len] = ch;
+ log_line[log_line_len + 1] = 0;
+ log_line_len++;
+ prev_source = source;
+}
+
+static const uint8_t HEX2ASCII[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+static void dbg_raw_nro(uint32_t format, uint32_t value, uint16_t source)
+{
+ int digits = 1;
+ uint32_t base = (format & LOG_INTEGER_DECIMAL) ? 10 : 16;
+ int width = (format & LOG_LENGTH_MASK) >> LOG_LENGTH_SHIFT;
+ int negative = 0;
+ uint32_t digit_base = 1;
+
+ if ((format & LOG_INTEGER_SIGNED) != 0 && ((signed int)value) < 0) {
+ negative = 1;
+ value = (uint32_t)(-(signed int)value);
+ width--;
+ }
+
+ /* Find length and divider to get largest digit */
+ while (value / digit_base >= base) {
+ digit_base *= base;
+ digits++;
+ }
+
+ if (width > digits) {
+ char ch = (base == 10) ? ' ' : '0';
+ while (width > digits) {
+ log_char(ch, source);
+ width--;
+ }
+ }
+
+ if (negative)
+ log_char('-', source);
+
+ while (digits-- > 0) {
+ uint32_t d = value / digit_base;
+ log_char(HEX2ASCII[d], source);
+ value = value - d * digit_base;
+ digit_base /= base;
+ }
+}
+
+static void log_msg(struct logmsg_struct *msg)
+{
+ switch (msg->ctrl & LOG_TYPE_MASK) {
+ case LOG_TYPE_CHAR: {
+ uint32_t ch;
+ ch = msg->log_data;
+ while (ch != 0) {
+ log_char(ch & 0xFF, msg->source);
+ ch >>= 8;
+ }
+ break;
+ }
+ case LOG_TYPE_INTEGER: {
+ dbg_raw_nro(msg->ctrl, msg->log_data, msg->source);
+ break;
+ }
+ default:
+ break;
+ }
+ if (msg->ctrl & LOG_EOL)
+ log_eol(msg->source);
+}
+
+static uint32_t process_log(void)
+{
+ char *last_msg = log_buf->buff + log_buf->write_pos;
+ char *buff = log_buf->buff + log_pos;
+
+ while (buff != last_msg) {
+ log_msg((struct logmsg_struct *)buff);
+ buff += sizeof(struct logmsg_struct);
+ /* Wrap around */
+ if ((buff + sizeof(struct logmsg_struct)) >
+ ((char *)log_buf + log_size))
+ buff = log_buf->buff;
+ }
+ return buff - log_buf->buff;
+}
+
+static void log_exit(void)
+{
+ union fc_generic fc_log;
+
+ memset(&fc_log, 0, sizeof(fc_log));
+ fc_log.as_in.cmd = MC_FC_NWD_TRACE;
+
+ MCDRV_DBG(mcd, "Unregister the trace buffer");
+ mc_fastcall(&fc_log);
+ MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
+
+ if (fc_log.as_out.ret == 0) {
+ free_pages((unsigned long)log_buf, get_order(log_size));
+ log_buf = NULL;
+ }
+}
+
+/* log_worker() - Worker thread processing the log_buf buffer. */
+static int log_worker(void *p)
+{
+ int ret = 0;
+ if (log_buf == NULL) {
+ ret = -EFAULT;
+ goto err_kthread;
+ }
+
+ while (!kthread_should_stop()) {
+ if (log_buf->write_pos == log_pos)
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+
+ switch (log_buf->version) {
+ case 2:
+ log_pos = process_log();
+ break;
+ default:
+ MCDRV_DBG_ERROR(mcd, "Unknown Mobicore log data");
+ log_pos = log_buf->write_pos;
+ /*
+ * Stop the thread as we have no idea what
+ * happens next
+ */
+ ret = -EFAULT;
+ goto err_kthread;
+ }
+ }
+err_kthread:
+ MCDRV_DBG(mcd, "Logging thread stopped!");
+ thread_err = ret;
+ /* Wait until the next kthread_stop() is called, if it was already
+ * called we just slip through, if there is an error signal it and
+ * wait to get the signal */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ log_exit();
+
+ return ret;
+}
+
+/*
+ * Wake up the log reader thread
+ * This should be called from the places where calls into MobiCore have
+ * generated some logs(eg, yield, SIQ...)
+ */
+void mobicore_log_read(void)
+{
+ if (log_thread == NULL || IS_ERR(log_thread))
+ return;
+
+ /* The thread itself is in some error condition so just get
+ * rid of it */
+ if (thread_err != 0) {
+ kthread_stop(log_thread);
+ log_thread = NULL;
+ return;
+ }
+
+ wake_up_process(log_thread);
+}
+
+/*
+ * Setup MobiCore kernel log. It assumes it's running on CORE 0!
+ * The fastcall will complain is that is not the case!
+ */
+long mobicore_log_setup(void)
+{
+ phys_addr_t phys_log_buf;
+ union fc_generic fc_log;
+ struct sched_param param = { .sched_priority = 1 };
+
+ long ret;
+ log_pos = 0;
+ log_buf = NULL;
+ log_thread = NULL;
+ log_line = NULL;
+ log_line_len = 0;
+ prev_source = 0;
+ thread_err = 0;
+
+ /* Sanity check for the log size */
+ if (log_size < PAGE_SIZE)
+ return -EFAULT;
+ else
+ log_size = PAGE_ALIGN(log_size);
+
+ log_line = kzalloc(LOG_LINE_SIZE, GFP_KERNEL);
+ if (IS_ERR(log_line)) {
+ MCDRV_DBG_ERROR(mcd, "failed to allocate log line!");
+ return -ENOMEM;
+ }
+
+ log_thread = kthread_create(log_worker, NULL, "mc_log");
+ if (IS_ERR(log_thread)) {
+ MCDRV_DBG_ERROR(mcd, "MobiCore log thread creation failed!");
+ ret = -EFAULT;
+ goto err_free_line;
+ }
+
+ sched_setscheduler(log_thread, SCHED_IDLE, &param);
+ /*
+ * We are going to map this buffer into virtual address space in SWd.
+ * To reduce complexity there, we use a contiguous buffer.
+ */
+ log_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(log_size));
+ if (!log_buf) {
+ MCDRV_DBG_ERROR(mcd, "Failed to get page for logger!");
+ ret = -ENOMEM;
+ goto err_stop_kthread;
+ }
+ phys_log_buf = virt_to_phys(log_buf);
+
+ memset(&fc_log, 0, sizeof(fc_log));
+ fc_log.as_in.cmd = MC_FC_NWD_TRACE;
+ fc_log.as_in.param[0] = (uint32_t)phys_log_buf;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ fc_log.as_in.param[1] = (uint32_t)(phys_log_buf >> 32);
+#endif
+ fc_log.as_in.param[2] = log_size;
+
+ MCDRV_DBG(mcd, "fc_log virt=%p phys=0x%llX",
+ log_buf, (u64)phys_log_buf);
+ mc_fastcall(&fc_log);
+ MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
+
+ /* If the setup failed we must free the memory allocated */
+ if (fc_log.as_out.ret) {
+ MCDRV_DBG_ERROR(mcd, "MobiCore shared traces setup failed!");
+ free_pages((unsigned long)log_buf, get_order(log_size));
+ log_buf = NULL;
+ ret = -EIO;
+ goto err_stop_kthread;
+ }
+
+ set_task_state(log_thread, TASK_INTERRUPTIBLE);
+
+ MCDRV_DBG(mcd, "fc_log Logger version %u", log_buf->version);
+ return 0;
+
+err_stop_kthread:
+ kthread_stop(log_thread);
+ log_thread = NULL;
+err_free_line:
+ kfree(log_line);
+ log_line = NULL;
+ return ret;
+}
+
+/*
+ * Free kernel log components.
+ * ATTN: We can't free the log buffer because it's also in use by MobiCore and
+ * even if the module is unloaded MobiCore is still running.
+ */
+void mobicore_log_free(void)
+{
+ if (log_thread && !IS_ERR(log_thread)) {
+ /* We don't really care what the thread returns for exit */
+ kthread_stop(log_thread);
+ }
+
+ kfree(log_line);
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.h
new file mode 100644
index 000000000..a3cbca21c
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/logging.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_LOGGING_H_
+#define _MC_LOGGING_H_
+
+/* MobiCore internal trace buffer structure. */
+struct mc_trace_buf {
+ uint32_t version; /* version of trace buffer */
+ uint32_t length; /* length of allocated buffer(includes header) */
+ uint32_t write_pos; /* last write position */
+ char buff[1]; /* start of the log buffer */
+};
+
+/* MobiCore internal trace log setup. */
+void mobicore_log_read(void);
+long mobicore_log_setup(void);
+void mobicore_log_free(void);
+
+#endif /* _MC_LOGGING_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.c
new file mode 100644
index 000000000..62b1d49ee
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.c
@@ -0,0 +1,1733 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command or
+ * fd = open(/dev/mobicore-user)
+ */
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/completion.h>
+#include <linux/fdtable.h>
+#include <linux/cdev.h>
+#ifdef CONFIG_OF
+#include <linux/of_irq.h>
+#endif
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+#include <linux/debugfs.h>
+#endif
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/af_unix.h>
+
+#include "main.h"
+#include "fastcall.h"
+
+#include "arm.h"
+#include "mem.h"
+#include "ops.h"
+#include "pm.h"
+#include "debug.h"
+#include "logging.h"
+#include "build_tag.h"
+
+/* Define a MobiCore device structure for use with dev_debug() etc */
+struct device_driver mcd_debug_name = {
+ .name = "MobiCore"
+};
+
+struct device mcd_debug_subname = {
+ .driver = &mcd_debug_name
+};
+
+struct device *mcd = &mcd_debug_subname;
+
+/* We need 2 devices for admin and user interface*/
+#define MC_DEV_MAX 2
+
+/* Need to discover a chrdev region for the driver */
+static dev_t mc_dev_admin, mc_dev_user;
+struct cdev mc_admin_cdev, mc_user_cdev;
+/* Device class for the driver assigned major */
+static struct class *mc_device_class;
+
+#ifndef FMODE_PATH
+ #define FMODE_PATH 0x0
+#endif
+
+static struct sock *__get_socket(struct file *filp)
+{
+ struct sock *u_sock = NULL;
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+ /*
+ * Socket ?
+ */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ struct sock *s = sock->sk;
+
+ /*
+ * PF_UNIX ?
+ */
+ if (s && sock->ops && sock->ops->family == PF_UNIX)
+ u_sock = s;
+ }
+ return u_sock;
+}
+
+
+/* MobiCore interrupt context data */
+static struct mc_context ctx;
+
+/* Get process context from file pointer */
+static struct mc_instance *get_instance(struct file *file)
+{
+ return (struct mc_instance *)(file->private_data);
+}
+
+extern struct mc_mmu_table *find_mmu_table(unsigned int handle);
+uint32_t mc_get_new_handle(void)
+{
+ uint32_t handle;
+ struct mc_buffer *buffer;
+ struct mc_mmu_table *table;
+
+
+ mutex_lock(&ctx.cont_bufs_lock);
+retry:
+ handle = atomic_inc_return(&ctx.handle_counter);
+ /* The handle must leave 12 bits (PAGE_SHIFT) for the 12 LSBs to be
+ * zero, as mmap requires the offset to be page-aligned, plus 1 bit for
+ * the MSB to be 0 too, so mmap does not see the offset as negative
+ * and fail.
+ */
+ if ((handle << (PAGE_SHIFT+1)) == 0) {
+ atomic_set(&ctx.handle_counter, 1);
+ handle = 1;
+ }
+ list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+ if (buffer->handle == handle)
+ goto retry;
+ }
+
+ /* here we assume table_lock is already taken. */
+ table = find_mmu_table(handle);
+ if (table != NULL)
+ goto retry;
+
+ mutex_unlock(&ctx.cont_bufs_lock);
+
+ return handle;
+}
+
+/* Clears the reserved bit of each page and frees the pages */
+static inline void free_continguous_pages(void *addr, unsigned int order)
+{
+ int i;
+ struct page *page = virt_to_page(addr);
+ for (i = 0; i < (1<<order); i++) {
+ MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p", page);
+ clear_bit(PG_reserved, &page->flags);
+ page++;
+ }
+
+ MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x", addr, order);
+ free_pages((unsigned long)addr, order);
+}
+
+/* Frees the memory associated with a buffer */
+static int free_buffer(struct mc_buffer *buffer)
+{
+ if (buffer->handle == 0)
+ return -EINVAL;
+
+ if (buffer->addr == 0)
+ return -EINVAL;
+
+ if (!atomic_dec_and_test(&buffer->usage)) {
+ MCDRV_DBG_VERBOSE(mcd, "Could not free %u, usage=%d",
+ buffer->handle,
+ atomic_read(&(buffer->usage)));
+ return 0;
+ }
+
+ MCDRV_DBG_VERBOSE(mcd,
+ "h=%u phy=0x%llx, kaddr=0x%p len=%u buf=%p usage=%d",
+ buffer->handle, (u64)buffer->phys, buffer->addr,
+ buffer->len, buffer, atomic_read(&(buffer->usage)));
+
+ list_del(&buffer->list);
+
+ free_continguous_pages(buffer->addr, buffer->order);
+ kfree(buffer);
+ return 0;
+}
+
+static uint32_t mc_find_cont_wsm_addr(struct mc_instance *instance, void *uaddr,
+ void **addr, uint32_t len)
+{
+ int ret = 0;
+ struct mc_buffer *buffer;
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ mutex_lock(&instance->lock);
+
+ mutex_lock(&ctx.cont_bufs_lock);
+
+ /* search for the given handle in the buffers list */
+ list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+ if (buffer->uaddr == uaddr && buffer->len == len) {
+ *addr = buffer->addr;
+ goto found;
+ }
+ }
+
+ /* Coundn't find the buffer */
+ ret = -EINVAL;
+
+found:
+ mutex_unlock(&ctx.cont_bufs_lock);
+ mutex_unlock(&instance->lock);
+
+ return ret;
+}
+
+bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd)
+{
+#ifndef __ARM_VE_A9X4_STD__
+ struct file *fp;
+ struct sock *s;
+ struct files_struct *files;
+ struct task_struct *peer = NULL;
+ bool ret = false;
+
+ MCDRV_DBG_VERBOSE(mcd, "Finding wsm for fd = %d", fd);
+ if (!instance)
+ return false;
+
+ if (is_daemon(instance))
+ return true;
+
+ rcu_read_lock();
+ fp = fcheck_files(current->files, fd);
+ if (fp == NULL)
+ goto out;
+ s = __get_socket(fp);
+ if (s)
+ peer = get_pid_task(s->sk_peer_pid, PIDTYPE_PID);
+
+ if (peer) {
+ task_lock(peer);
+ files = peer->files;
+ if (!files)
+ goto out;
+ for (fd = 0; fd < files_fdtable(files)->max_fds; fd++) {
+ fp = fcheck_files(files, fd);
+ if (!fp)
+ continue;
+ if (fp->private_data == instance) {
+ ret = true;
+ break;
+ }
+ }
+ } else {
+ MCDRV_DBG(mcd, "Owner not found!");
+ }
+out:
+ if (peer) {
+ task_unlock(peer);
+ put_task_struct(peer);
+ }
+ rcu_read_unlock();
+ if (!ret)
+ MCDRV_DBG(mcd, "Owner not found!");
+ return ret;
+#else
+ return true;
+#endif
+}
+/*
+ * mc_find_cont_wsm() - Resolve a contiguous WSM buffer from its handle.
+ * @instance:	caller instance; must be the MobiCore daemon
+ * @handle:	handle of the buffer to look up
+ * @fd:		socket fd used to verify the buffer's owner
+ * @phys:	out: physical address of the buffer
+ * @len:	out: length of the buffer in bytes
+ *
+ * Only the daemon may resolve handles to physical addresses, and only
+ * when the owner check against @fd succeeds.
+ * Returns 0 on success, -EFAULT/-EPERM/-EINVAL on error.
+ */
+static int mc_find_cont_wsm(struct mc_instance *instance, uint32_t handle,
+	int32_t fd, phys_addr_t *phys, uint32_t *len)
+{
+	int ret = 0;
+	struct mc_buffer *buffer;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	mutex_lock(&instance->lock);
+
+	mutex_lock(&ctx.cont_bufs_lock);
+
+	/* search for the given handle in the buffers list */
+	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+		if (buffer->handle == handle) {
+			if (mc_check_owner_fd(buffer->instance, fd)) {
+				*phys = buffer->phys;
+				*len = buffer->len;
+				goto found;
+			} else {
+				break;
+			}
+		}
+	}
+
+	/* Couldn't find the buffer */
+	ret = -EINVAL;
+
+found:
+	mutex_unlock(&ctx.cont_bufs_lock);
+	mutex_unlock(&instance->lock);
+
+	return ret;
+}
+
+/*
+ * __free_buffer - Free a WSM buffer allocated with mobicore_allocate_wsm
+ *
+ * @instance
+ * @handle	handle of the buffer
+ * @unlock	true when called from the daemon's unlock path; skips the
+ *		user space unmap step
+ *
+ * Returns 0 if no error
+ *
+ */
+static int __free_buffer(struct mc_instance *instance, uint32_t handle,
+	bool unlock)
+{
+	int ret = 0;
+	struct mc_buffer *buffer;
+#ifndef MC_VM_UNMAP
+	struct mm_struct *mm = current->mm;
+#endif
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&ctx.cont_bufs_lock);
+	/* search for the given handle in the buffers list */
+	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+		if (buffer->handle == handle)
+			goto found_buffer;
+	}
+	ret = -EINVAL;
+	goto err;
+found_buffer:
+	/* Only the owner or the daemon may free a buffer. */
+	if (!is_daemon(instance) && buffer->instance != instance) {
+		ret = -EPERM;
+		goto err;
+	}
+	mutex_unlock(&ctx.cont_bufs_lock);
+	/* Only unmap if the request is coming from the user space and
+	 * it hasn't already been unmapped */
+	/* NOTE(review): buffer->uaddr/len are read here after the list
+	 * lock was dropped — relies on the caller holding instance->lock
+	 * to keep the buffer alive; confirm all callers do. */
+	if (!unlock && buffer->uaddr != NULL) {
+#ifndef MC_VM_UNMAP
+		/* do_munmap must be done with mm->mmap_sem taken */
+		down_write(&mm->mmap_sem);
+		ret = do_munmap(mm,
+			(long unsigned int)buffer->uaddr,
+			buffer->len);
+		up_write(&mm->mmap_sem);
+
+#else
+		ret = vm_munmap((long unsigned int)buffer->uaddr, buffer->len);
+#endif
+		if (ret < 0) {
+			/* Something is not right if we end up here, better not
+			 * clean the buffer so we just leak memory instead of
+			 * creating security issues */
+			MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped");
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&ctx.cont_bufs_lock);
+	/* The lock was dropped around the unmap, so look the handle up
+	 * again — it may have been freed concurrently. */
+	/* search for the given handle in the buffers list */
+	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+		if (buffer->handle == handle)
+			goto del_buffer;
+	}
+	ret = -EINVAL;
+	goto err;
+
+del_buffer:
+	if (is_daemon(instance) || buffer->instance == instance)
+		ret = free_buffer(buffer);
+	else
+		ret = -EPERM;
+err:
+	mutex_unlock(&ctx.cont_bufs_lock);
+	return ret;
+}
+
+/*
+ * mc_free_buffer() - Free a WSM buffer on behalf of a user space caller.
+ * @instance:	caller instance
+ * @handle:	handle of the buffer to free
+ *
+ * Serializes on the instance lock and delegates to __free_buffer() with
+ * the user space unmap step enabled.
+ */
+int mc_free_buffer(struct mc_instance *instance, uint32_t handle)
+{
+	int ret;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&instance->lock);
+	ret = __free_buffer(instance, handle, false);
+	mutex_unlock(&instance->lock);
+
+	return ret;
+}
+
+
+/*
+ * mc_get_buffer() - Allocate a contiguous WSM buffer and add it to the
+ * global buffer list.
+ * @instance:	owner of the new buffer
+ * @buffer:	out: the newly allocated buffer descriptor
+ * @len:	requested length in bytes (backed by 2^order pages)
+ *
+ * The buffer starts with a usage refcount of 1, held by the requesting
+ * TLC. Returns 0 on success, negative errno otherwise.
+ */
+int mc_get_buffer(struct mc_instance *instance,
+	struct mc_buffer **buffer, unsigned long len)
+{
+	struct mc_buffer *cbuffer = NULL;
+	void *addr = NULL;
+	phys_addr_t phys = 0;
+	unsigned int order;
+#if defined(DEBUG_VERBOSE)
+	unsigned long allocated_size;
+#endif
+	int ret = 0;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (len == 0) {
+		MCDRV_DBG_WARN(mcd, "cannot allocate size 0");
+		return -ENOMEM;
+	}
+
+	order = get_order(len);
+	/* MAX_ORDER is an exclusive limit for the page allocator:
+	 * order == MAX_ORDER can never be satisfied, so reject it too. */
+	if (order >= MAX_ORDER) {
+		MCDRV_DBG_WARN(mcd, "Buffer size too large");
+		return -ENOMEM;
+	}
+#if defined(DEBUG_VERBOSE)
+	allocated_size = (1 << order) * PAGE_SIZE;
+#endif
+
+	if (mutex_lock_interruptible(&instance->lock))
+		return -ERESTARTSYS;
+
+	/* allocate a new buffer. */
+	cbuffer = kzalloc(sizeof(*cbuffer), GFP_KERNEL);
+	if (!cbuffer) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)",
+			  len, order, allocated_size);
+
+	addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
+	if (!addr) {
+		ret = -ENOMEM;
+		goto end;
+	}
+
+	phys = virt_to_phys(addr);
+	cbuffer->handle = mc_get_new_handle();
+	cbuffer->phys = phys;
+	cbuffer->addr = addr;
+	cbuffer->order = order;
+	cbuffer->len = len;
+	cbuffer->instance = instance;
+	cbuffer->uaddr = NULL;
+	/* Refcount +1 because the TLC is requesting it */
+	atomic_set(&cbuffer->usage, 1);
+
+	INIT_LIST_HEAD(&cbuffer->list);
+	mutex_lock(&ctx.cont_bufs_lock);
+	list_add(&cbuffer->list, &ctx.cont_bufs);
+	mutex_unlock(&ctx.cont_bufs_lock);
+
+	MCDRV_DBG_VERBOSE(mcd,
+			  "phy=0x%llx-0x%llx, kaddr=0x%p h=%d buf=%p usage=%d",
+			  (u64)phys,
+			  (u64)(phys+allocated_size),
+			  addr, cbuffer->handle,
+			  cbuffer, atomic_read(&(cbuffer->usage)));
+	*buffer = cbuffer;
+
+end:
+	if (ret)
+		kfree(cbuffer);
+
+	mutex_unlock(&instance->lock);
+	return ret;
+}
+
+/*
+ * __lock_buffer() - Take an extra reference (+1 usage) on a contiguous
+ * buffer identified by its handle.
+ * Assumes the instance lock is already taken! Daemon-only.
+ */
+static int __lock_buffer(struct mc_instance *instance, uint32_t handle)
+{
+	struct mc_buffer *buffer;
+	int ret = -EINVAL;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	mutex_lock(&ctx.cont_bufs_lock);
+	/* Scan the global list for a matching handle. */
+	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+		if (buffer->handle != handle)
+			continue;
+		atomic_inc(&buffer->usage);
+		MCDRV_DBG_VERBOSE(mcd, "handle=%u phy=0x%llx usage=%d",
+				  buffer->handle, (u64)buffer->phys,
+				  atomic_read(&(buffer->usage)));
+		ret = 0;
+		break;
+	}
+	mutex_unlock(&ctx.cont_bufs_lock);
+
+	return ret;
+}
+
+/*
+ * get_mci_base_phys() - Return the physical address of the MCI buffer,
+ * allocating it on first use.
+ * @len:	requested MCI size in bytes (rounded up to 2^order pages)
+ *
+ * Returns the physical address, or 0 when allocation fails.
+ */
+static phys_addr_t get_mci_base_phys(unsigned int len)
+{
+	unsigned int order;
+
+	/* Reuse the buffer if it was already set up. */
+	if (ctx.mci_base.phys)
+		return ctx.mci_base.phys;
+
+	order = get_order(len);
+	ctx.mcp = NULL;
+	ctx.mci_base.order = order;
+	ctx.mci_base.addr =
+		(void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
+	if (ctx.mci_base.addr == NULL) {
+		MCDRV_DBG_WARN(mcd, "get_free_pages failed");
+		memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
+		return 0;
+	}
+	ctx.mci_base.phys = virt_to_phys(ctx.mci_base.addr);
+	return ctx.mci_base.phys;
+}
+
+/*
+ * Create a MMU table from a virtual memory buffer which can be vmalloc
+ * or user space virtual memory
+ * @instance:	caller instance
+ * @buffer:	start address of the virtual memory region
+ * @len:	length of the region in bytes (must be non-zero)
+ * @handle:	out: handle of the resulting table
+ * @phys:	out: physical address of the table; only exposed to the
+ *		daemon, 0 for other callers
+ *
+ * Buffers larger than 1MB are chunked into per-1MB tables referenced by
+ * a "fake L1" page of 64-bit descriptors; such buffers are allowed for
+ * the daemon only. Returns 0 on success, negative errno otherwise.
+ */
+int mc_register_wsm_mmu(struct mc_instance *instance,
+	void *buffer, uint32_t len,
+	uint32_t *handle, phys_addr_t *phys)
+{
+	int ret = 0;
+	struct mc_mmu_table *table = NULL;
+	struct task_struct *task = current;
+	void *kbuff = NULL;
+
+	uint32_t index;
+	uint64_t *mmu_table = NULL;
+	uint32_t nb_of_1mb_section;
+	unsigned int offset;
+	unsigned int page_number;
+	unsigned int *handles = NULL;
+	unsigned int tmp_len;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (len == 0) {
+		MCDRV_DBG_ERROR(mcd, "len=0 is not supported!");
+		return -EINVAL;
+	}
+
+
+	/* The offset of the buffer*/
+	offset = (unsigned int)
+		(((unsigned long)(buffer)) & (~PAGE_MASK));
+
+	MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x offset=%d",
+			  buffer, len, offset);
+
+	/* Number of 4k pages required */
+	page_number = (offset + len) / PAGE_SIZE;
+	if (((offset + len) & (~PAGE_MASK)) != 0)
+		page_number++;
+
+	/* Number of 1mb sections */
+	nb_of_1mb_section = (page_number * PAGE_SIZE) / SZ_1M;
+	if (((page_number * PAGE_SIZE) & (SZ_1M - 1)) != 0)
+		nb_of_1mb_section++;
+
+	/* since for both non-LPAE and LPAE cases we use uint64_t records
+	 * for the fake table we don't support more than 512 MB TA size
+	 */
+	if (nb_of_1mb_section > SZ_4K / sizeof(uint64_t)) {
+		MCDRV_DBG_ERROR(mcd, "fake L1 table size too big");
+		return -ENOMEM;
+	}
+	MCDRV_DBG_VERBOSE(mcd, "nb_of_1mb_section=%d", nb_of_1mb_section);
+	if (nb_of_1mb_section > 1) {
+		/* WSM buffer with size greater than 1Mb
+		 * is available for open session command
+		 * from the Daemon only
+		 */
+		if (!is_daemon(instance)) {
+			MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+			return -EPERM;
+		}
+		MCDRV_DBG_VERBOSE(mcd, "allocate %d L2 table",
+				  nb_of_1mb_section);
+		/* One zeroed page holds the fake L1 descriptors. */
+		mmu_table = (uint64_t *)get_zeroed_page(GFP_KERNEL);
+		MCDRV_DBG_VERBOSE(mcd, "mmu_table = 0x%p", mmu_table);
+		if (mmu_table == NULL) {
+			MCDRV_DBG_ERROR(mcd,
+					"fake L1 table alloc. failed");
+			return -ENOMEM;
+		}
+	}
+
+	/* If the address matches a registered contiguous buffer, use its
+	 * kernel mapping and drop the task (no user space walk needed). */
+	if (!mc_find_cont_wsm_addr(instance, buffer, &kbuff, len)) {
+		buffer = kbuff;
+		task = NULL;
+	}
+
+	/* This array is used to free mmu tables in case of any error */
+	handles = kmalloc(sizeof(unsigned int)*nb_of_1mb_section,
+			  GFP_KERNEL | __GFP_ZERO);
+	if (handles == NULL) {
+		MCDRV_DBG_ERROR(mcd, "auxiliary handles array alloc. failed");
+		ret = -ENOMEM;
+		goto err;
+	}
+	/* Each L1 record refers 1MB piece of TA blob
+	 * for both non-LPAE and LPAE modes
+	 */
+
+	/* First chunk ends at the next 1MB boundary. */
+	tmp_len = (len + offset > SZ_1M) ? (SZ_1M - offset) : len;
+	for (index = 0; index < nb_of_1mb_section; index++) {
+		table = mc_alloc_mmu_table(instance, task, buffer, tmp_len, 0);
+
+		if (IS_ERR(table)) {
+			MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed");
+			ret = -EINVAL;
+			goto err;
+		}
+		handles[index] = table->handle;
+
+		if (mmu_table != NULL) {
+			MCDRV_DBG_VERBOSE(mcd, "fake L1 %p add L2 descr 0x%llX",
+					  mmu_table + index,
+					  (u64)table->phys);
+			mmu_table[index] = table->phys;
+		}
+
+		buffer += tmp_len;
+		len -= tmp_len;
+		tmp_len = (len > SZ_1M) ? SZ_1M : len;
+	}
+	/* Multi-section case: register the fake L1 page itself as the
+	 * table the caller gets a handle to. */
+	if (mmu_table != NULL) {
+		MCDRV_DBG_VERBOSE(mcd, "fake L1 buffer: %p, len=%zu",
+				  mmu_table,
+				  nb_of_1mb_section*sizeof(uint64_t));
+
+		table = mc_alloc_mmu_table(
+			instance,
+			NULL,
+			mmu_table,
+			nb_of_1mb_section*sizeof(uint64_t),
+			MC_MMU_TABLE_TYPE_WSM_FAKE_L1);
+		if (IS_ERR(table)) {
+			MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed");
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* set response */
+	*handle = table->handle;
+	/* WARNING: daemon shouldn't know this either, but live with it */
+	if (is_daemon(instance))
+		*phys = table->phys;
+	else
+		*phys = 0;
+
+	MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=0x%llX",
+			  *handle, (u64)(*phys));
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	kfree(handles);
+
+	return ret;
+
+err:
+	/* Undo any per-section tables registered so far. */
+	if (handles != NULL) {
+		for (index = 0; index < nb_of_1mb_section; index++)
+			mc_free_mmu_table(instance, handles[index]);
+		kfree(handles);
+	}
+	/* free_page(0) is a no-op, so this is safe when no fake L1 page
+	 * was allocated. */
+	free_page((unsigned long)mmu_table);
+	return ret;
+}
+
+/*
+ * mc_unregister_wsm_mmu() - Release a WSM MMU table by handle.
+ * @instance:	caller instance
+ * @handle:	handle of the table to release
+ *
+ * The table is only actually freed once no further locks exist on it.
+ * Always returns 0 (the free result is intentionally ignored).
+ */
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle)
+{
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mc_free_mmu_table(instance, handle);
+
+	return 0;
+}
+/* Lock the object behind a handle: a WSM MMU table or a cont buffer. */
+static int mc_lock_handle(struct mc_instance *instance, uint32_t handle)
+{
+	int ret;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	mutex_lock(&instance->lock);
+
+	/* Try as an MMU table first; -EINVAL means the handle belongs
+	 * to a contiguous buffer instead, so fall back to the variant
+	 * that does not take the instance lock itself. */
+	ret = mc_lock_mmu_table(instance, handle);
+	if (ret == -EINVAL)
+		ret = __lock_buffer(instance, handle);
+
+	mutex_unlock(&instance->lock);
+
+	return ret;
+}
+
+/* Unlock the object behind a handle: a WSM MMU table or a cont buffer. */
+static int mc_unlock_handle(struct mc_instance *instance, uint32_t handle)
+{
+	int ret;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	mutex_lock(&instance->lock);
+
+	/* Try as an MMU table first; -EINVAL means the handle belongs
+	 * to a contiguous buffer, so free that instead (skipping the
+	 * user space unmap — the daemon never mapped it). */
+	ret = mc_free_mmu_table(instance, handle);
+	if (ret == -EINVAL)
+		ret = __free_buffer(instance, handle, true);
+
+	mutex_unlock(&instance->lock);
+
+	return ret;
+}
+
+/*
+ * mc_find_wsm_mmu() - Resolve an MMU table handle to its physical
+ * address. Daemon-only; returns 0 on any failure.
+ */
+static phys_addr_t mc_find_wsm_mmu(struct mc_instance *instance,
+	uint32_t handle, int32_t fd)
+{
+	if (WARN(!instance, "No instance data available"))
+		return 0;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return 0;
+	}
+
+	/* Ownership of the table is verified against fd inside. */
+	return mc_find_mmu_table(handle, fd);
+}
+
+/*
+ * mc_clean_wsm_mmu() - Drop all cached MMU tables. Daemon-only.
+ */
+static int mc_clean_wsm_mmu(struct mc_instance *instance)
+{
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	mc_clean_mmu_tables();
+	return 0;
+}
+
+/*
+ * mc_fd_mmap() - mmap() handler for both device nodes.
+ *
+ * The vm_pgoff field carries a buffer handle: non-zero maps a client's
+ * own contiguous WSM buffer, zero maps the MCI buffer (daemon only).
+ */
+static int mc_fd_mmap(struct file *file, struct vm_area_struct *vmarea)
+{
+	struct mc_instance *instance = get_instance(file);
+	unsigned long len = vmarea->vm_end - vmarea->vm_start;
+	/* The offset is abused to carry the buffer handle (see the
+	 * MC_IO_MAP_WSM ioctl, which stores handle << PAGE_SHIFT). */
+	uint32_t handle = vmarea->vm_pgoff;
+	struct mc_buffer *buffer = 0;
+	int ret = 0;
+
+	MCDRV_DBG_VERBOSE(mcd, "start=0x%p, size=%ld, offset=%ld, mci=0x%llX",
+			  (void *)vmarea->vm_start, len, vmarea->vm_pgoff,
+			  (u64)ctx.mci_base.phys);
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (len == 0) {
+		MCDRV_DBG_ERROR(mcd, "cannot allocate size 0");
+		return -ENOMEM;
+	}
+	if (handle) {
+		mutex_lock(&ctx.cont_bufs_lock);
+
+		/* search for the buffer list. */
+		list_for_each_entry(buffer, &ctx.cont_bufs, list) {
+			/* Only allow mapping if the client owns it!*/
+			if (buffer->handle == handle &&
+			    buffer->instance == instance) {
+				/* We shouldn't do remap with larger size */
+				if (buffer->len > len)
+					break;
+				/* We can't allow mapping the buffer twice */
+				if (!buffer->uaddr)
+					goto found;
+				else
+					break;
+			}
+		}
+		/* Nothing found return */
+		mutex_unlock(&ctx.cont_bufs_lock);
+		MCDRV_DBG_ERROR(mcd, "handle not found");
+		return -EINVAL;
+
+found:
+		buffer->uaddr = (void *)vmarea->vm_start;
+		vmarea->vm_flags |= VM_IO;
+		/*
+		 * Convert kernel address to user address. Kernel address begins
+		 * at PAGE_OFFSET, user address range is below PAGE_OFFSET.
+		 * Remapping the area is always done, so multiple mappings
+		 * of one region are possible. Now remap kernel address
+		 * space into user space
+		 */
+		/* NOTE(review): the remap length is buffer->len, not the
+		 * (possibly larger) VMA length — confirm callers rely on
+		 * mapping only the buffer-sized prefix. */
+		ret = (int)remap_pfn_range(vmarea, vmarea->vm_start,
+			page_to_pfn(virt_to_page(buffer->addr)),
+			buffer->len, vmarea->vm_page_prot);
+		/* If the remap failed then don't mark this buffer as marked
+		 * since the unmaping will also fail */
+		if (ret)
+			buffer->uaddr = NULL;
+		mutex_unlock(&ctx.cont_bufs_lock);
+	} else {
+		/* Handle 0 selects the MCI buffer; daemon only. */
+		if (!is_daemon(instance))
+			return -EPERM;
+
+		if (!ctx.mci_base.addr)
+			return -EFAULT;
+
+		vmarea->vm_flags |= VM_IO;
+		/* Convert kernel address to user address. Kernel address begins
+		 * at PAGE_OFFSET, user address range is below PAGE_OFFSET.
+		 * Remapping the area is always done, so multiple mappings
+		 * of one region are possible. Now remap kernel address
+		 * space into user space */
+		ret = (int)remap_pfn_range(vmarea, vmarea->vm_start,
+			page_to_pfn(virt_to_page(ctx.mci_base.addr)),
+			len, vmarea->vm_page_prot);
+	}
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return ret;
+}
+
+/*
+ * ioctl_check_pointer() - Validate the user pointer of an ioctl against
+ * the access direction encoded in the command.
+ * Returns 0 when accessible, -EFAULT otherwise.
+ */
+static inline int ioctl_check_pointer(unsigned int cmd, int __user *uarg)
+{
+	int bad = 0;
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		bad = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
+	else if (_IOC_DIR(cmd) & _IOC_WRITE)
+		bad = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
+
+	return bad ? -EFAULT : 0;
+}
+
+/*
+ * mc_fd_user_ioctl() - Will be called from user space as ioctl(..)
+ * @file	pointer to file
+ * @cmd	command
+ * @arg	arguments
+ *
+ * Handles the commands available to ordinary clients; also serves as
+ * the fallback for admin commands not handled by mc_fd_admin_ioctl().
+ * Returns 0 for OK and an errno in case of error
+ */
+static long mc_fd_user_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct mc_instance *instance = get_instance(file);
+	int __user *uarg = (int __user *)arg;
+	int ret = -EINVAL;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (ioctl_check_pointer(cmd, uarg))
+		return -EFAULT;
+
+	switch (cmd) {
+	case MC_IO_FREE:
+		/* arg is the raw buffer handle, not a pointer. */
+		ret = mc_free_buffer(instance, (uint32_t)arg);
+		break;
+
+	/* 32/64 bit interface compatiblity notice:
+	 * mc_ioctl_reg_wsm has been defined with the buffer parameter
+	 * as void* which means that the size and layout of the structure
+	 * are different between 32 and 64 bit variants.
+	 * However our 64 bit Linux driver must be able to service both
+	 * 32 and 64 bit clients so we have to allow both IOCTLs. Though
+	 * we have a bit of copy paste code we provide maximum backwards
+	 * compatiblity */
+	case MC_IO_REG_WSM:{
+		struct mc_ioctl_reg_wsm reg;
+		phys_addr_t phys = 0;
+		if (copy_from_user(&reg, uarg, sizeof(reg)))
+			return -EFAULT;
+
+		ret = mc_register_wsm_mmu(instance,
+			(void *)(uintptr_t)reg.buffer,
+			reg.len, &reg.handle, &phys);
+		reg.table_phys = phys;
+
+		if (!ret) {
+			/* Roll back the registration if the result cannot
+			 * be delivered to user space. */
+			if (copy_to_user(uarg, &reg, sizeof(reg))) {
+				ret = -EFAULT;
+				mc_unregister_wsm_mmu(instance, reg.handle);
+			}
+		}
+		break;
+	}
+	case MC_COMPAT_REG_WSM:{
+		struct mc_compat_ioctl_reg_wsm reg;
+		phys_addr_t phys = 0;
+		if (copy_from_user(&reg, uarg, sizeof(reg)))
+			return -EFAULT;
+
+		ret = mc_register_wsm_mmu(instance,
+			(void *)(uintptr_t)reg.buffer,
+			reg.len, &reg.handle, &phys);
+		reg.table_phys = phys;
+
+		if (!ret) {
+			if (copy_to_user(uarg, &reg, sizeof(reg))) {
+				ret = -EFAULT;
+				mc_unregister_wsm_mmu(instance, reg.handle);
+			}
+		}
+		break;
+	}
+	case MC_IO_UNREG_WSM:
+		ret = mc_unregister_wsm_mmu(instance, (uint32_t)arg);
+		break;
+
+	case MC_IO_VERSION:
+		ret = put_user(mc_get_version(), uarg);
+		if (ret)
+			MCDRV_DBG_ERROR(mcd,
+					"IOCTL_GET_VERSION failed to put data");
+		break;
+
+	case MC_IO_MAP_WSM:{
+		struct mc_ioctl_map map;
+		struct mc_buffer *buffer = 0;
+		if (copy_from_user(&map, uarg, sizeof(map)))
+			return -EFAULT;
+
+		/* Setup the WSM buffer structure! */
+		if (mc_get_buffer(instance, &buffer, map.len))
+			return -EFAULT;
+
+		map.handle = buffer->handle;
+		/* Trick: to keep the same interface with the user space, store
+		   the handle in the physical address.
+		   It is given back with the offset when mmap() is called. */
+		map.phys_addr = buffer->handle << PAGE_SHIFT;
+		map.reused = 0;
+		if (copy_to_user(uarg, &map, sizeof(map)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+		break;
+	}
+	default:
+		MCDRV_DBG_ERROR(mcd, "unsupported cmd=0x%x", cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+
+	} /* end switch(cmd) */
+
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
+
+	return (int)ret;
+}
+
+/*
+ * mc_fd_admin_ioctl() - ioctl handler for the admin node.
+ * @file:	pointer to file
+ * @cmd:	command
+ * @arg:	arguments
+ *
+ * Only the MobiCore daemon may issue these commands; anything not
+ * handled here falls through to the common user ioctl handler.
+ * Returns 0 for OK and an errno in case of error.
+ */
+static long mc_fd_admin_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	struct mc_instance *instance = get_instance(file);
+	int __user *uarg = (int __user *)arg;
+	int ret = -EINVAL;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	if (ioctl_check_pointer(cmd, uarg))
+		return -EFAULT;
+
+	switch (cmd) {
+	case MC_IO_INIT: {
+		struct mc_ioctl_init init;
+		ctx.mcp = NULL;
+		if (!ctx.mci_base.phys) {
+			MCDRV_DBG_ERROR(mcd,
+					"Cannot init MobiCore without MCI!");
+			return -EINVAL;
+		}
+		if (copy_from_user(&init, uarg, sizeof(init)))
+			return -EFAULT;
+
+		ctx.mcp = ctx.mci_base.addr + init.mcp_offset;
+		ret = mc_init(ctx.mci_base.phys, init.nq_length,
+			      init.mcp_offset, init.mcp_length);
+		break;
+	}
+	case MC_IO_INFO: {
+		struct mc_ioctl_info info;
+		if (copy_from_user(&info, uarg, sizeof(info)))
+			return -EFAULT;
+
+		ret = mc_info(info.ext_info_id, &info.state,
+			      &info.ext_info);
+
+		if (!ret) {
+			if (copy_to_user(uarg, &info, sizeof(info)))
+				ret = -EFAULT;
+		}
+		break;
+	}
+	case MC_IO_YIELD:
+		ret = mc_yield();
+		break;
+
+	case MC_IO_NSIQ:
+		ret = mc_nsiq();
+		break;
+
+	case MC_IO_LOCK_WSM: {
+		ret = mc_lock_handle(instance, (uint32_t)arg);
+		break;
+	}
+	case MC_IO_UNLOCK_WSM:
+		ret = mc_unlock_handle(instance, (uint32_t)arg);
+		break;
+	case MC_IO_CLEAN_WSM:
+		ret = mc_clean_wsm_mmu(instance);
+		break;
+	case MC_IO_RESOLVE_WSM: {
+		phys_addr_t phys;
+		struct mc_ioctl_resolv_wsm wsm;
+		if (copy_from_user(&wsm, uarg, sizeof(wsm)))
+			return -EFAULT;
+		phys = mc_find_wsm_mmu(instance, wsm.handle, wsm.fd);
+		if (!phys)
+			return -EINVAL;
+
+		wsm.phys = phys;
+		if (copy_to_user(uarg, &wsm, sizeof(wsm)))
+			return -EFAULT;
+		ret = 0;
+		break;
+	}
+	case MC_IO_RESOLVE_CONT_WSM: {
+		struct mc_ioctl_resolv_cont_wsm cont_wsm;
+		phys_addr_t phys = 0;
+		uint32_t len = 0;
+		if (copy_from_user(&cont_wsm, uarg, sizeof(cont_wsm)))
+			return -EFAULT;
+		ret = mc_find_cont_wsm(instance, cont_wsm.handle, cont_wsm.fd,
+				       &phys, &len);
+		if (!ret) {
+			cont_wsm.phys = phys;
+			cont_wsm.length = len;
+			if (copy_to_user(uarg, &cont_wsm, sizeof(cont_wsm)))
+				ret = -EFAULT;
+		}
+		break;
+	}
+	case MC_IO_MAP_MCI:{
+		struct mc_ioctl_map map;
+		phys_addr_t phys_addr;
+		if (copy_from_user(&map, uarg, sizeof(map)))
+			return -EFAULT;
+
+		map.reused = (ctx.mci_base.phys != 0);
+		phys_addr = get_mci_base_phys(map.len);
+		if (!phys_addr) {
+			MCDRV_DBG_ERROR(mcd, "Failed to setup MCI buffer!");
+			return -EFAULT;
+		}
+		map.phys_addr = 0;
+		/* Do not clobber a copy_to_user failure with ret = 0
+		 * (previously the -EFAULT was unconditionally overwritten) */
+		if (copy_to_user(uarg, &map, sizeof(map)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+		break;
+	}
+	case MC_IO_LOG_SETUP: {
+#ifdef MC_MEM_TRACES
+		ret = mobicore_log_setup();
+#endif
+		break;
+	}
+
+	/* The rest is handled commonly by user IOCTL */
+	default:
+		ret = mc_fd_user_ioctl(file, cmd, arg);
+	} /* end switch(cmd) */
+
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
+
+	return (int)ret;
+}
+
+/*
+ * mc_fd_read() - This will be called from user space as read(...)
+ * @file:	file pointer
+ * @buffer:	buffer where to copy to(userspace)
+ * @buffer_len:	number of requested data
+ * @pos:	not used
+ *
+ * The read function is blocking until a interrupt occurs. In that case the
+ * event counter is copied into user space and the function is finished.
+ *
+ * Daemon-only: the returned value is the S-SIQ event counter.
+ *
+ * If OK this function returns the number of copied data otherwise it returns
+ * errno
+ */
+static ssize_t mc_fd_read(struct file *file, char *buffer, size_t buffer_len,
+	loff_t *pos)
+{
+	int ret = 0, ssiq_counter;
+	struct mc_instance *instance = get_instance(file);
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	/* avoid debug output on non-error, because this is call quite often */
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	/* only the MobiCore Daemon is allowed to call this function */
+	if (WARN_ON(!is_daemon(instance))) {
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
+		return -EPERM;
+	}
+
+	if (buffer_len < sizeof(unsigned int)) {
+		MCDRV_DBG_ERROR(mcd, "invalid length");
+		return -EINVAL;
+	}
+
+	/* Block until the ISR counter advances past the last value we
+	 * reported (each S-SIQ completes ctx.isr_comp). */
+	for (;;) {
+		if (wait_for_completion_interruptible(&ctx.isr_comp)) {
+			MCDRV_DBG_VERBOSE(mcd, "read interrupted");
+			return -ERESTARTSYS;
+		}
+
+		ssiq_counter = atomic_read(&ctx.isr_counter);
+		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i",
+				  ssiq_counter, ctx.evt_counter);
+
+		if (ssiq_counter != ctx.evt_counter) {
+			/* read data and exit loop without error */
+			ctx.evt_counter = ssiq_counter;
+			ret = 0;
+			break;
+		}
+
+		/* end loop if non-blocking */
+		if (file->f_flags & O_NONBLOCK) {
+			MCDRV_DBG_ERROR(mcd, "non-blocking read");
+			return -EAGAIN;
+		}
+
+		if (signal_pending(current)) {
+			MCDRV_DBG_VERBOSE(mcd, "received signal.");
+			return -ERESTARTSYS;
+		}
+	}
+
+	/* read data and exit loop */
+	ret = copy_to_user(buffer, &ctx.evt_counter, sizeof(unsigned int));
+
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mcd, "copy_to_user failed");
+		return -EFAULT;
+	}
+
+	ret = sizeof(unsigned int);
+
+	return (ssize_t)ret;
+}
+
+/*
+ * Allocate and initialize a new mobicore API instance object.
+ *
+ * Returns the instance or NULL if no allocation was possible.
+ */
+struct mc_instance *mc_alloc_instance(void)
+{
+	struct mc_instance *inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+
+	if (!inst)
+		return NULL;
+
+	/* PIDs are not unique, so hand out our own unique ID instead */
+	inst->handle = atomic_inc_return(&ctx.instance_counter);
+	mutex_init(&inst->lock);
+
+	return inst;
+}
+
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+/*
+ * mc_fd_write() - Debug-only write() handler for the user node.
+ *
+ * Writing "n" triggers an N-SIQ; writing a single digit switches the
+ * active core to that CPU number.
+ */
+static ssize_t mc_fd_write(struct file *file, const char __user *buffer,
+	size_t buffer_len, loff_t *x)
+{
+	uint32_t cpu_new;
+	/* we only consider one digit */
+	char buf[2];
+	struct mc_instance *instance = get_instance(file);
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	/* Invalid data, nothing to do */
+	if (buffer_len < 1)
+		return -EINVAL;
+
+	/* Invalid data, nothing to do */
+	if (copy_from_user(buf, buffer, min(sizeof(buf), buffer_len)))
+		return -EFAULT;
+
+	if (buf[0] == 'n') {
+		mc_nsiq();
+		/* If it's a digit then switch cores */
+	} else if ((buf[0] >= '0') && (buf[0] <= '9')) {
+		cpu_new = buf[0] - '0';
+		/* NOTE(review): this accepts CPU numbers 0..8 while a digit
+		 * can encode 0..9 — confirm whether the intended bound is
+		 * the core count (0..7) or 8 is deliberate. */
+		if (cpu_new <= 8) {
+			MCDRV_DBG_VERBOSE(mcd, "Set Active Cpu: %d\n", cpu_new);
+			mc_switch_core(cpu_new);
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return buffer_len;
+}
+#endif
+
+/*
+ * Release a mobicore instance object and all objects related to it
+ * @instance: instance
+ * Returns 0 if Ok or -E ERROR
+ *
+ * Frees the instance's MMU tables and any contiguous buffers it still
+ * owns, then the instance itself. Called from the fd release path.
+ */
+int mc_release_instance(struct mc_instance *instance)
+{
+	struct mc_buffer *buffer, *tmp;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&instance->lock);
+	mc_clear_mmu_tables(instance);
+
+	mutex_lock(&ctx.cont_bufs_lock);
+	/* release all mapped data */
+
+	/* Check if some buffers are orphaned. */
+	list_for_each_entry_safe(buffer, tmp, &ctx.cont_bufs, list) {
+		/* It's safe here to only call free_buffer() without unmapping
+		 * because mmap() takes a refcount to the file's fd so only
+		 * time we end up here is when everything has been unmapped or
+		 * the process called exit() */
+		if (buffer->instance == instance) {
+			/* Detach first so nobody resolves it to a dead
+			 * instance while it is being torn down. */
+			buffer->instance = NULL;
+			free_buffer(buffer);
+		}
+	}
+	mutex_unlock(&ctx.cont_bufs_lock);
+
+	mutex_unlock(&instance->lock);
+
+	/* release instance context */
+	kfree(instance);
+
+	return 0;
+}
+
+/*
+ * mc_fd_user_open() - Will be called from user space as fd = open(...)
+ * Allocates the per-fd instance data and stores it in the file.
+ *
+ * @inode
+ * @file
+ * Returns 0 if OK or -ENOMEM if no allocation was possible.
+ */
+static int mc_fd_user_open(struct inode *inode, struct file *file)
+{
+	struct mc_instance *instance;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	instance = mc_alloc_instance();
+	if (!instance)
+		return -ENOMEM;
+
+	/* store instance data reference */
+	file->private_data = instance;
+	return 0;
+}
+
+/*
+ * mc_fd_admin_open() - open() handler for the admin node.
+ *
+ * Only one admin connection (the MobiCore daemon) may exist at a time;
+ * the opener becomes the registered daemon instance.
+ */
+static int mc_fd_admin_open(struct inode *inode, struct file *file)
+{
+	struct mc_instance *instance;
+
+	/*
+	 * The daemon is already set so we can't allow anybody else to open
+	 * the admin interface.
+	 */
+	/* NOTE(review): this check is not serialized against a concurrent
+	 * open of the admin node — confirm opens cannot race here. */
+	if (ctx.daemon_inst) {
+		MCDRV_DBG_ERROR(mcd, "Daemon is already connected");
+		return -EPERM;
+	}
+	/* Setup the usual variables */
+	if (mc_fd_user_open(inode, file))
+		return -ENOMEM;
+	instance = get_instance(file);
+
+	MCDRV_DBG(mcd, "accept this as MobiCore Daemon");
+
+	ctx.daemon_inst = instance;
+	ctx.daemon = current;
+	instance->admin = true;
+	init_completion(&ctx.isr_comp);
+	/* init ssiq event counter */
+	ctx.evt_counter = atomic_read(&(ctx.isr_counter));
+
+	return 0;
+}
+
+/*
+ * mc_fd_release() - This function will be called from user space as close(...)
+ * The instance data are freed and the associated memory pages are unreserved.
+ *
+ * @inode
+ * @file
+ *
+ * Returns 0
+ */
+static int mc_fd_release(struct inode *inode, struct file *file)
+{
+	struct mc_instance *instance = get_instance(file);
+	int ret;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	/* check if daemon closes us. */
+	if (is_daemon(instance)) {
+		MCDRV_DBG_WARN(mcd, "MobiCore Daemon died");
+		ctx.daemon_inst = NULL;
+		ctx.daemon = NULL;
+	}
+
+	ret = mc_release_instance(instance);
+
+	/*
+	 * ret is quite irrelevant here as most apps don't care about the
+	 * return value from close() and it's quite difficult to recover
+	 */
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return (int)ret;
+}
+
+/*
+ * Interrupt handler for the S-SIQ line. Bumps the event counter and
+ * completes isr_comp so the daemon blocked in read() wakes up.
+ */
+static irqreturn_t mc_ssiq_isr(int intr, void *context)
+{
+	/* Count the event, then signal the daemon. */
+	atomic_inc(&(ctx.isr_counter));
+	complete(&ctx.isr_comp);
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+uint8_t trustonic_swd_debug;
+/*
+ * debugfs_read() - Report the trustonic_swd_debug flag as a single
+ * digit plus newline.
+ *
+ * Returns 2 on the first read, 0 (EOF) afterwards, -EINVAL when the
+ * caller's buffer is too small, -EFAULT on copy failure.
+ */
+static ssize_t debugfs_read(struct file *filep, char __user *buf, size_t len, loff_t *ppos)
+{
+	char mybuf[2];
+
+	if (*ppos != 0)
+		return 0;
+	/* Never write more bytes than the caller asked for (the old code
+	 * copied 2 bytes regardless of len). */
+	if (len < sizeof(mybuf))
+		return -EINVAL;
+	mybuf[0] = trustonic_swd_debug + '0';
+	mybuf[1] = '\n';
+	if (copy_to_user(buf, mybuf, sizeof(mybuf)))
+		return -EFAULT;
+	*ppos = sizeof(mybuf);
+	return sizeof(mybuf);
+}
+
+/*
+ * debugfs_write() - Set the trustonic_swd_debug flag from a single
+ * digit written by user space. Non-digit input is silently ignored;
+ * writes shorter than two bytes are rejected.
+ */
+static ssize_t debugfs_write(struct file *filep, const char __user *buf, size_t len, loff_t *ppos)
+{
+	uint8_t val = 0;
+
+	if (len < 2)
+		return -EFAULT;
+
+	if (!copy_from_user(&val, &buf[0], 1) &&
+	    val >= '0' && val <= '9')
+		trustonic_swd_debug = val - '0';
+
+	return len;
+}
+
+/* debugfs file hooks for the trustonic_swd_debug flag. */
+const struct file_operations debug_fops = {
+	.read = debugfs_read,
+	.write = debugfs_write
+};
+#endif
+
+/* function table structure of this device driver. */
+/* Admin node: daemon-only; read() delivers S-SIQ events, the same
+ * handler serves both native and compat ioctls. */
+static const struct file_operations mc_admin_fops = {
+	.owner = THIS_MODULE,
+	.open = mc_fd_admin_open,
+	.release = mc_fd_release,
+	.unlocked_ioctl = mc_fd_admin_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mc_fd_admin_ioctl,
+#endif
+	.mmap = mc_fd_mmap,
+	.read = mc_fd_read,
+};
+
+/* function table structure of this device driver. */
+/* User node: client interface; write() exists only in core-switcher
+ * debug builds. */
+static const struct file_operations mc_user_fops = {
+	.owner = THIS_MODULE,
+	.open = mc_fd_user_open,
+	.release = mc_fd_release,
+	.unlocked_ioctl = mc_fd_user_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = mc_fd_user_ioctl,
+#endif
+	.mmap = mc_fd_mmap,
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+	.write = mc_fd_write,
+#endif
+};
+
+/*
+ * create_devices() - Register the "mobicore" class, the char dev region
+ * and the admin and user device nodes.
+ *
+ * Unwind order on error mirrors the setup order; the previous code
+ * leaked the char dev region when the admin cdev_add failed and tried
+ * to destroy the user device before it had been created.
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int create_devices(void)
+{
+	int ret;
+
+	cdev_init(&mc_admin_cdev, &mc_admin_fops);
+	cdev_init(&mc_user_cdev, &mc_user_fops);
+
+	mc_device_class = class_create(THIS_MODULE, "mobicore");
+	if (IS_ERR(mc_device_class)) {
+		MCDRV_DBG_ERROR(mcd, "failed to create device class");
+		return PTR_ERR(mc_device_class);
+	}
+
+	ret = alloc_chrdev_region(&mc_dev_admin, 0, MC_DEV_MAX, "mobicore");
+	if (ret < 0) {
+		MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region");
+		goto error_class;
+	}
+	mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1);
+
+	MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_admin));
+
+	/* First the ADMIN node */
+	ret = cdev_add(&mc_admin_cdev, mc_dev_admin, 1);
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mcd, "admin device register failed");
+		goto error_region;
+	}
+	mc_admin_cdev.owner = THIS_MODULE;
+	device_create(mc_device_class, NULL, mc_dev_admin, NULL,
+		      MC_ADMIN_DEVNODE);
+
+	/* Then the user node */
+	ret = cdev_add(&mc_user_cdev, mc_dev_user, 1);
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mcd, "user device register failed");
+		goto error_admin;
+	}
+	mc_user_cdev.owner = THIS_MODULE;
+	device_create(mc_device_class, NULL, mc_dev_user, NULL,
+		      MC_USER_DEVNODE);
+
+	return 0;
+
+error_admin:
+	device_destroy(mc_device_class, mc_dev_admin);
+	cdev_del(&mc_admin_cdev);
+error_region:
+	unregister_chrdev_region(mc_dev_admin, MC_DEV_MAX);
+error_class:
+	class_destroy(mc_device_class);
+	return ret;
+}
+
+/*
+ * This function is called the kernel during startup or by a insmod command.
+ * This device is installed and registered as cdev, then interrupt and
+ * queue handling is set up
+ */
+/* S-SIQ interrupt number; defaults to MC_INTR_SSIQ and is overridden
+ * from the device tree when CONFIG_OF is set. */
+static unsigned int mobicore_irq_id = MC_INTR_SSIQ;
+static int __init mobicore_init(void)
+{
+ int ret = 0;
+ dev_set_name(mcd, "mcd");
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+ struct dentry *debug_root;
+#endif
+#ifdef CONFIG_OF
+ struct device_node *node;
+#if 0
+ unsigned int irq_info[3] = {0, 0, 0};
+#endif
+#endif
+
+ /* Do not remove or change the following trace.
+ * The string "MobiCore" is used to detect if <t-base is in of the image
+ */
+ dev_info(mcd, "MobiCore Driver, Build: " "\n");
+ dev_info(mcd, "MobiCore mcDrvModuleApi version is %i.%i\n",
+ MCDRVMODULEAPI_VERSION_MAJOR,
+ MCDRVMODULEAPI_VERSION_MINOR);
+#ifdef MOBICORE_COMPONENT_BUILD_TAG
+ dev_info(mcd, "MobiCore %s\n", MOBICORE_COMPONENT_BUILD_TAG);
+#endif
+ /* Hardware does not support ARM TrustZone -> Cannot continue! */
+ if (!has_security_extensions()) {
+ MCDRV_DBG_ERROR(mcd,
+ "Hardware doesn't support ARM TrustZone!");
+ return -ENODEV;
+ }
+
+ /* Running in secure mode -> Cannot load the driver! */
+ if (is_secure_mode()) {
+ MCDRV_DBG_ERROR(mcd, "Running in secure MODE!");
+ return -ENODEV;
+ }
+
+ ret = mc_fastcall_init(&ctx);
+ if (ret)
+ goto error;
+
+ init_completion(&ctx.isr_comp);
+
+ /* initialize event counter for signaling of an IRQ to zero */
+ atomic_set(&ctx.isr_counter, 0);
+
+#ifdef CONFIG_OF
+ node = of_find_compatible_node(NULL, NULL, "trustonic,mobicore");
+#if 0
+ if (of_property_read_u32_array(node, "interrupts", irq_info, ARRAY_SIZE(irq_info))) {
+ MCDRV_DBG_ERROR(mcd,
+ "Fail to get SSIQ id from device tree!");
+ return -ENODEV;
+ }
+ mobicore_irq_id = irq_info[1];
+#else
+ mobicore_irq_id = irq_of_parse_and_map(node, 0);
+#endif
+ MCDRV_DBG_VERBOSE(mcd, "Interrupt from device tree is %d\n", mobicore_irq_id);
+#endif
+
+ /* set up S-SIQ interrupt handler ************************/
+ ret = request_irq(mobicore_irq_id, mc_ssiq_isr, IRQF_TRIGGER_RISING,
+ MC_ADMIN_DEVNODE, &ctx);
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "interrupt request failed");
+ goto err_req_irq;
+ }
+
+#ifdef MC_PM_RUNTIME
+ ret = mc_pm_initialize(&ctx);
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "Power Management init failed!");
+ goto free_isr;
+ }
+#endif
+
+ ret = create_devices();
+ if (ret != 0)
+ goto free_pm;
+
+ ret = mc_init_mmu_tables();
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ ret = mc_pm_clock_initialize();
+#endif
+
+ /*
+ * initialize unique number counters which we can use for
+ * handles. We start with 1 instead of 0.
+ */
+ atomic_set(&ctx.handle_counter, 1);
+ atomic_set(&ctx.instance_counter, 1);
+
+ /* init list for contiguous buffers */
+ INIT_LIST_HEAD(&ctx.cont_bufs);
+
+ /* init lock for the buffers list */
+ mutex_init(&ctx.cont_bufs_lock);
+
+ memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
+ MCDRV_DBG(mcd, "initialized");
+#ifdef CONFIG_MT_TRUSTONIC_TEE_DEBUGFS
+ debug_root = debugfs_create_dir("trustonic", NULL);
+ if (debug_root) {
+ if (!debugfs_create_file("swd_debug", 0644, debug_root, NULL, &debug_fops)) {
+ MCDRV_DBG_ERROR(mcd, "Create trustonic debugfs swd_debug failed!");
+ }
+ } else {
+ MCDRV_DBG_ERROR(mcd, "Create trustonic debugfs directory failed!");
+ }
+#endif
+ return 0;
+
+free_pm:
+#ifdef MC_PM_RUNTIME
+ mc_pm_free();
+free_isr:
+#endif
+ free_irq(MC_INTR_SSIQ, &ctx);
+err_req_irq:
+ mc_fastcall_destroy();
+error:
+ return ret;
+}
+
+/*
+ * This function removes this device driver from the Linux device manager .
+ * Module exit: tears down, roughly in reverse order of mobicore_init() and
+ * create_devices(), the MMU tables, power management, device nodes, IRQ and
+ * fastcall infrastructure.
+ */
+static void __exit mobicore_exit(void)
+{
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+#ifdef MC_MEM_TRACES
+ mobicore_log_free();
+#endif
+
+ mc_release_mmu_tables();
+
+#ifdef MC_PM_RUNTIME
+ mc_pm_free();
+#endif
+
+ /* Undo create_devices(): device nodes, class and chrdev region.
+ * NOTE(review): mc_admin_cdev/mc_user_cdev are not cdev_del()'d here,
+ * although create_devices() cdev_add()'s them — verify intentional. */
+ device_destroy(mc_device_class, mc_dev_admin);
+ device_destroy(mc_device_class, mc_dev_user);
+ class_destroy(mc_device_class);
+ unregister_chrdev_region(mc_dev_admin, MC_DEV_MAX);
+
+ /* mobicore_irq_id is the IRQ actually requested at init time
+ * (possibly remapped from the device tree under CONFIG_OF). */
+ free_irq(mobicore_irq_id, &ctx);
+
+ mc_fastcall_destroy();
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ mc_pm_clock_finalize();
+#endif
+
+ MCDRV_DBG_VERBOSE(mcd, "exit");
+}
+
+/*
+ * Report whether the driver is ready for system sleep. Without runtime
+ * power management support there is nothing to wait for, so always true.
+ */
+bool mc_sleep_ready(void)
+{
+ bool ready = true;
+
+#ifdef MC_PM_RUNTIME
+ ready = mc_pm_sleep_ready();
+#endif
+ return ready;
+}
+
+/* Linux Driver Module Macros */
+module_init(mobicore_init);
+module_exit(mobicore_exit);
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore driver");
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.h
new file mode 100644
index 000000000..23c8fea16
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/main.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_MAIN_H_
+#define _MC_MAIN_H_
+
+#include <asm/pgtable.h>
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+
+#include "public/mc_linux.h"
+/* Platform specific settings */
+#include "platform.h"
+
+#define MC_VERSION(major, minor) \
+ (((major & 0x0000ffff) << 16) | (minor & 0x0000ffff))
+
+/* Instance data for MobiCore Daemon and TLCs. */
+struct mc_instance {
+ /* lock for the instance */
+ struct mutex lock;
+ /* unique handle */
+ unsigned int handle;
+ bool admin;
+};
+
+/*
+ * Contiguous buffer allocated to TLCs.
+ * These buffers are uses as world shared memory (wsm) and shared with
+ * secure world.
+ * The virtual kernel address is added for a simpler search algorithm.
+ */
+struct mc_buffer {
+ struct list_head list;
+ /* unique handle */
+ unsigned int handle;
+ /* Number of references kept to this buffer */
+ atomic_t usage;
+ /* virtual Kernel start address */
+ void *addr;
+ /* virtual Userspace start address */
+ void *uaddr;
+ /* physical start address */
+ phys_addr_t phys;
+ /* order of number of pages */
+ unsigned int order;
+ uint32_t len;
+ struct mc_instance *instance;
+};
+
+/* MobiCore Driver Kernel Module context data. */
+struct mc_context {
+ /* MobiCore MCI information */
+ struct mc_buffer mci_base;
+ /* MobiCore MCP buffer */
+ struct mc_mcp_buffer *mcp;
+ /* event completion */
+ struct completion isr_comp;
+ /* isr event counter */
+ unsigned int evt_counter;
+ atomic_t isr_counter;
+ /* ever incrementing counters */
+ atomic_t handle_counter;
+ atomic_t instance_counter;
+ /* pointer to instance of daemon */
+ struct mc_instance *daemon_inst;
+ /* pointer to instance of daemon */
+ struct task_struct *daemon;
+ /* General list of contiguous buffers allocated by the kernel */
+ struct list_head cont_bufs;
+ /* Lock for the list of contiguous buffers */
+ struct mutex cont_bufs_lock;
+};
+
+struct mc_sleep_mode {
+ uint16_t sleep_req;
+ uint16_t ready_to_sleep;
+};
+
+/* MobiCore is idle. No scheduling required. */
+#define SCHEDULE_IDLE 0
+/* MobiCore is non idle, scheduling is required. */
+#define SCHEDULE_NON_IDLE 1
+
+/* MobiCore status flags */
+struct mc_flags {
+ /*
+ * Scheduling hint: if <> SCHEDULE_IDLE, MobiCore should
+ * be scheduled by the NWd
+ */
+ uint32_t schedule;
+ /* State of sleep protocol */
+ struct mc_sleep_mode sleep_mode;
+ /* Reserved for future use: Must not be interpreted */
+ uint32_t rfu[2];
+};
+
+/* MCP buffer structure */
+struct mc_mcp_buffer {
+ /* MobiCore Flags */
+ struct mc_flags flags;
+ uint32_t rfu; /* MCP message buffer - ignore */
+};
+
+/* True when the instance belongs to the MobiCore Daemon (admin flag set);
+ * a NULL instance is never the daemon. */
+static inline bool is_daemon(struct mc_instance *instance)
+{
+ return instance ? instance->admin : false;
+}
+
+
+/* Initialize a new mobicore API instance object */
+struct mc_instance *mc_alloc_instance(void);
+/* Release a mobicore instance object and all objects related to it */
+int mc_release_instance(struct mc_instance *instance);
+
+/*
+ * mc_register_wsm_mmu() - Create a MMU table from a virtual memory buffer which
+ * can be vmalloc or user space virtual memory
+ */
+int mc_register_wsm_mmu(struct mc_instance *instance,
+ void *buffer, uint32_t len,
+ uint32_t *handle, phys_addr_t *phys);
+/* Unregister the buffer mapped above */
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle);
+
+/* Allocate one mc_buffer of contiguous space */
+int mc_get_buffer(struct mc_instance *instance,
+ struct mc_buffer **buffer, unsigned long len);
+/* Free the buffer allocated above */
+int mc_free_buffer(struct mc_instance *instance, uint32_t handle);
+
+/* Check if the other end of the fd owns instance */
+bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd);
+
+/* Get a unique handle */
+uint32_t mc_get_new_handle(void);
+
+/* Test if sleep is possible */
+bool mc_sleep_ready(void);
+
+#endif /* _MC_MAIN_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.c
new file mode 100644
index 000000000..d65a91fee
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.c
@@ -0,0 +1,813 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ */
+#include "main.h"
+#include "debug.h"
+#include "mem.h"
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+
+#ifdef LPAE_SUPPORT
+#define MMU_TYPE_PAGE (3 << 0)
+#define MMU_BUFFERABLE (1 << 2) /* AttrIndx[0] */
+#define MMU_CACHEABLE (1 << 3) /* AttrIndx[1] */
+#define MMU_NS (1 << 5)
+#define MMU_AP_RW_ALL (1 << 6) /* AP[2:1], RW, at any privilege level */
+#define MMU_EXT_SHARED (3 << 8) /* SH[1:0], inner shareable */
+#define MMU_EXT_AF (1 << 10) /* Access Flag */
+#define MMU_EXT_NG (1 << 11)
+#define MMU_EXT_XN (((uint64_t)1) << 54) /* XN */
+#define MMU_PHYS_MASK (((1ULL << 40) - 1) & ~((1ULL << 12) - 1))
+#else
+#define MMU_TYPE_EXT (3 << 0) /* v5 */
+#define MMU_TYPE_SMALL (2 << 0)
+#define MMU_BUFFERABLE (1 << 2)
+#define MMU_CACHEABLE (1 << 3)
+#define MMU_EXT_AP0 (1 << 4)
+#define MMU_EXT_AP1 (2 << 4)
+#define MMU_EXT_TEX(x) ((x) << 6) /* v5 */
+#define MMU_EXT_SHARED (1 << 10) /* v6 */
+#define MMU_EXT_NG (1 << 11) /* v6 */
+#define MMU_PHYS_MASK 0xFFFFF000
+#endif
+
+/* MobiCore memory context data */
+struct mc_mem_context mem_ctx;
+
+/*
+ * Mark a page dirty and drop the reference that was taken on it when it
+ * was pinned (via get_user_pages()/get_page()/vmalloc_to_page() in
+ * map_buffer()), so modifications made through the shared mapping are
+ * not lost on writeback.
+ */
+static inline void release_page(struct page *page)
+{
+ set_bit(PG_dirty, &page->flags);
+
+ page_cache_release(page);
+}
+
+/*
+ * Pin @pages_no user-space pages of @task starting at page-aligned
+ * @virt_start_page_addr, storing the page pointers into @pages.
+ * Pages are pinned for write access. Returns 0 on success or -ENOMEM
+ * if not all pages could be pinned (any partially pinned pages are
+ * released again before returning).
+ *
+ * NOTE(review): this uses the old 8-argument get_user_pages() signature
+ * (pre-4.x kernels) — matches the 3.x target of this backport.
+ */
+static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
+ int pages_no, struct page **pages)
+{
+ int locked_pages;
+
+ /* lock user pages, must hold the mmap_sem to do this. */
+ down_read(&(task->mm->mmap_sem));
+ locked_pages = get_user_pages(
+ task,
+ task->mm,
+ (unsigned long)virt_start_page_addr,
+ pages_no,
+ 1, /* write access */
+ 0,
+ pages,
+ NULL);
+ up_read(&(task->mm->mmap_sem));
+
+ /* check if we could lock all pages. */
+ if (locked_pages != pages_no) {
+ MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
+ locked_pages);
+ if (locked_pages > 0) {
+ /* release all locked pages. */
+ release_pages(pages, locked_pages, 0);
+ }
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Resolve the kernel-virtual address of the MMU table slot @table occupies
+ * inside its backing store page; NULL (with a WARN) on any broken link.
+ */
+static void *get_mmu_table_kernel_virt(struct mc_mmu_table *table)
+{
+ struct mc_mmu_table_store *store;
+
+ if (WARN(!table, "Invalid MMU table"))
+ return NULL;
+ if (WARN(!table->set, "Invalid MMU table set"))
+ return NULL;
+
+ store = table->set->kernel_virt;
+ if (WARN(!store, "Invalid MMU pointer"))
+ return NULL;
+
+ return &store->table[table->idx];
+}
+
+/*
+ * Look up a used MMU table by its unique handle.
+ * Returns NULL when no match exists. Caller must hold table_lock.
+ */
+struct mc_mmu_table *find_mmu_table(unsigned int handle)
+{
+ struct mc_mmu_table *entry;
+
+ list_for_each_entry(entry, &mem_ctx.mmu_tables, list)
+ if (entry->handle == handle)
+ return entry;
+
+ return NULL;
+}
+
+/*
+ * Look up a used MMU table by its physical address.
+ * Returns NULL when no match exists. Caller must hold table_lock.
+ */
+struct mc_mmu_table *find_mmu_table_by_phys_addr(phys_addr_t phys)
+{
+ struct mc_mmu_table *entry;
+
+ list_for_each_entry(entry, &mem_ctx.mmu_tables, list)
+ if (entry->phys == phys)
+ return entry;
+
+ return NULL;
+}
+
+
+/*
+ * Allocate a new MMU table store (one zeroed page) plus
+ * MMU_TABLES_PER_PAGE descriptors in the MMU free tables list.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ * Assumes the table_lock is already taken by the caller above.
+ */
+static int alloc_mmu_table_store(void)
+{
+ unsigned long store;
+ struct mc_mmu_tables_set *mmutable_set;
+ struct mc_mmu_table *mmutable, *mmutable2;
+ struct page *page;
+ int ret = 0, i;
+ /* temp list for holding the MMU tables */
+ LIST_HEAD(temp);
+
+ store = get_zeroed_page(GFP_KERNEL);
+ if (!store)
+ return -ENOMEM;
+
+ /*
+ * Actually, locking is not necessary, because kernel
+ * memory is not supposed to get swapped out. But we
+ * play safe....
+ */
+ page = virt_to_page(store);
+ set_bit(PG_reserved, &page->flags);
+
+ /* add all the descriptors to the free descriptors list */
+ mmutable_set = kmalloc(sizeof(*mmutable_set), GFP_KERNEL | __GFP_ZERO);
+ if (mmutable_set == NULL) {
+ ret = -ENOMEM;
+ goto free_store;
+ }
+ /* initialize */
+ mmutable_set->kernel_virt = (void *)store;
+ mmutable_set->page = page;
+ mmutable_set->phys = virt_to_phys((void *)store);
+ /* the set is not yet used */
+ atomic_set(&mmutable_set->used_tables, 0);
+
+ /* init add to list. */
+ INIT_LIST_HEAD(&(mmutable_set->list));
+ list_add(&mmutable_set->list, &mem_ctx.mmu_tables_sets);
+
+ for (i = 0; i < MMU_TABLES_PER_PAGE; i++) {
+ /* allocate a WSM MMU descriptor */
+ mmutable = kmalloc(sizeof(*mmutable), GFP_KERNEL | __GFP_ZERO);
+ if (mmutable == NULL) {
+ ret = -ENOMEM;
+ MCDRV_DBG_ERROR(mcd, "out of memory");
+ /* Free the full temp list and the store in this case */
+ goto free_temp_list;
+ }
+
+ /* set set reference */
+ mmutable->set = mmutable_set;
+ mmutable->idx = i;
+ mmutable->virt = get_mmu_table_kernel_virt(mmutable);
+ mmutable->phys = mmutable_set->phys+i*sizeof(struct mmutable);
+ atomic_set(&mmutable->usage, 0);
+
+ /* add to temp list. */
+ INIT_LIST_HEAD(&mmutable->list);
+ list_add_tail(&mmutable->list, &temp);
+ }
+
+ /*
+ * If everything went ok then merge the temp list with the global
+ * free list
+ */
+ list_splice_tail(&temp, &mem_ctx.free_mmu_tables);
+ return 0;
+free_temp_list:
+ list_for_each_entry_safe(mmutable, mmutable2, &temp, list) {
+ kfree(mmutable);
+ }
+
+ list_del(&mmutable_set->list);
+ /* Fix: the set descriptor itself was leaked on this error path
+ * (it was unlinked above but never freed). */
+ kfree(mmutable_set);
+
+free_store:
+ /* Fix: undo the PG_reserved bit set above before returning the page;
+ * the success path (free_mmu_table) clears it, the error paths did
+ * not. */
+ clear_bit(PG_reserved, &page->flags);
+ free_page(store);
+ return ret;
+}
+
+/*
+ * Get a MMU table from the free tables list or allocate a new one and
+ * initialize it (fresh handle, owner, usage count of 1).
+ * Returns the table or an ERR_PTR on failure.
+ * Assumes the table_lock is already taken.
+ */
+static struct mc_mmu_table *alloc_mmu_table(struct mc_instance *instance)
+{
+ int ret = 0;
+ struct mc_mmu_table *table = NULL;
+
+ if (list_empty(&mem_ctx.free_mmu_tables)) {
+ ret = alloc_mmu_table_store();
+ if (ret) {
+ MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
+ /* Fix: propagate the real error code instead of
+ * hard-coding -ENOMEM. */
+ return ERR_PTR(ret);
+ }
+ /* if it's still empty something wrong has happened */
+ if (list_empty(&mem_ctx.free_mmu_tables)) {
+ MCDRV_DBG_ERROR(mcd,
+ "Free list not updated correctly!");
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ /* get a WSM MMU descriptor. The list is non-empty at this point, so
+ * list_first_entry() cannot yield NULL — the old NULL check here was
+ * dead code and has been removed. */
+ table = list_first_entry(&mem_ctx.free_mmu_tables,
+ struct mc_mmu_table, list);
+
+ /* Move it to the used MMU tables list */
+ list_move_tail(&table->list, &mem_ctx.mmu_tables);
+
+ table->handle = mc_get_new_handle();
+ table->owner = instance;
+
+ atomic_inc(&table->set->used_tables);
+ atomic_inc(&table->usage);
+
+ MCDRV_DBG_VERBOSE(mcd,
+ "chunkPhys=0x%llX, idx=%d, usage=%d, owner=%p",
+ (u64)table->set->phys, table->idx,
+ atomic_read(&(table->usage)), table->owner);
+
+ return table;
+}
+
+/*
+ * Frees the object associated with a MMU table. Initially the object is moved
+ * to the free tables list, but if all the 4 lists of the store are free
+ * then the store is also released.
+ * Assumes the table_lock is already taken.
+ */
+static void free_mmu_table(struct mc_mmu_table *table)
+{
+ struct mc_mmu_tables_set *mmutable_set;
+
+ if (WARN(!table, "Invalid table"))
+ return;
+
+ mmutable_set = table->set;
+ if (WARN(!mmutable_set, "Invalid table set"))
+ return;
+
+ /* Return the descriptor to the free list; it is only kfree'd below
+ * if this drop empties the whole backing store. */
+ list_move_tail(&table->list, &mem_ctx.free_mmu_tables);
+
+ /* if nobody uses this set, we can release it. */
+ if (atomic_dec_and_test(&mmutable_set->used_tables)) {
+ struct mc_mmu_table *tmp;
+
+ /* remove from list */
+ list_del(&mmutable_set->list);
+ /*
+ * All the MMU tables are in the free list for this set
+ * so we can just remove them from there
+ */
+ list_for_each_entry_safe(table, tmp, &mem_ctx.free_mmu_tables,
+ list) {
+ if (table->set == mmutable_set) {
+ list_del(&table->list);
+ kfree(table);
+ }
+ } /* end while */
+
+ /*
+ * We shouldn't recover from this since it was some data
+ * corruption before
+ */
+ BUG_ON(!mmutable_set->page);
+ /* Undo the PG_reserved bit set in alloc_mmu_table_store() */
+ clear_bit(PG_reserved, &(mmutable_set->page)->flags);
+
+
+ BUG_ON(!mmutable_set->kernel_virt);
+ free_page((unsigned long)mmutable_set->kernel_virt);
+
+ kfree(mmutable_set);
+ }
+}
+
+/*
+ * Create a MMU table in a WSM container that has been allocates previously.
+ * Assumes the table lock is already taken or there is no need to take like
+ * when first creating the MMU table the full list is locked.
+ *
+ * Pins (or references) every page of the buffer, then writes one page
+ * table entry per page into table->virt. Returns 0 on success or a
+ * negative errno; buffers larger than 1 MiB are rejected.
+ *
+ * @task pointer to task owning WSM
+ * @wsm_buffer user space WSM start
+ * @wsm_len WSM length
+ * @table Pointer to MMU table details
+ */
+static int map_buffer(struct task_struct *task, void *wsm_buffer,
+ unsigned int wsm_len, struct mc_mmu_table *table)
+{
+ int ret = 0;
+ unsigned int i, nr_of_pages;
+ /* start address of the 4 KiB page of wsm_buffer */
+ void *virt_addr_page;
+ struct page *page;
+ struct mmutable *mmutable;
+ struct page **mmutable_as_array_of_pointers_to_page;
+ /* page offset in wsm buffer */
+ unsigned int offset;
+
+ if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
+ return -EINVAL;
+
+ if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
+ return -EINVAL;
+
+ if (WARN(!table, "Invalid mapping table for WSM"))
+ return -EINVAL;
+
+ /* no size > 1Mib supported */
+ if (wsm_len > SZ_1M) {
+ MCDRV_DBG_ERROR(mcd, "size > 1 MiB");
+ return -EINVAL;
+ }
+
+ MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x", wsm_buffer,
+ wsm_len);
+
+ /* calculate page usage */
+ virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
+ offset = (unsigned int) (((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
+ nr_of_pages = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
+
+ MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d",
+ virt_addr_page, nr_of_pages);
+
+ /* MMU table can hold max 1MiB in 256 pages. */
+ if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
+ MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB");
+ return -EINVAL;
+ }
+
+ mmutable = table->virt;
+ /*
+ * We use the memory for the MMU table to hold the pointer
+ * and convert them later. This works, as everything comes
+ * down to a 32 bit value.
+ */
+ mmutable_as_array_of_pointers_to_page = (struct page **)mmutable;
+
+ /* Request comes from user space */
+ if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
+ /*
+ * lock user page in memory, so they do not get swapped
+ * out.
+ * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
+ * function, maybe it is called fast_gup() in some versions.
+ * handle user process doing a fork().
+ * Child should not get things.
+ * http://osdir.com/ml/linux-media/2009-07/msg00813.html
+ * http://lwn.net/Articles/275808/
+ */
+ ret = lock_pages(task, virt_addr_page, nr_of_pages,
+ mmutable_as_array_of_pointers_to_page);
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed");
+ return ret;
+ }
+ }
+ /* Request comes from kernel space(cont buffer) */
+ else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
+ void *uaddr = wsm_buffer;
+ for (i = 0; i < nr_of_pages; i++) {
+ page = virt_to_page(uaddr);
+ if (!page) {
+ MCDRV_DBG_ERROR(mcd, "failed to map address");
+ return -EINVAL;
+ }
+ /* take a reference; dropped later via release_page() */
+ get_page(page);
+ mmutable_as_array_of_pointers_to_page[i] = page;
+ uaddr += PAGE_SIZE;
+ }
+ }
+ /* Request comes from kernel space(vmalloc buffer) */
+ else {
+ void *uaddr = wsm_buffer;
+ for (i = 0; i < nr_of_pages; i++) {
+ page = vmalloc_to_page(uaddr);
+ if (!page) {
+ MCDRV_DBG_ERROR(mcd, "failed to map address");
+ return -EINVAL;
+ }
+ /* take a reference; dropped later via release_page() */
+ get_page(page);
+ mmutable_as_array_of_pointers_to_page[i] = page;
+ uaddr += PAGE_SIZE;
+ }
+ }
+
+ table->pages = nr_of_pages;
+
+ /*
+ * create MMU Table entries.
+ * used_mmutable->table contains a list of page pointers here.
+ * For a proper cleanup we have to ensure that the following
+ * code either works and used_mmutable contains a valid MMU table
+ * - or fails and used_mmutable->table contains the list of page
+ * pointers.
+ * Any mixed contents will make cleanup difficult.
+ */
+#if defined(CONFIG_ARM64) && !defined(LPAE_SUPPORT)
+ /*
+ * When NWd pointers are 64bits and SWd pte 32bits we need to fill the
+ * table from 0.
+ */
+ i = 0;
+#else
+ /*
+ * Fill the table in reverse order as the table is used as input and
+ * output.
+ */
+ i = MC_ARM_MMU_TABLE_ENTRIES-1;
+#endif
+ do {
+ if (i < nr_of_pages) {
+#ifdef LPAE_SUPPORT
+ uint64_t pte;
+#else
+ uint32_t pte;
+#endif
+ page = mmutable_as_array_of_pointers_to_page[i];
+
+ if (!page) {
+ MCDRV_DBG_ERROR(mcd, "page address is null");
+ return -EFAULT;
+ }
+ /*
+ * create MMU table entry, see ARM MMU docu for details
+ * about flags stored in the lowest 12 bits.
+ * As a side reference, the Article
+ * "ARM's multiply-mapped memory mess"
+ * found in the collection at
+ * http://lwn.net/Articles/409032/
+ * is also worth reading.
+ */
+ pte = page_to_phys(page);
+#ifdef LPAE_SUPPORT
+ pte |= MMU_EXT_XN
+ | MMU_EXT_NG
+ | MMU_EXT_AF
+ | MMU_AP_RW_ALL
+ | MMU_NS
+ | MMU_CACHEABLE | MMU_BUFFERABLE
+ | MMU_TYPE_PAGE;
+#else
+ pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+ | MMU_CACHEABLE | MMU_BUFFERABLE
+ | MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#endif /* LPAE_SUPPORT */
+ /*
+ * Linux uses different mappings for SMP systems(the
+ * sharing flag is set for the pte. In order not to
+ * confuse things too much in Mobicore make sure the
+ * shared buffers have the same flags.
+ * This should also be done in SWD side
+ */
+#ifdef CONFIG_SMP
+#ifdef LPAE_SUPPORT
+ pte |= MMU_EXT_SHARED;
+#else
+ pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
+#endif /* LPAE_SUPPORT */
+#endif /* CONFIG_SMP */
+
+ mmutable->table_entries[i] = pte;
+ MCDRV_DBG_VERBOSE(mcd, "MMU entry %d: 0x%llx, virt %p",
+ i, (u64)(pte), page);
+ MCDRV_DBG_VERBOSE(mcd, "MMU entry: virt %p",
+ phys_to_virt(pte&MMU_PHYS_MASK));
+ } else {
+ /* ensure rest of table is empty */
+ mmutable->table_entries[i] = 0;
+ }
+#if defined(CONFIG_ARM64) && !defined(LPAE_SUPPORT)
+ } while (++i < MC_ARM_MMU_TABLE_ENTRIES);
+#else
+ } while (i-- != 0);
+#endif
+
+ return ret;
+}
+
+/*
+ * Remove a MMU table in a WSM container. Afterwards the container may be
+ * released. Assumes the table_lock and the lock is taken.
+ *
+ * Walks the table's entries, converts each pte back to its struct page
+ * and drops the reference taken in map_buffer().
+ */
+static void unmap_buffers(struct mc_mmu_table *table)
+{
+ struct mmutable *mmutable;
+ int i;
+
+ if (WARN_ON(!table))
+ return;
+
+ /* found the table, now release the resources. */
+ MCDRV_DBG_VERBOSE(mcd,
+ "releasing %d referenced pages of table phys=0x%llX",
+ table->pages, (u64)table->phys);
+
+ mmutable = table->virt;
+
+ /* release all locked user space pages */
+ for (i = 0; i < table->pages; i++) {
+ /* convert physical entries from MMU table to page pointers.
+ * NOTE(review): pte_page() here presumably tolerates the ARM
+ * flag bits set in the low 12 bits of each entry — verify. */
+ struct page *page = pte_page(mmutable->table_entries[i]);
+ MCDRV_DBG_VERBOSE(mcd, "MMU entry %d: 0x%llx, virt %p", i,
+ (u64)(mmutable->table_entries[i]), page);
+ BUG_ON(!page);
+ release_page(page);
+ }
+
+ /* remember that all pages have been freed */
+ table->pages = 0;
+}
+
+/* Delete a used MMU table. Assumes the table_lock and the lock is taken */
+static void unmap_mmu_table(struct mc_mmu_table *table)
+{
+ /* Release only when neither NWd nor SWd/MC hold references anymore. */
+ if (atomic_dec_and_test(&table->usage)) {
+ unmap_buffers(table);
+ free_mmu_table(table);
+ return;
+ }
+
+ /* Still locked by another process: keep it. */
+ MCDRV_DBG_VERBOSE(mcd,
+ "table phys=%llx still in use (usage now %d)",
+ (u64)table->phys,
+ atomic_read(&(table->usage)));
+}
+
+/*
+ * Release the MMU table identified by @handle on behalf of @instance.
+ * Only the owner or the daemon may free a table. For a fake L1 table,
+ * every referenced sub-table and the index buffer itself are released
+ * first. Returns 0, or -EINVAL/-EPERM/-EFAULT on error.
+ */
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+ struct mc_mmu_table *table;
+ struct mc_mmu_table *table2;
+ int ret = 0;
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+
+ if (table == NULL) {
+ MCDRV_DBG_VERBOSE(mcd, "entry not found");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ if (instance == table->owner) {
+ /* Prevent double free */
+ table->owner = NULL;
+ } else if (!is_daemon(instance)) {
+ MCDRV_DBG_ERROR(mcd, "instance does not own it");
+ ret = -EPERM;
+ goto err_unlock;
+ }
+
+ /* clean-up in the case of fake L1 table:
+ * we need to unmap all sub-tables and
+ * the buffer referred by the fake table
+ */
+ if (table->type&MC_MMU_TABLE_TYPE_WSM_FAKE_L1) {
+ int i = 0;
+ uint64_t *va;
+ /* first and only record of the fake table
+ * contains physical address of the buffer
+ */
+#ifdef LPAE_SUPPORT
+ uint64_t *pte = (uint64_t *)table->virt;
+#else
+ uint32_t *pte = (uint32_t *)table->virt;
+#endif /* LPAE_SUPPORT */
+ /* convert it to virtual address */
+ va = phys_to_virt((*pte)&MMU_PHYS_MASK);
+ MCDRV_DBG_VERBOSE(mcd, "va = 0x%p", va);
+ /* loop through uin64_t buffer records until the first zero
+ * entry or the end of the 4K index page, unmapping each
+ * referenced sub-table */
+ if (va != NULL) {
+ while ((va[i] != 0) &&
+ (i < (SZ_4K / sizeof(uint64_t)))) {
+ MCDRV_DBG_VERBOSE(
+ mcd,
+ "phys. addr of sub-table [%d] = 0x%llX",
+ i,
+ (u64)va[i]);
+ table2 = find_mmu_table_by_phys_addr(
+ (phys_addr_t)va[i]);
+ MCDRV_DBG_VERBOSE(mcd, "table2 = 0x%p", table2);
+ if (table2 != NULL)
+ unmap_mmu_table(table2);
+ i++;
+ }
+ /* release the index buffer itself */
+ free_page((unsigned long)va);
+ }
+ }
+
+ /* free table (if no further locks exist) */
+ unmap_mmu_table(table);
+err_unlock:
+ mutex_unlock(&mem_ctx.table_lock);
+
+ return ret;
+}
+
+/*
+ * Take an extra usage reference on the MMU table identified by @handle.
+ * Only the owner or the daemon may lock a table.
+ * Returns 0, or -EINVAL/-EPERM/-EFAULT on error.
+ */
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+ int ret = 0;
+ struct mc_mmu_table *table = NULL;
+
+ if (WARN(!instance, "No instance data available"))
+ return -EFAULT;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+
+ if (table == NULL) {
+ MCDRV_DBG_VERBOSE(mcd, "entry not found %u", handle);
+ ret = -EINVAL;
+ goto table_err;
+ }
+ if (instance != table->owner && !is_daemon(instance)) {
+ /* Fix: message typo ("does no own it"); now matches the
+ * wording used by mc_free_mmu_table(). */
+ MCDRV_DBG_ERROR(mcd, "instance does not own it");
+ ret = -EPERM;
+ goto table_err;
+ }
+
+ /* lock entry */
+ atomic_inc(&table->usage);
+table_err:
+ mutex_unlock(&mem_ctx.table_lock);
+ return ret;
+}
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ * Returns the new table (usage count 1, owned by @instance) or an
+ * ERR_PTR on failure.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+ struct task_struct *task, void *wsm_buffer, unsigned int wsm_len,
+ unsigned int type)
+{
+ int ret = 0;
+ struct mc_mmu_table *table;
+
+ if (WARN(!instance, "No instance data available"))
+ return ERR_PTR(-EFAULT);
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = alloc_mmu_table(instance);
+ if (IS_ERR(table)) {
+ MCDRV_DBG_ERROR(mcd, "alloc_mmu_table() failed");
+ /* Fix: preserve the original error code (alloc_mmu_table()
+ * may return -EFAULT) instead of collapsing it to -ENOMEM. */
+ ret = PTR_ERR(table);
+ goto err_no_mem;
+ }
+
+ /* create the MMU page for the WSM */
+ ret = map_buffer(task, wsm_buffer, wsm_len, table);
+
+ if (ret != 0) {
+ MCDRV_DBG_ERROR(mcd, "map_buffer() failed");
+ unmap_mmu_table(table);
+ goto err_no_mem;
+ }
+ table->type = type;
+ MCDRV_DBG_VERBOSE(mcd,
+ "mapped buffer %p to table with handle %d @ 0x%llX",
+ wsm_buffer, table->handle, (u64)table->phys);
+
+ mutex_unlock(&mem_ctx.table_lock);
+ return table;
+err_no_mem:
+ mutex_unlock(&mem_ctx.table_lock);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Look up the physical address of the MMU table with @handle, verifying
+ * that the other end of @fd owns the table. Returns 0 on any failure.
+ */
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd)
+{
+ struct mc_mmu_table *table;
+ phys_addr_t phys = 0;
+
+ mutex_lock(&mem_ctx.table_lock);
+ table = find_mmu_table(handle);
+ if (table == NULL) {
+ MCDRV_DBG_ERROR(mcd, "entry not found %u", handle);
+ } else if (!mc_check_owner_fd(table->owner, fd)) {
+ /* It's safe here not to lock the instance since the owner of
+ * the table will be cleared only with the table lock taken */
+ MCDRV_DBG_ERROR(mcd, "not valid owner %u", handle);
+ } else {
+ phys = table->phys;
+ }
+ mutex_unlock(&mem_ctx.table_lock);
+
+ return phys;
+}
+
+/* Release every used WSM table whose owning instance is gone (orphaned). */
+void mc_clean_mmu_tables(void)
+{
+ struct mc_mmu_table *entry, *next;
+
+ mutex_lock(&mem_ctx.table_lock);
+ list_for_each_entry_safe(entry, next, &mem_ctx.mmu_tables, list) {
+ if (entry->owner != NULL)
+ continue;
+ MCDRV_DBG_VERBOSE(mcd,
+ "WSM table phys=0x%llX pages=%d",
+ (u64)entry->phys, entry->pages);
+ unmap_mmu_table(entry);
+ }
+ mutex_unlock(&mem_ctx.table_lock);
+}
+
+/*
+ * Drop @instance's ownership of all its WSM tables and release each one
+ * (or leave it orphaned for mc_clean_mmu_tables() if still referenced).
+ */
+void mc_clear_mmu_tables(struct mc_instance *instance)
+{
+ struct mc_mmu_table *table, *tmp;
+
+ mutex_lock(&mem_ctx.table_lock);
+ /* Check if some WSM is still in use. */
+ list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+ if (table->owner == instance) {
+ MCDRV_DBG_VERBOSE(mcd, "WSM table phys=0x%llX pages=%d",
+ (u64)table->phys, table->pages);
+ /* unlock app usage and free or mark it as orphan */
+ table->owner = NULL;
+ unmap_mmu_table(table);
+ }
+ }
+ mutex_unlock(&mem_ctx.table_lock);
+}
+
+/*
+ * Initialize the global MMU memory context: the lock plus the lists of
+ * table stores, used tables and free tables. Always returns 0.
+ */
+int mc_init_mmu_tables(void)
+{
+ mutex_init(&mem_ctx.table_lock);
+
+ /* WSM MMU store, used-table and free-table descriptor lists. */
+ INIT_LIST_HEAD(&mem_ctx.mmu_tables_sets);
+ INIT_LIST_HEAD(&mem_ctx.mmu_tables);
+ INIT_LIST_HEAD(&mem_ctx.free_mmu_tables);
+
+ return 0;
+}
+
+/*
+ * Shutdown-time sanity check: warn for every WSM MMU table that is still
+ * on the used list. Note this only warns, it does not free the tables.
+ */
+void mc_release_mmu_tables(void)
+{
+ struct mc_mmu_table *table;
+ /* Check if some WSM is still in use. */
+ list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+ WARN(1, "WSM MMU still in use: phys=0x%llX ,nr_of_pages=%d",
+ (u64)table->phys, table->pages);
+ }
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.h
new file mode 100644
index 000000000..c4b6715f2
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/mem.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_MEM_H_
+#define _MC_MEM_H_
+
+#ifdef LPAE_SUPPORT
+/*
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 2 MiB by using 512 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES 512
+
+/* ARM level 3 (MMU) table with 512 entries. Size: 4k */
+struct mmutable {
+ uint64_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There is 1 table in each page. */
+#define MMU_TABLES_PER_PAGE 1
+#else
+/*
+ * MobiCore specific page tables for world shared memory.
+ * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
+ * MobiCore uses the default ARM format.
+ *
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES 256
+
+/* ARM level 2 (MMU) table with 256 entries. Size: 1k */
+struct mmutable {
+ uint32_t table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There are 4 tables in each page. */
+#define MMU_TABLES_PER_PAGE 4
+#endif
+
+/* mc_mmu_table type flags */
+#define MC_MMU_TABLE_TYPE_WSM_FAKE_L1 1
+
+
+/* Store for four MMU tables in one 4kb page*/
+struct mc_mmu_table_store {
+ struct mmutable table[MMU_TABLES_PER_PAGE];
+};
+
+/* Usage and maintenance information about mc_mmu_table_store */
+struct mc_mmu_tables_set {
+ struct list_head list;
+ /* kernel virtual address */
+ struct mc_mmu_table_store *kernel_virt;
+ /* physical address */
+ phys_addr_t phys;
+ /* pointer to page struct */
+ struct page *page;
+ /* How many pages from this set are used */
+ atomic_t used_tables;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into SWd, a MMU table is allocated.
+ * In addition, the area of maximum 1MB virtual address space is mapped into
+ * the MMU table and a handle for this table is returned to the user.
+ */
+struct mc_mmu_table {
+ struct list_head list;
+ /* Table lock */
+ struct mutex lock;
+ /* handle as communicated to user mode */
+ unsigned int handle;
+ /* Number of references kept to this MMU table */
+ atomic_t usage;
+ /* owner of this MMU table */
+ struct mc_instance *owner;
+ /* set describing where our MMU table is stored */
+ struct mc_mmu_tables_set *set;
+ /* index into MMU table set */
+ unsigned int idx;
+ /* size of buffer */
+ unsigned int pages;
+ /* virtual address*/
+ void *virt;
+ /* physical address */
+ phys_addr_t phys;
+ /* type of mmu table */
+ unsigned int type;
+};
+
+/* MobiCore Driver Memory context data. */
+struct mc_mem_context {
+ struct mc_instance *daemon_inst;
+ /* Backing store for MMU tables */
+ struct list_head mmu_tables_sets;
+ /* Bookkeeping for used MMU tables */
+ struct list_head mmu_tables;
+ /* Bookkeeping for free MMU tables */
+ struct list_head free_mmu_tables;
+ /* semaphore to synchronize access to above lists */
+ struct mutex table_lock;
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+ struct task_struct *task, void *wsm_buffer, unsigned int wsm_len,
+ unsigned int type);
+
+/* Delete all the MMU tables associated with an instance */
+void mc_clear_mmu_tables(struct mc_instance *instance);
+
+/* Release all orphaned MMU tables */
+void mc_clean_mmu_tables(void);
+
+/* Delete a used MMU table. */
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * Lock a MMU table - the daemon adds +1 to refcount of the MMU table
+ * marking it in use by SWD so it doesn't get released when the TLC dies.
+ */
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/* Return the phys address of MMU table. */
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd);
+/* Release all used MMU tables to Linux memory space */
+void mc_release_mmu_tables(void);
+
+/* Initialize all MMU tables structure */
+int mc_init_mmu_tables(void);
+
+#endif /* _MC_MEM_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.c
new file mode 100644
index 000000000..ad9e9e243
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ */
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+#include <linux/moduleparam.h>
+
+
+#include "main.h"
+#include "fastcall.h"
+#include "ops.h"
+#include "mem.h"
+#include "pm.h"
+#include "debug.h"
+
+/* MobiCore context data */
+static struct mc_context *ctx;
+#ifdef TBASE_CORE_SWITCHER
+static uint32_t active_cpu;
+
+#ifdef TEST
+ /*
+ * Normal world <t-base core info for testing.
+ */
+
+ module_param(active_cpu, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ MODULE_PARM_DESC(active_cpu, "Active <t-base Core");
+#endif
+
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+static struct notifier_block mobicore_cpu_notifer = {
+ .notifier_call = mobicore_cpu_callback,
+};
+#endif
+
+static inline long smc(union fc_generic *fc)
+{
+ /* If we request sleep yields must be filtered out as they
+ * make no sense */
+ if (ctx->mcp)
+ if (ctx->mcp->flags.sleep_mode.sleep_req) {
+ if (fc->as_in.cmd == MC_SMC_N_YIELD)
+ return MC_FC_RET_ERR_INVALID;
+ }
+ return _smc(fc);
+}
+
+struct fastcall_work {
+#ifdef MC_FASTCALL_WORKER_THREAD
+ struct kthread_work work;
+#else
+ struct work_struct work;
+#endif
+ void *data;
+};
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work);
+#else
+static void fastcall_work_func(struct work_struct *work);
+#endif
+
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+
+static struct task_struct *fastcall_thread;
+static DEFINE_KTHREAD_WORKER(fastcall_worker);
+
+bool mc_fastcall(void *data)
+{
+ struct fastcall_work fc_work = {
+ KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
+ .data = data,
+ };
+
+ if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
+ return false;
+ flush_kthread_work(&fc_work.work);
+ return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+ int ret = 0;
+ ctx = context;
+
+ fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
+ "mc_fastcall");
+ if (IS_ERR(fastcall_thread)) {
+ ret = PTR_ERR(fastcall_thread);
+ fastcall_thread = NULL;
+ MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)", ret);
+ return ret;
+ }
+
+ wake_up_process(fastcall_thread);
+
+ /* this thread MUST run on CPU 0 at startup */
+ set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
+#ifdef TBASE_CORE_SWITCHER
+ register_cpu_notifier(&mobicore_cpu_notifer);
+#endif
+ return 0;
+}
+
+void mc_fastcall_destroy(void)
+{
+ if (!IS_ERR_OR_NULL(fastcall_thread)) {
+ kthread_stop(fastcall_thread);
+ fastcall_thread = NULL;
+ }
+}
+#else
+
+bool mc_fastcall(void *data)
+{
+ struct fastcall_work work = {
+ .data = data,
+ };
+ INIT_WORK(&work.work, fastcall_work_func);
+ if (!schedule_work_on(0, &work.work))
+ return false;
+ flush_work(&work.work);
+ return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+ ctx = context;
+ return 0;
+};
+
+void mc_fastcall_destroy(void) {};
+#endif
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work)
+#else
+static void fastcall_work_func(struct work_struct *work)
+#endif
+{
+ struct fastcall_work *fc_work =
+ container_of(work, struct fastcall_work, work);
+ union fc_generic *fc_generic = fc_work->data;
+#ifdef TBASE_CORE_SWITCHER
+ uint32_t cpu_swap = 0, new_cpu;
+ uint32_t cpu_id[] = CPU_IDS;
+#endif
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ mc_pm_clock_enable();
+#endif
+
+
+ if (fc_generic == NULL)
+ return;
+#ifdef TBASE_CORE_SWITCHER
+ if (fc_generic->as_in.cmd == MC_FC_SWITCH_CORE) {
+ cpu_swap = 1;
+ new_cpu = fc_generic->as_in.param[0];
+ fc_generic->as_in.param[0] = cpu_id[fc_generic->as_in.param[0]];
+ }
+#endif
+ smc(fc_work->data);
+#ifdef TBASE_CORE_SWITCHER
+ if (cpu_swap) {
+ if (fc_generic->as_out.ret == 0) {
+ cpumask_t cpu;
+ active_cpu = new_cpu;
+ MCDRV_DBG(mcd, "CoreSwap ok %d -> %d\n",
+ raw_smp_processor_id(), active_cpu);
+ cpumask_clear(&cpu);
+ cpumask_set_cpu(active_cpu, &cpu);
+#ifdef MC_FASTCALL_WORKER_THREAD
+ set_cpus_allowed(fastcall_thread, cpu);
+#endif
+ } else {
+ MCDRV_DBG(mcd, "CoreSwap failed %d -> %d\n",
+ raw_smp_processor_id(),
+ fc_generic->as_in.param[0]);
+ }
+ }
+#endif
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ mc_pm_clock_disable();
+#endif
+}
+
+int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
+{
+ int ret = 0;
+ union mc_fc_info fc_info;
+
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+
+ memset(&fc_info, 0, sizeof(fc_info));
+ fc_info.as_in.cmd = MC_FC_INFO;
+ fc_info.as_in.ext_info_id = ext_info_id;
+
+ MCDRV_DBG(mcd, "<- cmd=0x%08x, ext_info_id=0x%08x",
+ fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
+
+ mc_fastcall(&(fc_info.as_generic));
+
+ MCDRV_DBG(mcd,
+ "-> r=0x%08x ret=0x%08x state=0x%08x ext_info=0x%08x",
+ fc_info.as_out.resp,
+ fc_info.as_out.ret,
+ fc_info.as_out.state,
+ fc_info.as_out.ext_info);
+
+ ret = convert_fc_ret(fc_info.as_out.ret);
+
+ *state = fc_info.as_out.state;
+ *ext_info = fc_info.as_out.ext_info;
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+ return ret;
+}
+
+#ifdef TBASE_CORE_SWITCHER
+
+uint32_t mc_active_core(void)
+{
+ return active_cpu;
+}
+
+int mc_switch_core(uint32_t core_num)
+{
+ int32_t ret = 0;
+ union mc_fc_swich_core fc_switch_core;
+
+ if (!cpu_online(core_num))
+ return 1;
+
+ MCDRV_DBG_VERBOSE(mcd, "enter\n");
+
+ memset(&fc_switch_core, 0, sizeof(fc_switch_core));
+ fc_switch_core.as_in.cmd = MC_FC_SWITCH_CORE;
+
+ if (core_num < COUNT_OF_CPUS)
+ fc_switch_core.as_in.core_id = core_num;
+ else
+ fc_switch_core.as_in.core_id = 0;
+
+ MCDRV_DBG(mcd,
+ "<- cmd=0x%08x, core_id=0x%08x\n",
+ fc_switch_core.as_in.cmd,
+ fc_switch_core.as_in.core_id);
+ MCDRV_DBG(mcd,
+ "<- core_num=0x%08x, active_cpu=0x%08x\n",
+ core_num, active_cpu);
+ mc_fastcall(&(fc_switch_core.as_generic));
+
+ ret = convert_fc_ret(fc_switch_core.as_out.ret);
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+
+ return ret;
+}
+
+void mc_cpu_offfline(int cpu)
+{
+ if (active_cpu == cpu) {
+ int i;
+ /* Chose the first online CPU and switch! */
+ for_each_online_cpu(i) {
+ if (i == cpu) {
+ MCDRV_DBG(mcd, "Skipping CPU %d\n", cpu);
+ continue;
+ }
+ MCDRV_DBG(mcd, "CPU %d is dying, switching to %d\n",
+ cpu, i);
+ mc_switch_core(i);
+ break;
+ }
+ } else {
+ MCDRV_DBG(mcd, "not active CPU, no action taken\n");
+ }
+}
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ dev_info(mcd, "Cpu %u is going to die\n", cpu);
+ mc_cpu_offfline(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ dev_info(mcd, "Cpu %u is dead\n", cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif
+
+/* Yield to MobiCore */
+int mc_yield(void)
+{
+ int ret = 0;
+ union fc_generic yield;
+
+ /* MCDRV_DBG_VERBOSE(mcd, "enter"); */
+ memset(&yield, 0, sizeof(yield));
+ yield.as_in.cmd = MC_SMC_N_YIELD;
+ mc_fastcall(&yield);
+ ret = convert_fc_ret(yield.as_out.ret);
+
+ return ret;
+}
+
+/* call common notify */
+int mc_nsiq(void)
+{
+ int ret = 0;
+ union fc_generic nsiq;
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+ memset(&nsiq, 0, sizeof(nsiq));
+ nsiq.as_in.cmd = MC_SMC_N_SIQ;
+ mc_fastcall(&nsiq);
+ ret = convert_fc_ret(nsiq.as_out.ret);
+ return ret;
+}
+
+/* call common notify */
+int _nsiq(void)
+{
+ int ret = 0;
+ union fc_generic nsiq;
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+ memset(&nsiq, 0, sizeof(nsiq));
+ nsiq.as_in.cmd = MC_SMC_N_SIQ;
+ _smc(&nsiq);
+ ret = convert_fc_ret(nsiq.as_out.ret);
+ return ret;
+}
+
+/* Call the INIT fastcall to setup MobiCore initialization */
+int mc_init(phys_addr_t base, uint32_t nq_length,
+ uint32_t mcp_offset, uint32_t mcp_length)
+{
+ int ret = 0;
+ union mc_fc_init fc_init;
+ uint64_t base_addr = (uint64_t)base;
+ uint32_t base_high = (uint32_t)(base_addr >> 32);
+
+ MCDRV_DBG_VERBOSE(mcd, "enter");
+
+ memset(&fc_init, 0, sizeof(fc_init));
+
+ fc_init.as_in.cmd = MC_FC_INIT;
+ /* base address of mci buffer 4KB aligned */
+ fc_init.as_in.base = (uint32_t)base_addr;
+ /* notification buffer start/length [16:16] [start, length] */
+ fc_init.as_in.nq_info = ((base_high & 0xFFFF) << 16) |
+ (nq_length & 0xFFFF);
+ /* mcp buffer start/length [16:16] [start, length] */
+ fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
+
+ /*
+ * Set KMOD notification queue to start of MCI
+ * mciInfo was already set up in mmap
+ */
+ MCDRV_DBG(mcd,
+ "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
+ fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
+ fc_init.as_in.mcp_info);
+ mc_fastcall(&fc_init.as_generic);
+ MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x", fc_init.as_out.resp,
+ fc_init.as_out.ret);
+
+ ret = convert_fc_ret(fc_init.as_out.ret);
+
+ MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+ return ret;
+}
+
+/* Return MobiCore driver version */
+uint32_t mc_get_version(void)
+{
+ MCDRV_DBG(mcd, "MobiCore driver version is %i.%i",
+ MCDRVMODULEAPI_VERSION_MAJOR,
+ MCDRVMODULEAPI_VERSION_MINOR);
+
+ return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+ MCDRVMODULEAPI_VERSION_MINOR);
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.h
new file mode 100644
index 000000000..30458a37d
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/ops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_OPS_H_
+#define _MC_OPS_H_
+
+#include <linux/workqueue.h>
+#include "fastcall.h"
+
+int mc_yield(void);
+int mc_nsiq(void);
+int _nsiq(void);
+uint32_t mc_get_version(void);
+
+int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info);
+int mc_init(phys_addr_t base, uint32_t nq_length, uint32_t mcp_offset,
+ uint32_t mcp_length);
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num);
+#endif
+
+bool mc_fastcall(void *data);
+
+int mc_fastcall_init(struct mc_context *context);
+void mc_fastcall_destroy(void);
+
+#endif /* _MC_OPS_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/platform.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/platform.h
new file mode 100644
index 000000000..4ceb928b8
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/platform.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Header file of MobiCore Driver Kernel Module Platform
+ * specific structures
+ *
+ * Internal structures of the McDrvModule
+ *
+ * Header file the MobiCore Driver Kernel Module,
+ * its internal structures and defines.
+ */
+#ifndef _MC_DRV_PLATFORM_H_
+#define _MC_DRV_PLATFORM_H_
+
+/* MobiCore Interrupt. */
+#define MC_INTR_SSIQ 280
+
+/* Enable mobicore mem traces */
+#define MC_MEM_TRACES
+
+/* Enable Runtime Power Management */
+#ifdef CONFIG_PM_RUNTIME
+ #define MC_PM_RUNTIME
+#endif
+
+#define TBASE_CORE_SWITCHER
+/* Values of MPIDR regs in cpu0, cpu1, cpu2, cpu3*/
+#define CPU_IDS {0x0000, 0x0001, 0x0002, 0x0003, 0x0100, 0x0101, 0x0102, 0x0103}
+#define COUNT_OF_CPUS CONFIG_NR_CPUS
+
+/* Enable Fastcall worker thread */
+#define MC_FASTCALL_WORKER_THREAD
+
+#if !defined(CONFIG_ARCH_MT6580)
+/* Enable LPAE */
+#define LPAE_SUPPORT
+/* Enable AARCH32 Fast call IDs */
+#define MC_AARCH32_FC
+#endif
+
+#endif /* _MC_DRV_PLATFORM_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.c
new file mode 100644
index 000000000..e3ea6b530
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver Kernel Module.
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ */
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/suspend.h>
+#include <linux/device.h>
+
+#include "main.h"
+#include "pm.h"
+#include "fastcall.h"
+#include "ops.h"
+#include "logging.h"
+#include "debug.h"
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+ #include <linux/clk.h>
+ #include <linux/err.h>
+
+ struct clk *mc_ce_iface_clk = NULL;
+ struct clk *mc_ce_core_clk = NULL;
+ struct clk *mc_ce_bus_clk = NULL;
+#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
+
+#ifdef MC_PM_RUNTIME
+
+static struct mc_context *ctx;
+
+static bool sleep_ready(void)
+{
+ if (!ctx->mcp)
+ return false;
+
+ if (!(ctx->mcp->flags.sleep_mode.ready_to_sleep & READY_TO_SLEEP))
+ return false;
+
+ return true;
+}
+
+static void mc_suspend_handler(struct work_struct *work)
+{
+ if (!ctx->mcp)
+ return;
+
+ ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
+ _nsiq();
+}
+DECLARE_WORK(suspend_work, mc_suspend_handler);
+
+static inline void dump_sleep_params(struct mc_flags *flags)
+{
+ MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
+ MCDRV_DBG(mcd,
+ "MobiCore Request Sleep=%d!", flags->sleep_mode.sleep_req);
+ MCDRV_DBG(mcd,
+ "MobiCore Sleep Ready=%d!", flags->sleep_mode.ready_to_sleep);
+}
+
+static int mc_suspend_notifier(struct notifier_block *nb,
+ unsigned long event, void *dummy)
+{
+ struct mc_mcp_buffer *mcp = ctx->mcp;
+ /* We have noting to say if MobiCore is not initialized */
+ if (!mcp)
+ return 0;
+
+#ifdef MC_MEM_TRACES
+ mobicore_log_read();
+#endif
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /*
+ * Make sure we have finished all the work otherwise
+ * we end up in a race condition
+ */
+ cancel_work_sync(&suspend_work);
+ /*
+ * We can't go to sleep if MobiCore is not IDLE
+ * or not Ready to sleep
+ */
+ dump_sleep_params(&mcp->flags);
+ if (!sleep_ready()) {
+ ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
+ schedule_work_on(0, &suspend_work);
+ flush_work(&suspend_work);
+ if (!sleep_ready()) {
+ dump_sleep_params(&mcp->flags);
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
+ MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
+ return NOTIFY_BAD;
+ }
+ }
+ break;
+ case PM_POST_SUSPEND:
+ MCDRV_DBG(mcd, "Resume MobiCore system!");
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block mc_notif_block = {
+ .notifier_call = mc_suspend_notifier,
+};
+
+#ifdef MC_BL_NOTIFIER
+
+static int bl_switcher_notifier_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ unsigned int mpidr, cpu, cluster;
+ struct mc_mcp_buffer *mcp = ctx->mcp;
+
+ if (!mcp)
+ return 0;
+
+ asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+ cpu = mpidr & 0x3;
+ cluster = (mpidr >> 8) & 0xf;
+ MCDRV_DBG(mcd, "%s switching!!, cpu: %u, Out=%u",
+ (event == SWITCH_ENTER ? "Before" : "After"), cpu, cluster);
+
+ if (cpu != 0)
+ return 0;
+
+ switch (event) {
+ case SWITCH_ENTER:
+ if (!sleep_ready()) {
+ ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
+ _nsiq();
+ /* By this time we should be ready for sleep or we are
+ * in the middle of something important */
+ if (!sleep_ready()) {
+ dump_sleep_params(&mcp->flags);
+ MCDRV_DBG(mcd,
+ "MobiCore: Don't allow switch!");
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
+ return -EPERM;
+ }
+ }
+ break;
+ case SWITCH_EXIT:
+ ctx->mcp->flags.sleep_mode.sleep_req = 0;
+ break;
+ default:
+ MCDRV_DBG(mcd, "MobiCore: Unknown switch event!");
+ }
+
+ return 0;
+}
+
+static struct notifier_block switcher_nb = {
+ .notifier_call = bl_switcher_notifier_handler,
+};
+#endif
+
+int mc_pm_initialize(struct mc_context *context)
+{
+ int ret = 0;
+
+ ctx = context;
+
+ ret = register_pm_notifier(&mc_notif_block);
+ if (ret)
+ MCDRV_DBG_ERROR(mcd, "device pm register failed");
+#ifdef MC_BL_NOTIFIER
+ if (register_bL_swicher_notifier(&switcher_nb))
+ MCDRV_DBG_ERROR(mcd,
+ "Failed to register to bl_switcher_notifier");
+#endif
+
+ return ret;
+}
+
+int mc_pm_free(void)
+{
+ int ret = unregister_pm_notifier(&mc_notif_block);
+ if (ret)
+ MCDRV_DBG_ERROR(mcd, "device pm unregister failed");
+#ifdef MC_BL_NOTIFIER
+ ret = unregister_bL_swicher_notifier(&switcher_nb);
+ if (ret)
+ MCDRV_DBG_ERROR(mcd, "device bl unregister failed");
+#endif
+ return ret;
+}
+
+bool mc_pm_sleep_ready(void)
+{
+ if (ctx == 0)
+ return true;
+ return sleep_ready();
+}
+#endif /* MC_PM_RUNTIME */
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+
+int mc_pm_clock_initialize(void)
+{
+ int ret = 0;
+
+ /* Get core clk */
+ mc_ce_core_clk = clk_get(mcd, "core_clk");
+ if (IS_ERR(mc_ce_core_clk)) {
+ ret = PTR_ERR(mc_ce_core_clk);
+ MCDRV_DBG_ERROR(mcd, "cannot get core clock");
+ goto error;
+ }
+ /* Get Interface clk */
+ mc_ce_iface_clk = clk_get(mcd, "iface_clk");
+ if (IS_ERR(mc_ce_iface_clk)) {
+ clk_put(mc_ce_core_clk);
+ ret = PTR_ERR(mc_ce_iface_clk);
+ MCDRV_DBG_ERROR(mcd, "cannot get iface clock");
+ goto error;
+ }
+ /* Get AXI clk */
+ mc_ce_bus_clk = clk_get(mcd, "bus_clk");
+ if (IS_ERR(mc_ce_bus_clk)) {
+ clk_put(mc_ce_iface_clk);
+ clk_put(mc_ce_core_clk);
+ ret = PTR_ERR(mc_ce_bus_clk);
+ MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock");
+ goto error;
+ }
+ return ret;
+
+error:
+ mc_ce_core_clk = NULL;
+ mc_ce_iface_clk = NULL;
+ mc_ce_bus_clk = NULL;
+
+ return ret;
+}
+
+void mc_pm_clock_finalize(void)
+{
+ if (mc_ce_iface_clk != NULL)
+ clk_put(mc_ce_iface_clk);
+
+ if (mc_ce_core_clk != NULL)
+ clk_put(mc_ce_core_clk);
+
+ if (mc_ce_bus_clk != NULL)
+ clk_put(mc_ce_bus_clk);
+}
+
+int mc_pm_clock_enable(void)
+{
+ int rc = 0;
+
+ rc = clk_prepare_enable(mc_ce_core_clk);
+ if (rc) {
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
+ } else {
+ rc = clk_prepare_enable(mc_ce_iface_clk);
+ if (rc) {
+ clk_disable_unprepare(mc_ce_core_clk);
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
+ } else {
+ rc = clk_prepare_enable(mc_ce_bus_clk);
+ if (rc) {
+ clk_disable_unprepare(mc_ce_iface_clk);
+ MCDRV_DBG_ERROR(mcd, "cannot enable clock");
+ }
+ }
+ }
+ return rc;
+}
+
+void mc_pm_clock_disable(void)
+{
+ if (mc_ce_iface_clk != NULL)
+ clk_disable_unprepare(mc_ce_iface_clk);
+
+ if (mc_ce_core_clk != NULL)
+ clk_disable_unprepare(mc_ce_core_clk);
+
+ if (mc_ce_bus_clk != NULL)
+ clk_disable_unprepare(mc_ce_bus_clk);
+}
+
+#endif /* MC_CRYPTO_CLOCK_MANAGEMENT */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.h
new file mode 100644
index 000000000..6581425a7
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/pm.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_PM_H_
+#define _MC_PM_H_
+
+#include "main.h"
+#ifdef MC_BL_NOTIFIER
+#include <asm/bL_switcher.h>
+#endif
+
+
+#define NO_SLEEP_REQ 0
+#define REQ_TO_SLEEP 1
+
+#define NORMAL_EXECUTION 0
+#define READY_TO_SLEEP 1
+
+/* How much time after resume the daemon should backoff */
+#define DAEMON_BACKOFF_TIME 500
+
+/* Initialize Power Management */
+int mc_pm_initialize(struct mc_context *context);
+/* Free all Power Management resources*/
+int mc_pm_free(void);
+/* Initialize secure crypto clocks */
+int mc_pm_clock_initialize(void);
+/* Free secure crypto clocks */
+void mc_pm_clock_finalize(void);
+/* Enable secure crypto clocks */
+int mc_pm_clock_enable(void);
+/* Disable secure crypto clocks */
+void mc_pm_clock_disable(void);
+/* Test if sleep is possible */
+bool mc_pm_sleep_ready(void);
+
+#endif /* _MC_PM_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_kernel_api.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_kernel_api.h
new file mode 100644
index 000000000..96805fda1
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_kernel_api.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Interface to be used by module MobiCoreKernelAPI.
+ */
+#ifndef _MC_KERNEL_API_H_
+#define _MC_KERNEL_API_H_
+
+struct mc_instance;
+
+/*
+ * mobicore_open() - Initialize a new MobiCore API instance object
+ *
+ * Returns a MobiCore Instance or NULL if no allocation was possible.
+ */
+struct mc_instance *mobicore_open(void);
+
+/*
+ * mobicore_release() - Release a MobiCore instance object
+ * @instance: MobiCore instance
+ *
+ * Returns 0 if Ok or -E ERROR
+ */
+int mobicore_release(struct mc_instance *instance);
+
+/*
+ * mobicore_allocate_wsm() - Allocate MobiCore WSM
+ * @instance: instance data for MobiCore Daemon and TLCs
+ * @requested_size: memory size requested in bytes
+ * @handle: pointer to handle
+ * @kernel_virt_addr: virtual user start address
+ *
+ * Returns 0 if OK
+ */
+int mobicore_allocate_wsm(struct mc_instance *instance,
+ unsigned long requested_size, uint32_t *handle,
+ void **virt_kernel_addr);
+
+/*
+ * mobicore_free() - Free a WSM buffer allocated with mobicore_allocate_wsm
+ * @instance: instance data for MobiCore Daemon and TLCs
+ * @handle: handle of the buffer
+ *
+ * Returns 0 if OK
+ */
+int mobicore_free_wsm(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * mobicore_map_vmem() - Map a virtual memory buffer structure to Mobicore
+ * @instance: instance data for MobiCore Daemon and TLCs
+ * @addr: address of the buffer (NB it must be kernel virtual!)
+ * @len: buffer length (in bytes)
+ * @handle: unique handle
+ *
+ * Returns 0 if no error
+ */
+int mobicore_map_vmem(struct mc_instance *instance, void *addr,
+ uint32_t len, uint32_t *handle);
+
+/*
+ * mobicore_unmap_vmem() - Unmap a virtual memory buffer from MobiCore
+ * @instance: instance data for MobiCore Daemon and TLCs
+ * @handle: unique handle
+ *
+ * Returns 0 if no error
+ */
+int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * mobicore_sleep_ready() - Test if mobicore can sleep
+ *
+ * Returns true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void);
+
+
+#endif /* _MC_KERNEL_API_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_linux.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_linux.h
new file mode 100644
index 000000000..b9c4934d5
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/mc_linux.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * The <t-base Driver Kernel Module is a Linux device driver, which represents
+ * the command proxy on the lowest layer to the secure world (Swd). Additional
+ * services like memory allocation via mmap and generation of a MMU tables for
+ * given virtual memory are also supported. IRQ functionality receives
+ * information from the SWd in the non secure world (NWd).
+ * As customary the driver is handled as Linux device driver with "open",
+ * "close" and "ioctl" commands. Access to the driver is possible after the
+ * devices "/dev/mobicore" and "/dev/mobicore-user" have been created.
+ */
+
+#ifndef _MC_LINUX_H_
+#define _MC_LINUX_H_
+
+#include "version.h"
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
+
+#define MC_ADMIN_DEVNODE "mobicore"
+#define MC_USER_DEVNODE "mobicore-user"
+
+/*
+ * Data exchange structure of the MC_DRV_MODULE_INIT ioctl command.
+ * INIT request data to SWD
+ */
+struct mc_ioctl_init {
+ /* length of notification queue */
+ uint32_t nq_length;
+ /* mcp buffer start/length [16:16] [start, length] */
+ uint32_t mcp_offset;
+ /* length of mcp buffer */
+ uint32_t mcp_length;
+};
+
+/*
+ * Data exchange structure of the MC_DRV_MODULE_INFO ioctl command.
+ * INFO request data to the SWD
+ */
+struct mc_ioctl_info {
+ uint32_t ext_info_id; /* extended info ID */
+ uint32_t state; /* state */
+ uint32_t ext_info; /* extended info */
+};
+
+/*
+ * Data exchange structure of the MC_IO_MAP_WSM and MC_IO_MAP_MCI commands.
+ *
+ * Allocate a contiguous memory buffer for a process.
+ * The physical address can be used as for later calls to mmap.
+ * The handle can be used to communicate about this buffer to the Daemon.
+ * For MC_IO_MAP_MCI command, the reused field indicates that MCI was set up
+ * already. I.e. Daemon was restarted.
+ */
+struct mc_ioctl_map {
+ uint32_t len; /* Buffer length */
+ uint32_t handle; /* WSM handle */
+ uint64_t phys_addr; /* physical address of WSM (or 0) */
+ uint32_t rfu;
+ bool reused; /* if WSM memory was reused, or new allocated */
+};
+
+/*
+ * Data exchange structure of the MC_IO_REG_WSM command.
+ *
+ * Allocates a physical MMU table and maps the buffer into this page.
+ * Returns the physical address of the MMU table.
+ * The page alignment will be created and the appropriated pSize and pOffsetMMU
+ * will be modified to the used values.
+ *
+ * We assume the 64 bit compatible one to be the default and the
+ * 32 bit one to be the compat one but we must serve both of them.
+ */
+struct mc_compat_ioctl_reg_wsm {
+ uint32_t buffer; /* base address of the virtual address */
+ uint32_t len; /* size of the virtual address space */
+ uint32_t pid; /* process id */
+ uint32_t handle; /* driver handle for locked memory */
+ uint64_t table_phys; /* physical address of the MMU table */
+};
+
+struct mc_ioctl_reg_wsm {
+ uint64_t buffer; /* base address of the virtual address */
+ uint32_t len; /* size of the virtual address space */
+ uint32_t pid; /* process id */
+ uint32_t handle; /* driver handle for locked memory */
+ uint64_t table_phys;/* physical address of the MMU table */
+};
+
+/*
+ * Data exchange structure of the MC_IO_RESOLVE_CONT_WSM ioctl command.
+ */
+struct mc_ioctl_resolv_cont_wsm {
+ /* driver handle for buffer */
+ uint32_t handle;
+ /* length memory */
+ uint32_t length;
+ /* base address of memory */
+ uint64_t phys;
+ /* fd to owner of the buffer */
+ int32_t fd;
+};
+
+/*
+ * Data exchange structure of the MC_IO_RESOLVE_WSM ioctl command.
+ */
+struct mc_ioctl_resolv_wsm {
+ /* driver handle for buffer */
+ uint32_t handle;
+ /* fd to owner of the buffer */
+ int32_t fd;
+ /* base address of memory */
+ uint64_t phys;
+};
+
+
+/*
+ * defines for the ioctl mobicore driver module function call from user space.
+ */
+/* MobiCore IOCTL magic number */
+#define MC_IOC_MAGIC 'M'
+
+#define MC_IO_INIT _IOWR(MC_IOC_MAGIC, 0, struct mc_ioctl_init)
+#define MC_IO_INFO _IOWR(MC_IOC_MAGIC, 1, struct mc_ioctl_info)
+#define MC_IO_VERSION _IOR(MC_IOC_MAGIC, 2, uint32_t)
+/*
+ * ioctl parameter to send the YIELD command to the SWD.
+ * Only possible in Privileged Mode.
+ * ioctl(fd, MC_DRV_MODULE_YIELD)
+ */
+#define MC_IO_YIELD _IO(MC_IOC_MAGIC, 3)
+/*
+ * ioctl parameter to send the NSIQ signal to the SWD.
+ * Only possible in Privileged Mode
+ * ioctl(fd, MC_DRV_MODULE_NSIQ)
+ */
+#define MC_IO_NSIQ _IO(MC_IOC_MAGIC, 4)
+/*
+ * Free's memory which is formerly allocated by the driver's mmap
+ * command. The parameter must be this mmaped address.
+ * The internal instance data regarding to this address are deleted as
+ * well as each according memory page and its appropriated reserved bit
+ * is cleared (ClearPageReserved).
+ * Usage: ioctl(fd, MC_DRV_MODULE_FREE, &address) with address being of
+ * type long address
+ */
+#define MC_IO_FREE _IO(MC_IOC_MAGIC, 5)
+/*
+ * Creates a MMU Table of the given base address and the size of the
+ * data.
+ * Parameter: mc_ioctl_reg_wsm
+ *
+ * Since the end ID is also based on the size of the structure it is
+ * safe to use the same ID(6) for both
+ */
+#define MC_IO_REG_WSM _IOWR(MC_IOC_MAGIC, 6, struct mc_ioctl_reg_wsm)
+#define MC_COMPAT_REG_WSM _IOWR(MC_IOC_MAGIC, 6, \
+ struct mc_compat_ioctl_reg_wsm)
+
+#define MC_IO_UNREG_WSM _IO(MC_IOC_MAGIC, 7)
+#define MC_IO_LOCK_WSM _IO(MC_IOC_MAGIC, 8)
+#define MC_IO_UNLOCK_WSM _IO(MC_IOC_MAGIC, 9)
+
+/*
+ * Allocate contiguous memory for a process for later mapping with mmap.
+ * MC_IO_MAP_WSM usual operation, pages are registered in
+ * device structure and freed later.
+ * MC_IO_MAP_MCI get Instance of MCI, allocates or mmaps
+ * the MCI to daemon
+ */
+#define MC_IO_MAP_WSM _IOWR(MC_IOC_MAGIC, 11, struct mc_ioctl_map)
+#define MC_IO_MAP_MCI _IOWR(MC_IOC_MAGIC, 12, struct mc_ioctl_map)
+
+/*
+ * Clean orphaned WSM buffers. Only available to the daemon and should
+ * only be carried out if the TLC crashes or otherwise calls exit() in
+ * an unexpected manner.
+ * The clean is needed together with the lock/unlock mechanism so the daemon
+ * has clear control of the mapped buffers so it can close a Trustlet before
+ * release all the WSM buffers, otherwise the Trustlet would be able to write
+ * to possibly kernel memory areas
+ */
+#define MC_IO_CLEAN_WSM _IO(MC_IOC_MAGIC, 14)
+
+/*
+ * Get MMU phys address of a buffer handle allocated to the user.
+ * Only available to the daemon.
+ */
+#define MC_IO_RESOLVE_WSM _IOWR(MC_IOC_MAGIC, 15, \
+ struct mc_ioctl_resolv_wsm)
+
+/*
+ * Get the phys address & length of an allocated contiguous buffer.
+ * Only available to the daemon */
+#define MC_IO_RESOLVE_CONT_WSM _IOWR(MC_IOC_MAGIC, 16, \
+ struct mc_ioctl_resolv_cont_wsm)
+
+/*
+ * Setup the mem traces when called.
+ * Only available to the daemon */
+#define MC_IO_LOG_SETUP _IO(MC_IOC_MAGIC, 17)
+
+#endif /* _MC_LINUX_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/version.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/version.h
new file mode 100644
index 000000000..8db48a09b
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreDriver/public/version.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_DRV_VERSION_H_
+#define _MC_DRV_VERSION_H_
+
+#define MCDRVMODULEAPI_VERSION_MAJOR 1
+#define MCDRVMODULEAPI_VERSION_MINOR 1
+
+#endif /* _MC_DRV_VERSION_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/clientlib.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/clientlib.c
new file mode 100644
index 000000000..17ecd5e82
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/clientlib.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/netlink.h>
+#include <net/sock.h>
+#include <net/net_namespace.h>
+#include <linux/list.h>
+
+#include "public/mobicore_driver_api.h"
+#include "public/mobicore_driver_cmd.h"
+#include "include/mcinq.h"
+#include "device.h"
+#include "session.h"
+
+/* device list */
+LIST_HEAD(devices);
+/* lock used to prevent concurrent add/delete action on the device list */
+struct mutex device_mutex;
+atomic_t device_usage = ATOMIC_INIT(0);
+
+static struct mcore_device_t *resolve_device_id(uint32_t device_id)
+{
+ struct mcore_device_t *tmp;
+ struct list_head *pos;
+
+ /* Get mcore_device_t for device_id */
+ mutex_lock(&device_mutex);
+ list_for_each(pos, &devices) {
+ tmp = list_entry(pos, struct mcore_device_t, list);
+ if (tmp->device_id == device_id) {
+ mutex_unlock(&device_mutex);
+ return tmp;
+ }
+ }
+ mutex_unlock(&device_mutex);
+ return NULL;
+}
+
+static void add_device(struct mcore_device_t *device)
+{
+ mutex_lock(&device_mutex);
+ list_add_tail(&(device->list), &devices);
+ mutex_unlock(&device_mutex);
+}
+
+static bool remove_device(uint32_t device_id)
+{
+	struct mcore_device_t *device, *candidate = NULL;
+	struct list_head *pos, *q;
+	bool found = false;
+
+	mutex_lock(&device_mutex);
+	list_for_each_safe(pos, q, &devices) {
+		device = list_entry(pos, struct mcore_device_t, list);
+		if (device->device_id == device_id) {
+			list_del(pos);
+			candidate = device;
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&device_mutex);
+	if (candidate) /* fix inverted test: clean up the removed device, not NULL */
+		mcore_device_cleanup(candidate);
+	return found;
+}
+
+enum mc_result mc_open_device(uint32_t device_id)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+ struct connection *dev_con = NULL;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ struct mcore_device_t *device = resolve_device_id(device_id);
+ if (device != NULL) {
+ MCDRV_DBG(mc_kapi,
+ "Device %d already opened\n", device_id);
+ atomic_inc(&device_usage);
+ mc_result = MC_DRV_OK;
+ break;
+ }
+
+ /* Open new connection to device */
+ dev_con = connection_new();
+ if (dev_con == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+
+ if (!connection_connect(dev_con, MC_DAEMON_PID)) {
+ MCDRV_DBG_ERROR(
+ mc_kapi,
+ "Could not setup netlink connection to PID %u",
+ MC_DAEMON_PID);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ /* Forward device open to the daemon and read result */
+ struct mc_drv_cmd_open_device_t mc_drv_cmd_open_device = {
+ {
+ MC_DRV_CMD_OPEN_DEVICE
+ },
+ {
+ device_id
+ }
+ };
+
+ int len = connection_write_data(
+ dev_con,
+ &mc_drv_cmd_open_device,
+ sizeof(struct mc_drv_cmd_open_device_t));
+ if (len < 0) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_DEVICE writeCmd failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ struct mc_drv_response_header_t rsp_header;
+ memset(&rsp_header, 0, sizeof(rsp_header));
+ len = connection_read_datablock(
+ dev_con,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_DEVICE readRsp failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_DEVICE failed, respId=%d",
+ rsp_header.response_id);
+ switch (rsp_header.response_id) {
+ case MC_DRV_RSP_PAYLOAD_LENGTH_ERROR:
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ case MC_DRV_INVALID_DEVICE_NAME:
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ case MC_DRV_RSP_DEVICE_ALREADY_OPENED:
+ default:
+ mc_result = MC_DRV_ERR_INVALID_OPERATION;
+ break;
+ }
+ break;
+ }
+
+ /* there is no payload to read */
+
+ device = mcore_device_create(device_id, dev_con);
+ if (device == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+ if (!mcore_device_open(device, MC_DRV_MOD_DEVNODE_FULLPATH)) {
+ mcore_device_cleanup(device);
+ MCDRV_DBG_ERROR(mc_kapi,
+ "could not open device file: %s",
+ MC_DRV_MOD_DEVNODE_FULLPATH);
+ mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+ break;
+ }
+
+ add_device(device);
+ atomic_inc(&device_usage);
+
+ } while (false);
+
+ if (mc_result != MC_DRV_OK)
+ connection_cleanup(dev_con);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_open_device);
+
+enum mc_result mc_close_device(uint32_t device_id)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ struct mcore_device_t *device = resolve_device_id(device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ /* Check if it's not used by other modules */
+ if (!atomic_dec_and_test(&device_usage)) {
+ mc_result = MC_DRV_OK;
+ break;
+ }
+
+ struct connection *dev_con = device->connection;
+
+ /* Return if not all sessions have been closed */
+ if (mcore_device_has_sessions(device)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "cannot close with sessions pending");
+ mc_result = MC_DRV_ERR_SESSION_PENDING;
+ break;
+ }
+
+ struct mc_drv_cmd_close_device_t mc_drv_cmd_close_device = {
+ {
+ MC_DRV_CMD_CLOSE_DEVICE
+ }
+ };
+ int len = connection_write_data(
+ dev_con,
+ &mc_drv_cmd_close_device,
+ sizeof(struct mc_drv_cmd_close_device_t));
+ /* ignore error, but log details */
+ if (len < 0) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_CLOSE_DEVICE writeCmd failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ }
+
+ struct mc_drv_response_header_t rsp_header;
+ memset(&rsp_header, 0, sizeof(rsp_header));
+ len = connection_read_datablock(
+ dev_con,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_CLOSE_DEVICE readResp failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_CLOSE_DEVICE failed, respId=%d",
+ rsp_header.response_id);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ remove_device(device_id);
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_close_device);
+
+enum mc_result mc_open_session(struct mc_session_handle *session,
+ const struct mc_uuid_t *uuid,
+ uint8_t *tci, uint32_t tci_len)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (uuid == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "UUID is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (tci == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "TCI is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (tci_len > MC_MAX_TCI_LEN) {
+ MCDRV_DBG_ERROR(mc_kapi, "TCI length is longer than %d",
+ MC_MAX_TCI_LEN);
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Get the device associated with the given session */
+ struct mcore_device_t *device =
+ resolve_device_id(session->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ struct connection *dev_con = device->connection;
+
+ /* Get the wsm of the given TCI */
+ struct wsm *wsm =
+ mcore_device_find_contiguous_wsm(device, tci);
+ if (wsm == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "Could not resolve TCI address ");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ if (wsm->len < tci_len) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "length is more than allocated TCI");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Prepare open session command */
+ struct mc_drv_cmd_open_session_t cmd_open_session = {
+ {
+ MC_DRV_CMD_OPEN_SESSION
+ },
+ {
+ session->device_id,
+ *uuid,
+ (uint32_t)((uintptr_t)(wsm->virt_addr) & 0xFFF),
+ wsm->handle,
+ tci_len
+ }
+ };
+
+ /* Transmit command data */
+ int len = connection_write_data(dev_con,
+ &cmd_open_session,
+ sizeof(cmd_open_session));
+ if (len != sizeof(cmd_open_session)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_SESSION writeData failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ /* Read command response */
+ struct mc_drv_rsp_open_session_t rsp_open_session;
+ memset(&rsp_open_session, 0, sizeof(rsp_open_session));
+
+ /* read whole response, to prevent being interrupted
+ between header and payload */
+ len = connection_read_datablock(dev_con,
+ &rsp_open_session,
+ sizeof(rsp_open_session));
+
+		/* header processing; cast: len is signed and may be a negative
+		 * error — unsigned promotion would otherwise skip this check */
+		if (len < (int)sizeof(rsp_open_session.header)) {
+			MCDRV_DBG_ERROR(mc_kapi,
+					"CMD_OPEN_SESSION readResp failed %d",
+					len);
+			mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+			break;
+		}
+
+ if (rsp_open_session.header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_SESSION failed, respId=%d",
+ rsp_open_session.header.response_id);
+ switch (rsp_open_session.header.response_id) {
+ case MC_DRV_RSP_TRUSTLET_NOT_FOUND:
+ mc_result = MC_DRV_ERR_INVALID_DEVICE_FILE;
+ break;
+ case MC_DRV_RSP_PAYLOAD_LENGTH_ERROR:
+ case MC_DRV_RSP_DEVICE_NOT_OPENED:
+ case MC_DRV_RSP_FAILED:
+ default:
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+ break;
+ }
+
+ /* payload */
+ len -= sizeof(rsp_open_session.header);
+ if (len != sizeof(rsp_open_session.payload)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_OPEN_SESSION readPayload fail %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ /* Register session with handle */
+ session->session_id = rsp_open_session.payload.session_id;
+
+ /* Set up second channel for notifications */
+ struct connection *session_connection = connection_new();
+ if (session_connection == NULL) {
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+
+ if (!connection_connect(session_connection, MC_DAEMON_PID)) {
+ MCDRV_DBG_ERROR(
+ mc_kapi,
+ "Could not setup netlink connection to PID %u",
+ MC_DAEMON_PID);
+ connection_cleanup(session_connection);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ /* Write command to use channel for notifications */
+ struct mc_drv_cmd_nqconnect_t cmd_nqconnect = {
+ {
+ MC_DRV_CMD_NQ_CONNECT
+ },
+ {
+ session->device_id,
+ session->session_id,
+ rsp_open_session.payload.device_session_id,
+ rsp_open_session.payload.session_magic
+ }
+ };
+ connection_write_data(session_connection,
+ &cmd_nqconnect,
+ sizeof(cmd_nqconnect));
+
+ /* Read command response, header first */
+ struct mc_drv_response_header_t rsp_header;
+ len = connection_read_datablock(session_connection,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_NQ_CONNECT readRsp failed %d",
+ len);
+ connection_cleanup(session_connection);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_NQ_CONNECT failed, respId=%d",
+ rsp_header.response_id);
+ connection_cleanup(session_connection);
+ mc_result = MC_DRV_ERR_NQ_FAILED;
+ break;
+ }
+
+ /* there is no payload. */
+
+ /* Session established, new session object must be created */
+ if (!mcore_device_create_new_session(device,
+ session->session_id,
+ session_connection)) {
+ connection_cleanup(session_connection);
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_open_session);
+
+enum mc_result mc_close_session(struct mc_session_handle *session)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ struct mcore_device_t *device =
+ resolve_device_id(session->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ struct connection *dev_con = device->connection;
+
+ struct session *nq_session =
+ mcore_device_resolve_session_id(device,
+ session->session_id);
+
+ if (nq_session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ /* Write close session command */
+ struct mc_drv_cmd_close_session_t cmd_close_session = {
+ {
+ MC_DRV_CMD_CLOSE_SESSION
+ },
+ {
+ session->session_id,
+ }
+ };
+ connection_write_data(dev_con,
+ &cmd_close_session,
+ sizeof(cmd_close_session));
+
+ /* Read command response */
+ struct mc_drv_response_header_t rsp_header;
+ memset(&rsp_header, 0, sizeof(rsp_header));
+ int len = connection_read_datablock(dev_con,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_CLOSE_SESSION readRsp failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_CLOSE_SESSION failed, respId=%d",
+ rsp_header.response_id);
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+
+ mcore_device_remove_session(device, session->session_id);
+ mc_result = MC_DRV_OK;
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_close_session);
+
+enum mc_result mc_notify(struct mc_session_handle *session)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ struct mcore_device_t *device =
+ resolve_device_id(session->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ struct connection *dev_con = device->connection;
+
+ struct session *nqsession =
+ mcore_device_resolve_session_id(device, session->session_id);
+ if (nqsession == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ struct mc_drv_cmd_notify_t cmd_notify = {
+ {
+ MC_DRV_CMD_NOTIFY
+ },
+ {
+ session->session_id
+ }
+ };
+
+ connection_write_data(dev_con,
+ &cmd_notify,
+ sizeof(cmd_notify));
+
+ /* Daemon will not return a response */
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_notify);
+
+enum mc_result mc_wait_notification(struct mc_session_handle *session,
+ int32_t timeout)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session == NULL) {
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ struct mcore_device_t *device =
+ resolve_device_id(session->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+
+ struct session *nq_session =
+ mcore_device_resolve_session_id(device,
+ session->session_id);
+ if (nq_session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ struct connection *nqconnection =
+ nq_session->notification_connection;
+ uint32_t count = 0;
+
+ /* Read notification queue till it's empty */
+ for (;;) {
+ struct notification notification;
+ memset(&notification, 0, sizeof(notification));
+ ssize_t num_read =
+ connection_read_data(nqconnection,
+ &notification,
+ sizeof(notification),
+ timeout);
+ /*
+ * Exit on timeout in first run. Later runs have
+ * timeout set to 0.
+ * -2 means, there is no more data.
+ */
+ if (count == 0 && num_read == -2) {
+ MCDRV_DBG_ERROR(mc_kapi, "read timeout");
+ mc_result = MC_DRV_ERR_TIMEOUT;
+ break;
+ }
+ /*
+ * After first notification the queue will be
+ * drained, Thus we set no timeout for the
+ * following reads
+ */
+ timeout = 0;
+
+ if (num_read != sizeof(struct notification)) {
+ if (count == 0) {
+ /* failure in first read, notify it */
+ mc_result = MC_DRV_ERR_NOTIFICATION;
+ MCDRV_DBG_ERROR(
+ mc_kapi,
+ "read notification failed, "
+ "%i bytes received", (int)num_read);
+ break;
+ } else {
+ /*
+ * Read of the n-th notification
+ * failed/timeout. We don't tell the
+ * caller, as we got valid notifications
+ * before.
+ */
+ mc_result = MC_DRV_OK;
+ break;
+ }
+ }
+
+ count++;
+ MCDRV_DBG_VERBOSE(mc_kapi,
+ "count=%d, SessionID=%d, Payload=%d",
+ count,
+ notification.session_id,
+ notification.payload);
+
+ if (notification.payload != 0) {
+ /* Session end point died -> store exit code */
+ session_set_error_info(nq_session,
+ notification.payload);
+
+ mc_result = MC_DRV_INFO_NOTIFICATION;
+ break;
+ }
+ } /* for(;;) */
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_wait_notification);
+
+enum mc_result mc_malloc_wsm(uint32_t device_id, uint32_t align, uint32_t len,
+ uint8_t **wsm, uint32_t wsm_flags)
+{
+ enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ struct mcore_device_t *device = resolve_device_id(device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ if (wsm == NULL) {
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ struct wsm *wsm_stack =
+ mcore_device_allocate_contiguous_wsm(device, len);
+ if (wsm_stack == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Allocation of WSM failed");
+ mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+ break;
+ }
+
+ *wsm = (uint8_t *)wsm_stack->virt_addr;
+ mc_result = MC_DRV_OK;
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_malloc_wsm);
+
+enum mc_result mc_free_wsm(uint32_t device_id, uint8_t *wsm)
+{
+ enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+ struct mcore_device_t *device;
+
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ /* Get the device associated wit the given session */
+ device = resolve_device_id(device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+
+ /* find WSM object */
+ struct wsm *wsm_stack =
+ mcore_device_find_contiguous_wsm(device, wsm);
+ if (wsm_stack == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "unknown address");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Free the given virtual address */
+ if (!mcore_device_free_contiguous_wsm(device, wsm_stack)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "Free of virtual address failed");
+ mc_result = MC_DRV_ERR_FREE_MEMORY_FAILED;
+ break;
+ }
+ mc_result = MC_DRV_OK;
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_free_wsm);
+
+enum mc_result mc_map(struct mc_session_handle *session_handle, void *buf,
+ uint32_t buf_len, struct mc_bulk_map *map_info)
+{
+ enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session_handle == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "session_handle is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (map_info == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "map_info is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (buf == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "buf is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Determine device the session belongs to */
+ struct mcore_device_t *device =
+ resolve_device_id(session_handle->device_id);
+
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ struct connection *dev_con = device->connection;
+
+ /* Get session */
+ uint32_t session_id = session_handle->session_id;
+ struct session *session =
+ mcore_device_resolve_session_id(device,
+ session_id);
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ /*
+ * Register mapped bulk buffer to Kernel Module and keep mapped
+ * bulk buffer in mind
+ */
+ struct bulk_buffer_descriptor *bulk_buf =
+ session_add_bulk_buf(session, buf, buf_len);
+ if (bulk_buf == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Error mapping bulk buffer");
+ mc_result = MC_DRV_ERR_BULK_MAPPING;
+ break;
+ }
+
+ /* Prepare map command */
+ uintptr_t offset = (uintptr_t)(bulk_buf->virt_addr) & 0xFFF;
+ struct mc_drv_cmd_map_bulk_mem_t mc_drv_cmd_map_bulk_mem = {
+ {
+ MC_DRV_CMD_MAP_BULK_BUF
+ },
+ {
+ session->session_id,
+ bulk_buf->handle,
+ 0,
+ (uint32_t)offset,
+ bulk_buf->len
+ }
+ };
+
+ /* Transmit map command to MobiCore device */
+ connection_write_data(dev_con,
+ &mc_drv_cmd_map_bulk_mem,
+ sizeof(mc_drv_cmd_map_bulk_mem));
+
+ /* Read command response */
+ struct mc_drv_response_header_t rsp_header;
+ memset(&rsp_header, 0, sizeof(rsp_header));
+ int len = connection_read_datablock(dev_con,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_MAP_BULK_BUF readRsp failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_MAP_BULK_BUF failed, respId=%d",
+ rsp_header.response_id);
+
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+
+ /*
+ * Unregister mapped bulk buffer from Kernel Module and
+ * remove mapped bulk buffer from session maintenance
+ */
+ if (!session_remove_bulk_buf(session, buf)) {
+ /* Removing of bulk buffer not possible */
+ MCDRV_DBG_ERROR(mc_kapi,
+ "Unreg of bulk memory failed");
+ }
+ break;
+ }
+
+ struct mc_drv_rsp_map_bulk_mem_payload_t
+ rsp_map_bulk_mem_payload;
+ memset(&rsp_map_bulk_mem_payload, 0,
+ sizeof(rsp_map_bulk_mem_payload));
+ connection_read_datablock(dev_con,
+ &rsp_map_bulk_mem_payload,
+ sizeof(rsp_map_bulk_mem_payload));
+
+ /* Set mapping info for Trustlet */
+ map_info->secure_virt_addr =
+ rsp_map_bulk_mem_payload.secure_virtual_adr;
+ map_info->secure_virt_len = buf_len;
+ mc_result = MC_DRV_OK;
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_map);
+
+enum mc_result mc_unmap(struct mc_session_handle *session_handle, void *buf,
+ struct mc_bulk_map *map_info)
+{
+ enum mc_result mc_result = MC_DRV_ERR_UNKNOWN;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session_handle == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "session_handle is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (map_info == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "map_info is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+ if (buf == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "buf is null");
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Determine device the session belongs to */
+ struct mcore_device_t *device =
+ resolve_device_id(session_handle->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+ struct connection *dev_con = device->connection;
+
+ /* Get session */
+ uint32_t session_id = session_handle->session_id;
+ struct session *session =
+ mcore_device_resolve_session_id(device,
+ session_id);
+ if (session == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ uint32_t handle = session_find_bulk_buf(session, buf);
+ if (handle == 0) {
+ MCDRV_DBG_ERROR(mc_kapi, "Buffer not found");
+ mc_result = MC_DRV_ERR_BULK_UNMAPPING;
+ break;
+ }
+
+
+ /* Prepare unmap command */
+ struct mc_drv_cmd_unmap_bulk_mem_t cmd_unmap_bulk_mem = {
+ {
+ MC_DRV_CMD_UNMAP_BULK_BUF
+ },
+ {
+ session->session_id,
+ handle,
+ map_info->secure_virt_addr,
+ map_info->secure_virt_len
+ }
+ };
+
+ connection_write_data(dev_con,
+ &cmd_unmap_bulk_mem,
+ sizeof(cmd_unmap_bulk_mem));
+
+ /* Read command response */
+ struct mc_drv_response_header_t rsp_header;
+ memset(&rsp_header, 0, sizeof(rsp_header));
+ int len = connection_read_datablock(dev_con,
+ &rsp_header,
+ sizeof(rsp_header));
+ if (len != sizeof(rsp_header)) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_UNMAP_BULK_BUF readRsp failed %d",
+ len);
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ if (rsp_header.response_id != MC_DRV_RSP_OK) {
+ MCDRV_DBG_ERROR(mc_kapi,
+ "CMD_UNMAP_BULK_BUF failed, respId=%d",
+ rsp_header.response_id);
+
+ mc_result = MC_DRV_ERR_DAEMON_UNREACHABLE;
+ break;
+ }
+
+ /*struct mc_drv_rsp_unmap_bulk_mem_payload_t
+ rsp_unmap_bulk_mem_payload;
+ connection_read_datablock(dev_con,
+ &rsp_unmap_bulk_mem_payload,
+ sizeof(rsp_unmap_bulk_mem_payload));*/
+
+ /*
+ * Unregister mapped bulk buffer from Kernel Module and
+ * remove mapped bulk buffer from session maintenance
+ */
+ if (!session_remove_bulk_buf(session, buf)) {
+ /* Removing of bulk buffer not possible */
+ MCDRV_DBG_ERROR(mc_kapi,
+ "Unregistering of bulk memory failed");
+ mc_result = MC_DRV_ERR_BULK_UNMAPPING;
+ break;
+ }
+
+ mc_result = MC_DRV_OK;
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_unmap);
+
+enum mc_result mc_get_session_error_code(struct mc_session_handle *session,
+ int32_t *last_error)
+{
+ enum mc_result mc_result = MC_DRV_OK;
+
+ MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
+
+ do {
+ if (session == NULL || last_error == NULL) {
+ mc_result = MC_DRV_ERR_INVALID_PARAMETER;
+ break;
+ }
+
+ /* Get device */
+ struct mcore_device_t *device =
+ resolve_device_id(session->device_id);
+ if (device == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Device not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
+ break;
+ }
+
+ /* Get session */
+ uint32_t session_id = session->session_id;
+ struct session *nqsession =
+ mcore_device_resolve_session_id(device,
+ session_id);
+ if (nqsession == NULL) {
+ MCDRV_DBG_ERROR(mc_kapi, "Session not found");
+ mc_result = MC_DRV_ERR_UNKNOWN_SESSION;
+ break;
+ }
+
+ *last_error = session_get_last_err(nqsession);
+
+ } while (false);
+
+ return mc_result;
+}
+EXPORT_SYMBOL(mc_get_session_error_code);
+
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/common.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/common.h
new file mode 100644
index 000000000..7904e2bef
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/common.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Common data types for use by the MobiCore Kernel API Driver
+ */
+#ifndef _MC_KAPI_COMMON_H
+#define _MC_KAPI_COMMON_H
+
+#include "connection.h"
+#include "mcinq.h"
+
+/* Connection registry, implemented in main.c */
+void mcapi_insert_connection(struct connection *connection);
+void mcapi_remove_connection(uint32_t seq);
+unsigned int mcapi_unique_id(void);
+
+/* Pseudo-PID identifying the MobiCore daemon on the netlink link */
+#define MC_DAEMON_PID 0xFFFFFFFF
+#define MC_DRV_MOD_DEVNODE_FULLPATH "/dev/mobicore"
+
+/* dummy function helper macro: expansion target for compiled-out logging */
+#define DUMMY_FUNCTION() do {} while (0)
+
+/* Found in main.c */
+extern struct device *mc_kapi;
+
+/* Found in clientlib.c */
+extern struct mutex device_mutex;
+
+/* Error logging; available in both DEBUG and NDEBUG builds */
+#define MCDRV_ERROR(dev, txt, ...) \
+	dev_err(dev, "%s() ### ERROR: " txt, __func__, ##__VA_ARGS__)
+
+#if defined(DEBUG)
+
+/* Also define DEBUG_VERBOSE to compile MCDRV_DBG_VERBOSE() in */
+/* #define DEBUG_VERBOSE */
+#if defined(DEBUG_VERBOSE)
+#define MCDRV_DBG_VERBOSE MCDRV_DBG
+#else
+#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
+#endif
+
+#define MCDRV_DBG(dev, txt, ...) \
+	dev_info(dev, "%s(): " txt, __func__, ##__VA_ARGS__)
+
+#define MCDRV_DBG_WARN(dev, txt, ...) \
+	dev_warn(dev, "%s() WARNING: " txt, __func__, ##__VA_ARGS__)
+
+#define MCDRV_DBG_ERROR(dev, txt, ...) \
+	dev_err(dev, "%s() ### ERROR: " txt, __func__, ##__VA_ARGS__)
+
+/* Debug builds halt the kernel on a failed assertion */
+#define MCDRV_ASSERT(cond) \
+	do { \
+		if (unlikely(!(cond))) { \
+			panic("mc_kernelapi Assertion failed: %s:%d\n", \
+			      __FILE__, __LINE__); \
+		} \
+	} while (0)
+
+#elif defined(NDEBUG)
+
+/* Release builds compile all debug output and assertions away */
+#define MCDRV_DBG_VERBOSE(...) DUMMY_FUNCTION()
+#define MCDRV_DBG(...) DUMMY_FUNCTION()
+#define MCDRV_DBG_WARN(...) DUMMY_FUNCTION()
+#define MCDRV_DBG_ERROR(...) DUMMY_FUNCTION()
+
+#define MCDRV_ASSERT(...) DUMMY_FUNCTION()
+
+#else
+#error "Define DEBUG or NDEBUG"
+#endif /* defined(DEBUG) / defined(NDEBUG) */
+
+#define assert(expr) MCDRV_ASSERT(expr)
+
+#endif /* _MC_KAPI_COMMON_H */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.c
new file mode 100644
index 000000000..7f6ca1fa7
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/netlink.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/semaphore.h>
+#include <linux/time.h>
+#include <net/sock.h>
+#include <net/net_namespace.h>
+
+#include "connection.h"
+#include "common.h"
+
+/* Define the initial state of the Data Available Semaphore */
+#define SEM_NO_DATA_AVAILABLE 0
+
+/* Allocate and register a new connection object; NULL on OOM. */
+struct connection *connection_new(void)
+{
+	struct connection *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+	if (!c) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
+
+	/* Tag the connection so netlink answers can be matched to it */
+	c->sequence_magic = mcapi_unique_id();
+	mutex_init(&c->data_lock);
+	/* Semaphore starts at 0: no data buffered yet */
+	sema_init(&c->data_available_sem, SEM_NO_DATA_AVAILABLE);
+	mcapi_insert_connection(c);
+
+	return c;
+}
+
+/* Unregister @conn, drop any buffered skb and free the object (NULL ok). */
+void connection_cleanup(struct connection *conn)
+{
+	if (conn) {
+		/* kfree_skb() tolerates a NULL skb, no extra check needed */
+		kfree_skb(conn->skb);
+		mcapi_remove_connection(conn->sequence_magic);
+		kfree(conn);
+	}
+}
+
+/*
+ * "Connect" to peer @dest.  Netlink needs no handshake, so this only
+ * records the peer PID later used by connection_write_data().
+ */
+bool connection_connect(struct connection *conn, pid_t dest)
+{
+	/* Nothing to connect */
+	conn->peer_pid = dest;
+	return true;
+}
+
+/*
+ * Copy up to @len bytes of the buffered netlink payload into @buffer.
+ *
+ * Expected to run with conn->data_lock held (see connection_read_data).
+ * Consumes the copied bytes; once the payload is fully drained the skb
+ * reference is released.  Returns the number of bytes actually copied.
+ */
+size_t connection_read_data_msg(struct connection *conn, void *buffer,
+				uint32_t len)
+{
+	/*
+	 * Fix: the old "size_t ret = -1;" stored a negative constant in an
+	 * unsigned type and was dead anyway (both branches assign ret).
+	 */
+	size_t ret;
+
+	MCDRV_DBG_VERBOSE(mc_kapi,
+			  "reading connection data %u, connection data left %u",
+			  len, conn->data_len);
+	/* trying to read more than the left data: hand out what remains */
+	if (len > conn->data_len) {
+		ret = conn->data_len;
+		memcpy(buffer, conn->data_start, conn->data_len);
+		conn->data_len = 0;
+	} else {
+		ret = len;
+		memcpy(buffer, conn->data_start, len);
+		conn->data_len -= len;
+		conn->data_start += len;
+	}
+
+	if (conn->data_len == 0) {
+		/* Payload fully consumed: drop the skb reference */
+		conn->data_start = NULL;
+		kfree_skb(conn->skb);
+		conn->skb = NULL;
+	}
+	MCDRV_DBG_VERBOSE(mc_kapi, "read %zu", ret);
+	return ret;
+}
+
+/* Blocking read: wait without a finite timeout for connection data. */
+size_t connection_read_datablock(struct connection *conn, void *buffer,
+				 uint32_t len)
+{
+	/* timeout -1 maps to "wait forever" in connection_read_data() */
+	return connection_read_data(conn, buffer, len, -1);
+}
+
+/*
+ * Wait up to @timeout ms for payload data on @conn, then copy at most
+ * @len bytes into @buffer.
+ *
+ * Returns the number of bytes read, 0 if woken without data,
+ * (size_t)-2 on timeout and (size_t)-1 when interrupted.
+ * NOTE(review): negative codes stored in a size_t wrap to huge values;
+ * callers must compare against these exact constants.
+ */
+size_t connection_read_data(struct connection *conn, void *buffer, uint32_t len,
+			    int32_t timeout)
+{
+	size_t ret = 0;
+
+	MCDRV_ASSERT(buffer != NULL);
+	MCDRV_ASSERT(conn->socket_descriptor != NULL);
+
+	MCDRV_DBG_VERBOSE(mc_kapi, "read data len = %u for PID = %u",
+			  len, conn->sequence_magic);
+	do {
+		/*
+		 * Wait until data is available or timeout
+		 * msecs_to_jiffies(-1) -> wait forever for the sem
+		 */
+		if (down_timeout(&(conn->data_available_sem),
+				 msecs_to_jiffies(timeout))) {
+			MCDRV_DBG_VERBOSE(mc_kapi,
+					  "Timeout reading the data sem");
+			ret = -2;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&(conn->data_lock))) {
+			MCDRV_DBG_ERROR(mc_kapi,
+					"interrupted reading the data sem");
+			ret = -1;
+			break;
+		}
+
+		/* Have data, use it */
+		if (conn->data_len > 0)
+			ret = connection_read_data_msg(conn, buffer, len);
+
+		mutex_unlock(&(conn->data_lock));
+
+		/*
+		 * There is still some data left: re-raise the semaphore so
+		 * the next reader is not blocked.
+		 * NOTE(review): data_len is re-read here after data_lock was
+		 * dropped -- confirm no concurrent reader can race this check.
+		 */
+		if (conn->data_len > 0)
+			up(&conn->data_available_sem);
+
+	} while (0);
+
+	return ret;
+}
+
+/*
+ * Send @len bytes from @buffer to the connection's peer as one netlink
+ * message tagged with the connection's sequence magic.
+ * Returns @len on success or a negative errno.
+ */
+int connection_write_data(struct connection *conn, void *buffer,
+			  uint32_t len)
+{
+	struct sk_buff *skb = NULL;
+	struct nlmsghdr *nlh;
+	int ret;
+
+	MCDRV_DBG_VERBOSE(mc_kapi, "buffer length %u from pid %u\n", len,
+			  conn->sequence_magic);
+	skb = nlmsg_new(len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	/* type 2, no flags beyond NLM_F_REQUEST; seq carries the magic */
+	nlh = nlmsg_put(skb, 0, conn->sequence_magic, 2, len,
+			NLM_F_REQUEST);
+	if (!nlh) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	/* netlink_unicast frees skb (it consumes it even on failure) */
+	memcpy(NLMSG_DATA(nlh), buffer, len);
+	ret = netlink_unicast(conn->socket_descriptor, skb, conn->peer_pid,
+			      MSG_DONTWAIT);
+	if (ret < 0)
+		return ret;
+
+	return len;
+}
+
+/*
+ * Feed an incoming netlink skb into @conn and wake any blocked reader.
+ * Returns 0 on success, -1 when taking the data lock is interrupted.
+ */
+int connection_process(struct connection *conn, struct sk_buff *skb)
+{
+	if (mutex_lock_interruptible(&conn->data_lock)) {
+		MCDRV_DBG_ERROR(mc_kapi,
+				"Interrupted getting data semaphore!");
+		return -1;
+	}
+
+	/* Drop whatever unread message might still be buffered */
+	kfree_skb(conn->skb);
+
+	/* Take our own reference on the incoming skb */
+	conn->skb = skb_get(skb);
+	if (conn->skb) {
+		conn->data_msg = nlmsg_hdr(conn->skb);
+		conn->data_len = NLMSG_PAYLOAD(conn->data_msg, 0);
+		conn->data_start = NLMSG_DATA(conn->data_msg);
+		up(&conn->data_available_sem);
+	}
+	mutex_unlock(&conn->data_lock);
+
+	return 0;
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.h
new file mode 100644
index 000000000..04fa1f707
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/connection.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_KAPI_CONNECTION_H_
+#define _MC_KAPI_CONNECTION_H_
+
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+
+#include <stddef.h>
+#include <stdbool.h>
+
+/*
+ * One netlink "connection" between the kernel API and the MobiCore
+ * daemon.  Incoming skbs are buffered one at a time (see
+ * connection_process()) and drained by connection_read_data().
+ */
+struct connection {
+	/* Netlink socket */
+	struct sock *socket_descriptor;
+	/* Unique magic, used as netlink seq to match requests/answers */
+	uint32_t sequence_magic;
+
+	/* Header of the currently buffered message */
+	struct nlmsghdr *data_msg;
+	/* How much connection data is left */
+	uint32_t data_len;
+	/* Start pointer of remaining data */
+	void *data_start;
+	/* skb holding the buffered message; owned by this connection */
+	struct sk_buff *skb;
+
+	/* Data protection lock */
+	struct mutex data_lock;
+	/* Raised by connection_process() when data becomes available */
+	struct semaphore data_available_sem;
+
+	/* PID address used for local connection */
+	pid_t self_pid;
+	/* Remote PID for connection */
+	pid_t peer_pid;
+
+	/* The list param for using the kernel lists */
+	struct list_head list;
+};
+
+struct connection *connection_new(void);
+void connection_cleanup(struct connection *conn);
+bool connection_connect(struct connection *conn, pid_t dest);
+size_t connection_read_datablock(struct connection *conn, void *buffer,
+				 uint32_t len);
+size_t connection_read_data(struct connection *conn, void *buffer,
+			    uint32_t len, int32_t timeout);
+int connection_write_data(struct connection *conn, void *buffer,
+			  uint32_t len);
+int connection_process(struct connection *conn, struct sk_buff *skb);
+
+#endif /* _MC_KAPI_CONNECTION_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.c
new file mode 100644
index 000000000..7795c7e95
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore client library device management.
+ *
+ * Device and Trustlet Session management Functions.
+ */
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "mc_kernel_api.h"
+#include "public/mobicore_driver_api.h"
+
+#include "device.h"
+#include "common.h"
+
+/* Build a wsm descriptor for an already-allocated shared-memory chunk. */
+static struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle)
+{
+	struct wsm *wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
+
+	if (!wsm) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
+
+	wsm->virt_addr = virt_addr;
+	wsm->len = len;
+	wsm->handle = handle;
+
+	return wsm;
+}
+
+/*
+ * Allocate a device object for @device_id that talks to the daemon via
+ * @connection.  Returns NULL on allocation failure; in that case the
+ * connection is not stored, so the caller keeps ownership of it.
+ */
+struct mcore_device_t *mcore_device_create(uint32_t device_id,
+					   struct connection *connection)
+{
+	struct mcore_device_t *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
+	dev->device_id = device_id;
+	dev->connection = connection;
+
+	INIT_LIST_HEAD(&dev->session_vector);
+	INIT_LIST_HEAD(&dev->wsm_mmu_vector);
+	mutex_init(&(dev->session_vector_lock));
+	mutex_init(&(dev->wsm_mmu_vector_lock));
+
+	return dev;
+}
+
+/*
+ * Tear down @dev: destroy remaining sessions, free all WSM descriptors,
+ * clean up the daemon connection, close the driver instance and free
+ * the device object itself.
+ */
+void mcore_device_cleanup(struct mcore_device_t *dev)
+{
+	struct session *session = NULL;
+	struct wsm *wsm;
+	struct list_head *pos, *q;
+
+	/*
+	 * Delete all session objects. Usually this should not be needed
+	 * as close_device() requires that all sessions have been closed before.
+	 */
+	do {
+		/* Pop one session at a time so the list lock is not held
+		 * across session_cleanup() */
+		session = NULL;
+		mutex_lock(&(dev->session_vector_lock));
+		if (!list_empty(&(dev->session_vector))) {
+			session = list_first_entry(&(dev->session_vector),
+						   struct session, list);
+			list_del(&(session->list));
+		}
+		mutex_unlock(&(dev->session_vector_lock));
+		if (!session)
+			break;
+		session_cleanup(session);
+	} while (true);
+
+	/* Free all allocated WSM descriptors */
+	mutex_lock(&(dev->wsm_mmu_vector_lock));
+	list_for_each_safe(pos, q, &(dev->wsm_mmu_vector)) {
+		wsm = list_entry(pos, struct wsm, list);
+		list_del(pos);
+		/* NOTE(review): only the descriptor is freed here, not the
+		 * underlying shared memory -- confirm it is released elsewhere */
+		kfree(wsm);
+	}
+	mutex_unlock(&(dev->wsm_mmu_vector_lock));
+	connection_cleanup(dev->connection);
+
+	/* NOTE(review): closes dev->instance even if open never succeeded --
+	 * assumes mobicore_release() tolerates that; confirm. */
+	mcore_device_close(dev);
+	kfree(dev);
+}
+
+/* Open the MobiCore driver instance backing @dev (@device_name unused). */
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name)
+{
+	dev->instance = mobicore_open();
+	return (dev->instance != NULL);
+}
+
+/* Release the MobiCore driver instance obtained by mcore_device_open(). */
+void mcore_device_close(struct mcore_device_t *dev)
+{
+	mobicore_release(dev->instance);
+}
+
+/* True while at least one session is still attached to @dev. */
+bool mcore_device_has_sessions(struct mcore_device_t *dev)
+{
+	bool has;
+
+	mutex_lock(&dev->session_vector_lock);
+	has = !list_empty(&dev->session_vector);
+	mutex_unlock(&dev->session_vector_lock);
+
+	return has;
+}
+
+/*
+ * Create a session object for @session_id and attach it to @dev.
+ * Fails when the id is already in use or session allocation fails.
+ *
+ * Fix: the session declaration previously followed a statement, which
+ * trips -Wdeclaration-after-statement in kernel builds; hoisted to the
+ * top of the function per kernel style.
+ */
+bool mcore_device_create_new_session(struct mcore_device_t *dev,
+				     uint32_t session_id,
+				     struct connection *connection)
+{
+	struct session *session;
+
+	/* Check if session_id already exists */
+	if (mcore_device_resolve_session_id(dev, session_id)) {
+		MCDRV_DBG_ERROR(mc_kapi,
+				" session %u already exists", session_id);
+		return false;
+	}
+
+	session = session_create(session_id, dev->instance, connection);
+	if (session == NULL)
+		return false;
+
+	mutex_lock(&(dev->session_vector_lock));
+	list_add_tail(&(session->list), &(dev->session_vector));
+	mutex_unlock(&(dev->session_vector_lock));
+	return true;
+}
+
+/*
+ * Detach and destroy the session with @session_id.
+ * Returns true when the session existed and was cleaned up.
+ */
+bool mcore_device_remove_session(struct mcore_device_t *dev,
+				 uint32_t session_id)
+{
+	bool found = false;
+	struct session *session = NULL;
+	struct list_head *pos;
+
+	mutex_lock(&(dev->session_vector_lock));
+	list_for_each(pos, &dev->session_vector) {
+		session = list_entry(pos, struct session, list);
+		if (session->session_id == session_id) {
+			list_del(pos);
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&(dev->session_vector_lock));
+	/* Clean up outside the lock; the session is already unlinked */
+	if (found)
+		session_cleanup(session);
+	return found;
+}
+
+/* Look up the session with @session_id on @dev; NULL when not present. */
+struct session *mcore_device_resolve_session_id(struct mcore_device_t *dev,
+						uint32_t session_id)
+{
+	struct session *found = NULL;
+	struct list_head *pos;
+
+	mutex_lock(&dev->session_vector_lock);
+	list_for_each(pos, &dev->session_vector) {
+		struct session *s = list_entry(pos, struct session, list);
+
+		if (s->session_id == session_id) {
+			found = s;
+			break;
+		}
+	}
+	mutex_unlock(&dev->session_vector_lock);
+
+	return found;
+}
+
+/*
+ * Allocate @len bytes of contiguous world-shared memory and track the
+ * descriptor in the device's WSM list.
+ * Returns the descriptor, or NULL on failure or when @len is 0.
+ *
+ * Fix: the local declarations previously sat mid-block after
+ * statements, tripping -Wdeclaration-after-statement in kernel builds;
+ * hoisted to the top per kernel style.
+ */
+struct wsm *mcore_device_allocate_contiguous_wsm(struct mcore_device_t *dev,
+						 uint32_t len)
+{
+	struct wsm *wsm = NULL;
+	void *virt_addr;
+	uint32_t handle;
+	int ret;
+
+	do {
+		if (len == 0)
+			break;
+
+		/* Allocate shared memory */
+		ret = mobicore_allocate_wsm(dev->instance, len, &handle,
+					    &virt_addr);
+		if (ret != 0)
+			break;
+
+		/* Register (vaddr) with device */
+		wsm = wsm_create(virt_addr, len, handle);
+		if (wsm == NULL) {
+			/* Roll back the allocation; nothing tracks it yet */
+			mobicore_free_wsm(dev->instance, handle);
+			break;
+		}
+
+		mutex_lock(&(dev->wsm_mmu_vector_lock));
+		list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
+		mutex_unlock(&(dev->wsm_mmu_vector_lock));
+
+	} while (0);
+
+	return wsm;
+}
+
+/*
+ * Release @wsm if it is tracked by @dev: frees the underlying shared
+ * memory, unlinks the descriptor and frees it.
+ * Returns false when @wsm is not in the device's WSM list.
+ */
+bool mcore_device_free_contiguous_wsm(struct mcore_device_t *dev,
+				      struct wsm *wsm)
+{
+	bool ret = false;
+	struct wsm *tmp;
+	struct list_head *pos;
+
+	mutex_lock(&(dev->wsm_mmu_vector_lock));
+	list_for_each(pos, &dev->wsm_mmu_vector) {
+		tmp = list_entry(pos, struct wsm, list);
+		if (tmp == wsm) {
+			ret = true;
+			break;
+		}
+	}
+	mutex_unlock(&(dev->wsm_mmu_vector_lock));
+	if (ret) {
+		MCDRV_DBG_VERBOSE(mc_kapi,
+				  "freeWsm virt_addr=0x%p, handle=%d",
+				  wsm->virt_addr, wsm->handle);
+
+		/* ignore return code */
+		mobicore_free_wsm(dev->instance, wsm->handle);
+
+		/* NOTE(review): pos is reused after the lock was dropped --
+		 * confirm no concurrent list mutation can happen in between */
+		list_del(pos);
+		kfree(wsm);
+	}
+	return ret;
+}
+
+/* Find the WSM descriptor whose mapping starts at @virt_addr, or NULL. */
+struct wsm *mcore_device_find_contiguous_wsm(struct mcore_device_t *dev,
+					     void *virt_addr)
+{
+	struct wsm *found = NULL;
+	struct wsm *w;
+
+	mutex_lock(&dev->wsm_mmu_vector_lock);
+	list_for_each_entry(w, &dev->wsm_mmu_vector, list) {
+		if (w->virt_addr == virt_addr) {
+			found = w;
+			break;
+		}
+	}
+	mutex_unlock(&dev->wsm_mmu_vector_lock);
+
+	return found;
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.h
new file mode 100644
index 000000000..8387aac60
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/device.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore client library device management.
+ *
+ * Device and Trustlet Session management Functions.
+ */
+#ifndef _MC_KAPI_DEVICE_H_
+#define _MC_KAPI_DEVICE_H_
+
+#include <linux/list.h>
+
+#include "connection.h"
+#include "session.h"
+#include "wsm.h"
+
+/* One opened MobiCore device plus the sessions and WSM buffers bound to it */
+struct mcore_device_t {
+	/* MobiCore Trustlet sessions associated with the device */
+/* lock used to prevent concurrent add/del action on the session list */
+	struct mutex session_vector_lock;
+	struct list_head session_vector;
+/* lock used to prevent concurrent add/del action on the mmu table list */
+	struct mutex wsm_mmu_vector_lock;
+	struct list_head wsm_mmu_vector; /* WSM L2 or L3 Table */
+
+	uint32_t device_id; /* Device identifier */
+	struct connection *connection; /* The device connection */
+	struct mc_instance *instance; /* MobiCore Driver instance */
+
+	/* The list param for using the kernel lists */
+	struct list_head list;
+};
+
+struct mcore_device_t *mcore_device_create(
+	uint32_t device_id, struct connection *connection);
+void mcore_device_cleanup(struct mcore_device_t *dev);
+
+
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name);
+void mcore_device_close(struct mcore_device_t *dev);
+bool mcore_device_has_sessions(struct mcore_device_t *dev);
+bool mcore_device_create_new_session(
+	struct mcore_device_t *dev, uint32_t session_id,
+	struct connection *connection);
+bool mcore_device_remove_session(
+	struct mcore_device_t *dev, uint32_t session_id);
+struct session *mcore_device_resolve_session_id(
+	struct mcore_device_t *dev, uint32_t session_id);
+struct wsm *mcore_device_allocate_contiguous_wsm(
+	struct mcore_device_t *dev, uint32_t len);
+bool mcore_device_free_contiguous_wsm(
+	struct mcore_device_t *dev, struct wsm *wsm);
+struct wsm *mcore_device_find_contiguous_wsm(
+	struct mcore_device_t *dev, void *virt_addr);
+
+#endif /* _MC_KAPI_DEVICE_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcinq.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcinq.h
new file mode 100644
index 000000000..a12696e04
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcinq.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Notifications inform the MobiCore runtime environment that information is
+ * pending in a WSM buffer.
+ *
+ * The Trustlet Connector (TLC) and the corresponding Trustlet also utilize
+ * this buffer to notify each other about new data within the
+ * Trustlet Connector Interface (TCI).
+ *
+ * The buffer is set up as a queue, which means that more than one
+ * notification can be written to the buffer before the switch to the other
+ * world is performed. Each side therefore facilitates an incoming and an
+ * outgoing queue for communication with the other side.
+ *
+ * Notifications hold the session ID, which is used to reference the
+ * communication partner in the other world.
+ * So if, e.g., the TLC in the normal world wants to notify his Trustlet
+ * about new data in the TLC buffer
+ *
+ * Notification queue declarations.
+ */
+#ifndef _MCINQ_H_
+#define _MCINQ_H_
+
+/* Minimum and maximum count of elements in the notification queue */
+#define MIN_NQ_ELEM 1 /* Minimum notification queue elements. */
+#define MAX_NQ_ELEM 64 /* Maximum notification queue elements. */
+
+/* Compute notification queue size in bytes from its number of elements */
+#define NQ_SIZE(n) (2*(sizeof(struct notification_queue_header) \
+	+ (n)*sizeof(struct notification)))
+
+/*
+ * NQ length defines.
+ * Note that there is one queue for NWd->SWd and one queue for SWd->NWd
+ */
+/* Minimum size for the notification queue data structure */
+#define MIN_NQ_LEN NQ_SIZE(MIN_NQ_ELEM)
+/* Maximum size for the notification queue data structure */
+#define MAX_NQ_LEN NQ_SIZE(MAX_NQ_ELEM)
+
+/*
+ * MCP session ID is used when directly communicating with the MobiCore
+ * (e.g. for starting and stopping of Trustlets).
+ */
+#define SID_MCP 0
+/* Invalid session id is returned in case of an error. */
+#define SID_INVALID 0xffffffff
+
+/* Notification data structure. */
+struct notification {
+	uint32_t session_id; /* Session ID. */
+	int32_t payload; /* Additional notification info */
+};
+
+/*
+ * Notification payload codes.
+ * 0 indicated a plain simple notification,
+ * a positive value is a termination reason from the task,
+ * a negative value is a termination reason from MobiCore.
+ * Possible negative values are given below.
+ */
+enum notification_payload {
+	/* task terminated, but exit code is invalid */
+	ERR_INVALID_EXIT_CODE = -1,
+	/* task terminated due to session end, no exit code available */
+	ERR_SESSION_CLOSE = -2,
+	/* task terminated due to invalid operation */
+	ERR_INVALID_OPERATION = -3,
+	/* session ID is unknown */
+	ERR_INVALID_SID = -4,
+	/* session is not active */
+	ERR_SID_NOT_ACTIVE = -5
+};
+
+/*
+ * Declaration of the notification queue header.
+ * Layout as specified in the data structure specification.
+ */
+struct notification_queue_header {
+	uint32_t write_cnt; /* Write counter. */
+	uint32_t read_cnt; /* Read counter. */
+	uint32_t queue_size; /* Queue size. */
+};
+
+/*
+ * Queue struct which defines a queue object.
+ * The queue struct is accessed by the queue<operation> type of
+ * function. The element count must be a power of two and the power
+ * needs to be smaller than the bit width of uint32_t (obviously 32).
+ */
+struct notification_queue {
+	/* Queue header. */
+	struct notification_queue_header hdr;
+	/* Notification elements. */
+	struct notification notification[MIN_NQ_ELEM];
+};
+
+#endif /* _MCINQ_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcuuid.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcuuid.h
new file mode 100644
index 000000000..eca5191ed
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/include/mcuuid.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MCUUID_H_
+#define _MCUUID_H_
+
+/* presumably a feature guard checked by other MobiCore headers -- confirm */
+#define UUID_TYPE
+
+/* Universally Unique Identifier (UUID) according to ISO/IEC 11578. */
+struct mc_uuid_t {
+	uint8_t value[16]; /* Value of the UUID (raw 16 bytes). */
+};
+
+#endif /* _MCUUID_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/main.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/main.c
new file mode 100644
index 000000000..5c1635ebe
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/main.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/netlink.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <net/sock.h>
+
+#include <linux/list.h>
+
+#include "connection.h"
+#include "common.h"
+
+/* Netlink protocol number shared with the MobiCore daemon */
+#define MC_DAEMON_NETLINK 17
+
+/* Global state of the kernel API module */
+struct mc_kernelapi_ctx {
+	struct sock *sk; /* netlink kernel socket */
+	struct list_head peers; /* registered connections */
+	atomic_t counter; /* source of unique sequence magics */
+	struct mutex peers_lock; /* peers lock */
+};
+
+struct mc_kernelapi_ctx *mod_ctx;
+
+/* Define a MobiCore Kernel API device structure for use with dev_debug() etc */
+struct device_driver mc_kernel_api_name = {
+	.name = "mckernelapi"
+};
+
+struct device mc_kernel_api_subname = {
+	.init_name = "", /* Set to 'mcapi' at mcapi_init() time */
+	.driver = &mc_kernel_api_name
+};
+
+struct device *mc_kapi = &mc_kernel_api_subname;
+
+/* get a unique ID (used as a connection's sequence magic) */
+unsigned int mcapi_unique_id(void)
+{
+	/* atomic increment: no two callers get the same value (mod 2^32) */
+	return (unsigned int)atomic_inc_return(&(mod_ctx->counter));
+}
+
+/* Look up the registered connection whose sequence magic is @seq (or NULL). */
+static struct connection *mcapi_find_connection(uint32_t seq)
+{
+	struct connection *found = NULL;
+	struct connection *c;
+
+	mutex_lock(&mod_ctx->peers_lock);
+
+	list_for_each_entry(c, &mod_ctx->peers, list) {
+		if (c->sequence_magic == seq) {
+			found = c;
+			break;
+		}
+	}
+
+	mutex_unlock(&mod_ctx->peers_lock);
+
+	return found;
+}
+
+/* Register @connection in the peers list and bind it to our netlink socket */
+void mcapi_insert_connection(struct connection *connection)
+{
+	mutex_lock(&(mod_ctx->peers_lock));
+
+	list_add_tail(&(connection->list), &(mod_ctx->peers));
+	connection->socket_descriptor = mod_ctx->sk;
+
+	mutex_unlock(&(mod_ctx->peers_lock));
+}
+
+/*
+ * Unlink the connection whose sequence magic is @seq from the peers
+ * list.  The connection object itself is not freed here; see
+ * connection_cleanup().
+ */
+void mcapi_remove_connection(uint32_t seq)
+{
+	struct connection *tmp;
+	struct list_head *pos, *q;
+
+	mutex_lock(&(mod_ctx->peers_lock));
+	list_for_each_safe(pos, q, &mod_ctx->peers) {
+		tmp = list_entry(pos, struct connection, list);
+		if (tmp->sequence_magic == seq) {
+			list_del(pos);
+			break;
+		}
+	}
+
+	mutex_unlock(&(mod_ctx->peers_lock));
+}
+
+/*
+ * Dispatch one netlink message to the connection whose sequence magic
+ * matches the message's sequence number.
+ * Returns 0 on success, -1 when no such connection is registered.
+ *
+ * Fix: seq was declared int although nlmsg_seq and sequence_magic are
+ * both u32, and the "%u" error format was fed a signed int; seq is now
+ * uint32_t and the verbose format uses %u to match.
+ */
+static int mcapi_process(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+	struct connection *c;
+	uint32_t seq;
+	int ret;
+
+	seq = nlh->nlmsg_seq;
+	MCDRV_DBG_VERBOSE(mc_kapi, "nlmsg len %d type %d pid 0x%X seq %u\n",
+			  nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_pid, seq);
+	do {
+		c = mcapi_find_connection(seq);
+		if (!c) {
+			MCDRV_ERROR(mc_kapi,
+				    "Invalid incoming connection - seq=%u!",
+				    seq);
+			ret = -1;
+			break;
+		}
+
+		/* Pass the buffer to the appropriate connection */
+		connection_process(c, skb);
+
+		ret = 0;
+	} while (false);
+	return ret;
+}
+
+/* Netlink input handler: walk all messages in @skb and dispatch each one */
+static void mcapi_callback(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlh = nlmsg_hdr(skb);
+	int len = skb->len;
+	int err = 0;
+
+	while (NLMSG_OK(nlh, len)) {
+		err = mcapi_process(skb, nlh);
+
+		/* if err or if this message says it wants a response */
+		if (err || (nlh->nlmsg_flags & NLM_F_ACK))
+			netlink_ack(skb, nlh, err);
+
+		nlh = NLMSG_NEXT(nlh, len);
+	}
+}
+
+/*
+ * Module init: allocate the global context and open the netlink socket
+ * used to talk to the MobiCore daemon.
+ *
+ * Fix: the peers list and the mutexes are now initialized BEFORE
+ * netlink_kernel_create().  Once the socket exists, mcapi_callback()
+ * may fire and walk mod_ctx->peers / take peers_lock, so they must be
+ * valid first; the old ordering left a startup race.
+ */
+static int __init mcapi_init(void)
+{
+#if defined MC_NETLINK_COMPAT || defined MC_NETLINK_COMPAT_V37
+	struct netlink_kernel_cfg cfg = {
+		.input = mcapi_callback,
+	};
+#endif
+
+	dev_set_name(mc_kapi, "mcapi");
+
+	dev_info(mc_kapi, "Mobicore API module initialized!\n");
+
+	mod_ctx = kzalloc(sizeof(struct mc_kernelapi_ctx), GFP_KERNEL);
+	if (mod_ctx == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return -ENOMEM;
+	}
+
+	/* Must be ready before the netlink callback can run */
+	INIT_LIST_HEAD(&mod_ctx->peers);
+	mutex_init(&mod_ctx->peers_lock);
+	mutex_init(&device_mutex);
+
+#ifdef MC_NETLINK_COMPAT_V37
+	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
+					    &cfg);
+#elif defined MC_NETLINK_COMPAT
+	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
+					    THIS_MODULE, &cfg);
+#else
+	/* start kernel thread */
+	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK, 0,
+					    mcapi_callback, NULL, THIS_MODULE);
+#endif
+
+	if (!mod_ctx->sk) {
+		MCDRV_ERROR(mc_kapi, "register of receive handler failed");
+		kfree(mod_ctx);
+		mod_ctx = NULL;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Module teardown: release the netlink socket and the global context */
+static void __exit mcapi_exit(void)
+{
+	dev_info(mc_kapi, "Unloading Mobicore API module.\n");
+
+	if (mod_ctx->sk != NULL) {
+		netlink_kernel_release(mod_ctx->sk);
+		mod_ctx->sk = NULL;
+	}
+	kfree(mod_ctx);
+	mod_ctx = NULL;
+}
+
+module_init(mcapi_init);
+module_exit(mcapi_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MobiCore API driver");
new file mode 100644
index 000000000..7bf2a2f66
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MobiCore Driver API.
+ *
+ * The MobiCore (MC) Driver API provides access functions to the MobiCore
+ * runtime environment and the contained Trustlets.
+ */
+#ifndef _MOBICORE_DRIVER_API_H_
+#define _MOBICORE_DRIVER_API_H_
+
+#define __MC_CLIENT_LIB_API
+
+#include "mcuuid.h"
+
+/*
+ * Return values of MobiCore driver functions.
+ */
+enum mc_result {
+ /* Function call succeeded. */
+ MC_DRV_OK = 0,
+ /* No notification available. */
+ MC_DRV_NO_NOTIFICATION = 1,
+ /* Error during notification on communication level. */
+ MC_DRV_ERR_NOTIFICATION = 2,
+ /* Function not implemented. */
+ MC_DRV_ERR_NOT_IMPLEMENTED = 3,
+ /* No more resources available. */
+ MC_DRV_ERR_OUT_OF_RESOURCES = 4,
+ /* Driver initialization failed. */
+ MC_DRV_ERR_INIT = 5,
+ /* Unknown error. */
+ MC_DRV_ERR_UNKNOWN = 6,
+ /* The specified device is unknown. */
+ MC_DRV_ERR_UNKNOWN_DEVICE = 7,
+ /* The specified session is unknown.*/
+ MC_DRV_ERR_UNKNOWN_SESSION = 8,
+ /* The specified operation is not allowed. */
+ MC_DRV_ERR_INVALID_OPERATION = 9,
+ /* The response header from the MC is invalid. */
+ MC_DRV_ERR_INVALID_RESPONSE = 10,
+ /* Function call timed out. */
+ MC_DRV_ERR_TIMEOUT = 11,
+ /* Can not allocate additional memory. */
+ MC_DRV_ERR_NO_FREE_MEMORY = 12,
+ /* Free memory failed. */
+ MC_DRV_ERR_FREE_MEMORY_FAILED = 13,
+ /* Still some open sessions pending. */
+ MC_DRV_ERR_SESSION_PENDING = 14,
+ /* MC daemon not reachable */
+ MC_DRV_ERR_DAEMON_UNREACHABLE = 15,
+ /* The device file of the kernel module could not be opened. */
+ MC_DRV_ERR_INVALID_DEVICE_FILE = 16,
+ /* Invalid parameter. */
+ MC_DRV_ERR_INVALID_PARAMETER = 17,
+ /* Unspecified error from Kernel Module*/
+ MC_DRV_ERR_KERNEL_MODULE = 18,
+ /* Error during mapping of additional bulk memory to session. */
+ MC_DRV_ERR_BULK_MAPPING = 19,
+ /* Error during unmapping of additional bulk memory to session. */
+ MC_DRV_ERR_BULK_UNMAPPING = 20,
+ /* Notification received, exit code available. */
+ MC_DRV_INFO_NOTIFICATION = 21,
+ /* Set up of NWd connection failed. */
+ MC_DRV_ERR_NQ_FAILED = 22
+};
+
+/*
+ * Driver control command.
+ */
+enum mc_driver_ctrl {
+ /* Return the driver version */
+ MC_CTRL_GET_VERSION = 1
+};
+
+/*
+ * Structure of Session Handle, includes the Session ID and the Device ID the
+ * Session belongs to.
+ * The session handle will be used for session-based MobiCore communication.
+ * It will be passed to calls which address a communication end point in the
+ * MobiCore environment.
+ */
+struct mc_session_handle {
+ uint32_t session_id; /* MobiCore session ID */
+ uint32_t device_id; /* Device ID the session belongs to */
+};
+
+/*
+ * Information structure about additional mapped Bulk buffer between the
+ * Trustlet Connector (NWd) and the Trustlet (SWd). This structure is
+ * initialized from a Trustlet Connector by calling mc_map().
+ * In order to use the memory within a Trustlet the Trustlet Connector has to
+ * inform the Trustlet with the content of this structure via the TCI.
+ */
+struct mc_bulk_map {
+ /* The virtual address of the Bulk buffer regarding the address space
+ * of the Trustlet, already includes a possible offset! */
+ uint32_t secure_virt_addr;
+ uint32_t secure_virt_len; /* Length of the mapped Bulk buffer */
+};
+
+/* The default device ID */
+#define MC_DEVICE_ID_DEFAULT 0
+/* Wait infinite for a response of the MC. */
+#define MC_INFINITE_TIMEOUT ((int32_t)(-1))
+/* Do not wait for a response of the MC. */
+#define MC_NO_TIMEOUT 0
+/* TCI/DCI must not exceed 1MiB */
+#define MC_MAX_TCI_LEN 0x100000
+
+/**
+ * mc_open_device() - Open a new connection to a MobiCore device.
+ * @device_id: Identifier for the MobiCore device to be used.
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Initializes all device specific resources required to communicate with a
+ * MobiCore instance located on the specified device in the system. If the
+ * device does not exist the function will return MC_DRV_ERR_UNKNOWN_DEVICE.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_OPERATION: device already opened
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device_id unknown
+ * MC_DRV_ERR_INVALID_DEVICE_FILE: kernel module under /dev/mobicore
+ * cannot be opened
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_device(uint32_t device_id);
+
+/**
+ * mc_close_device() - Close the connection to a MobiCore device.
+ * @device_id: Identifier for the MobiCore device.
+ *
+ * When closing a device, active sessions have to be closed beforehand.
+ * Resources associated with the device will be released.
+ * The device may be opened again after it has been closed.
+ *
+ * MC_DEVICE_ID_DEFAULT refers to the default device.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
+ * MC_DRV_ERR_SESSION_PENDING: a session is still open
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_device(uint32_t device_id);
+
+/**
+ * mc_open_session() - Open a new session to a Trustlet.
+ * @session: On success, the session data will be returned
+ * @uuid: UUID of the Trustlet to be opened
+ * @tci: TCI buffer for communicating with the Trustlet
+ * @tci_len: Length of the TCI buffer. Maximum allowed value
+ * is MC_MAX_TCI_LEN
+ *
+ * The Trustlet with the given UUID has to be available in the flash filesystem.
+ *
+ * Write MCP open message to buffer and notify MobiCore about the availability
+ * of a new command.
+ *
+ * Waits until MobiCore responds with the new session ID (stored in the MCP
+ * buffer).
+ *
+ * Note that session.device_id has to be the device id of an opened device.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon socket occur
+ * MC_DRV_ERR_NQ_FAILED: daemon returns an error
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_open_session(
+ struct mc_session_handle *session, const struct mc_uuid_t *uuid,
+ uint8_t *tci, uint32_t tci_len);
+
+/**
+ * mc_close_session() - Close a Trustlet session.
+ * @session: Session to be closed.
+ *
+ * Closes the specified MobiCore session. The call will block until the
+ * session has been closed.
+ *
+ * Device device_id has to be opened in advance.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
+ * MC_DRV_ERR_INVALID_DEVICE_FILE: daemon cannot open Trustlet file
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_close_session(
+ struct mc_session_handle *session);
+
+/**
+ * mc_notify() - Notify a session.
+ * @session: The session to be notified.
+ *
+ * Notifies the session end point about available message data.
+ * If the session parameter is correct, notify will always succeed.
+ * Corresponding errors can only be received by mc_wait_notification().
+ *
+ * A session has to be opened in advance.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: session parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_notify(struct mc_session_handle *session);
+
+/**
+ * mc_wait_notification() - Wait for a notification.
+ * @session: The session the notification should correspond to.
+ * @timeout: Time in milliseconds to wait
+ * (MC_NO_TIMEOUT : direct return, > 0 : milliseconds,
+ * MC_INFINITE_TIMEOUT : wait infinitely)
+ *
+ * Wait for a notification issued by the MobiCore for a specific session.
+ * The timeout parameter specifies the number of milliseconds the call will wait
+ * for a notification.
+ *
+ * If the caller passes 0 as timeout value the call will immediately return.
+ * If timeout value is below 0 the call will block until a notification for the
+ * session has been received.
+ *
+ * If timeout is below 0, call will block.
+ *
+ * Caller has to trust the other side to send a notification to wake him up
+ * again.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_TIMEOUT: no notification arrived in time
+ * MC_DRV_INFO_NOTIFICATION: a problem with the session was
+ * encountered. Get more details with
+ * mc_get_session_error_code()
+ * MC_DRV_ERR_NOTIFICATION: a problem with the socket occurred
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_wait_notification(
+ struct mc_session_handle *session, int32_t timeout);
+
+/**
+ * mc_malloc_wsm() - Allocate a block of world shared memory (WSM).
+ * @device_id: The ID of an opened device to retrieve the WSM from.
+ * @align: The alignment (number of pages) of the memory block
+ * (e.g. 0x00000001 for 4kb).
+ * @len: Length of the block in bytes.
+ * @wsm: Virtual address of the world shared memory block.
+ * @wsm_flags: Platform specific flags describing the memory to
+ * be allocated.
+ *
+ * The MC driver allocates a contiguous block of memory which can be used as
+ * WSM.
+ * This implicates that the allocated memory is aligned according to the
+ * alignment parameter.
+ *
+ * Always returns a buffer of size WSM_SIZE aligned to 4K.
+ *
+ * Align and wsm_flags are currently ignored
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id is invalid
+ * MC_DRV_ERR_NO_FREE_MEMORY: no more contiguous memory is
+ * available in this size or for this
+ * process
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_malloc_wsm(
+ uint32_t device_id,
+ uint32_t align,
+ uint32_t len,
+ uint8_t **wsm,
+ uint32_t wsm_flags
+);
+
+/**
+ * mc_free_wsm() - Free a block of world shared memory (WSM).
+ * @device_id: The ID to which the given address belongs
+ * @wsm: Address of WSM block to be freed
+ *
+ * The MC driver will free a block of world shared memory (WSM) previously
+ * allocated with mc_malloc_wsm(). The caller has to assure that the address
+ * handed over to the driver is a valid WSM address.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: when device id is invalid
+ * MC_DRV_ERR_FREE_MEMORY_FAILED: on failure
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_free_wsm(uint32_t device_id,
+ uint8_t *wsm);
+
+/**
+ *mc_map() - Map additional bulk buffer between a Trustlet Connector (TLC)
+ * and the Trustlet (TL) for a session
+ * @session: Session handle with information of the device_id and
+ * the session_id. The given buffer is mapped to the
+ * session specified in the sessionHandle
+ * @buf: Virtual address of a memory portion (relative to TLC)
+ * to be shared with the Trustlet, already includes a
+ * possible offset!
+ * @len: length of buffer block in bytes.
+ * @map_info: Information structure about the mapped Bulk buffer
+ * between the TLC (NWd) and the TL (SWd).
+ *
+ * Memory allocated in user space of the TLC can be mapped as additional
+ * communication channel (besides TCI) to the Trustlet. Limitation of the
+ * Trustlet memory structure apply: only 6 chunks can be mapped with a maximum
+ * chunk size of 1 MiB each.
+ *
+ * It is up to the application layer (TLC) to inform the Trustlet
+ * about the additional mapped bulk memory.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
+ * MC_DRV_ERR_BULK_MAPPING: buf is already used as a bulk buffer or
+ * when registering the buffer failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_map(
+ struct mc_session_handle *session, void *buf, uint32_t len,
+ struct mc_bulk_map *map_info);
+
+/**
+ * mc_unmap() - Remove additional mapped bulk buffer between Trustlet Connector
+ * (TLC) and the Trustlet (TL) for a session
+ * @session: Session handle with information of the device_id and
+ * the session_id. The given buffer is unmapped from the
+ * session specified in the sessionHandle.
+ * @buf: Virtual address of a memory portion (relative to TLC)
+ * shared with the TL, already includes a possible offset!
+ * @map_info: Information structure about the mapped Bulk buffer
+ * between the TLC (NWd) and the TL (SWd)
+ *
+ * The bulk buffer will immediately be unmapped from the session context.
+ *
+ * The application layer (TLC) must inform the TL about unmapping of the
+ * additional bulk memory before calling mc_unmap!
+ *
+ * The clientlib currently ignores the len field in map_info.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ * MC_DRV_ERR_DAEMON_UNREACHABLE: problems with daemon occur
+ * MC_DRV_ERR_BULK_UNMAPPING: buf was not registered earlier
+ * or when unregistering failed
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_unmap(
+ struct mc_session_handle *session, void *buf,
+ struct mc_bulk_map *map_info);
+
+/**
+ * mc_get_session_error_code() - Get additional error information of the last
+ * error that occurred on a session.
+ * @session: Session handle with information of the device_id and
+ * the session_id
+ * @last_error: >0 Trustlet has terminated itself with this value,
+ * <0 Trustlet is dead because of an error within the
+ * MobiCore (e.g. Kernel exception). See also MCI
+ * definition.
+ *
+ * After the request the stored error code will be deleted.
+ *
+ * Return codes:
+ * MC_DRV_OK: operation completed successfully
+ * MC_DRV_ERR_INVALID_PARAMETER: a parameter is invalid
+ * MC_DRV_ERR_UNKNOWN_SESSION: session id is invalid
+ * MC_DRV_ERR_UNKNOWN_DEVICE: device id of session is invalid
+ */
+__MC_CLIENT_LIB_API enum mc_result mc_get_session_error_code(
+ struct mc_session_handle *session, int32_t *last_error);
+
+#endif /* _MOBICORE_DRIVER_API_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
new file mode 100644
index 000000000..4e6ba0ddf
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MOBICORE_DRIVER_CMD_H_
+#define _MOBICORE_DRIVER_CMD_H_
+
+#include "mcuuid.h"
+
+enum mc_drv_cmd_t {
+ MC_DRV_CMD_PING = 0,
+ MC_DRV_CMD_GET_INFO = 1,
+ MC_DRV_CMD_OPEN_DEVICE = 2,
+ MC_DRV_CMD_CLOSE_DEVICE = 3,
+ MC_DRV_CMD_NQ_CONNECT = 4,
+ MC_DRV_CMD_OPEN_SESSION = 5,
+ MC_DRV_CMD_CLOSE_SESSION = 6,
+ MC_DRV_CMD_NOTIFY = 7,
+ MC_DRV_CMD_MAP_BULK_BUF = 8,
+ MC_DRV_CMD_UNMAP_BULK_BUF = 9
+};
+
+
+enum mc_drv_rsp_t {
+ MC_DRV_RSP_OK = 0,
+ MC_DRV_RSP_FAILED = 1,
+ MC_DRV_RSP_DEVICE_NOT_OPENED = 2,
+ MC_DRV_RSP_DEVICE_ALREADY_OPENED = 3,
+ MC_DRV_RSP_COMMAND_NOT_ALLOWED = 4,
+ MC_DRV_INVALID_DEVICE_NAME = 5,
+ MC_DRV_RSP_MAP_BULK_ERRO = 6,
+ MC_DRV_RSP_TRUSTLET_NOT_FOUND = 7,
+ MC_DRV_RSP_PAYLOAD_LENGTH_ERROR = 8,
+};
+
+
+struct mc_drv_command_header_t {
+ uint32_t command_id;
+};
+
+struct mc_drv_response_header_t {
+ uint32_t response_id;
+};
+
+#define MC_DEVICE_ID_DEFAULT 0 /* The default device ID */
+
+struct mc_drv_cmd_open_device_payload_t {
+ uint32_t device_id;
+};
+
+struct mc_drv_cmd_open_device_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_open_device_payload_t payload;
+};
+
+
+struct mc_drv_rsp_open_device_payload_t {
+ /* empty */
+};
+
+struct mc_drv_rsp_open_device_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_open_device_payload_t payload;
+};
+
+struct mc_drv_cmd_close_device_t {
+ struct mc_drv_command_header_t header;
+ /*
+ * no payload here because close has none.
+ * If we use an empty struct, C++ will count it as 4 bytes.
+ * This will write too much into the socket at write(cmd,sizeof(cmd))
+ */
+};
+
+
+struct mc_drv_rsp_close_device_payload_t {
+ /* empty */
+};
+
+struct mc_drv_rsp_close_device_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_close_device_payload_t payload;
+};
+
+struct mc_drv_cmd_open_session_payload_t {
+ uint32_t device_id;
+ struct mc_uuid_t uuid;
+ uint32_t tci;
+ uint32_t handle;
+ uint32_t len;
+};
+
+struct mc_drv_cmd_open_session_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_open_session_payload_t payload;
+};
+
+
+struct mc_drv_rsp_open_session_payload_t {
+ uint32_t session_id;
+ uint32_t device_session_id;
+ uint32_t session_magic;
+};
+
+struct mc_drv_rsp_open_session_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_open_session_payload_t payload;
+};
+
+struct mc_drv_cmd_close_session_payload_t {
+ uint32_t session_id;
+};
+
+struct mc_drv_cmd_close_session_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_close_session_payload_t payload;
+};
+
+
+struct mc_drv_rsp_close_session_payload_t {
+ /* empty */
+};
+
+struct mc_drv_rsp_close_session_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_close_session_payload_t payload;
+};
+
+struct mc_drv_cmd_notify_payload_t {
+ uint32_t session_id;
+};
+
+struct mc_drv_cmd_notify_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_notify_payload_t payload;
+};
+
+
+struct mc_drv_rsp_notify_payload_t {
+ /* empty */
+};
+
+struct mc_drv_rsp_notify_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_notify_payload_t payload;
+};
+
+struct mc_drv_cmd_map_bulk_mem_payload_t {
+ uint32_t session_id;
+ uint32_t handle;
+ uint32_t rfu;
+ uint32_t offset_payload;
+ uint32_t len_bulk_mem;
+};
+
+struct mc_drv_cmd_map_bulk_mem_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_map_bulk_mem_payload_t payload;
+};
+
+
+struct mc_drv_rsp_map_bulk_mem_payload_t {
+ uint32_t session_id;
+ uint32_t secure_virtual_adr;
+};
+
+struct mc_drv_rsp_map_bulk_mem_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_map_bulk_mem_payload_t payload;
+};
+
+struct mc_drv_cmd_unmap_bulk_mem_payload_t {
+ uint32_t session_id;
+ uint32_t handle;
+ uint32_t secure_virtual_adr;
+ uint32_t len_bulk_mem;
+};
+
+struct mc_drv_cmd_unmap_bulk_mem_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_unmap_bulk_mem_payload_t payload;
+};
+
+
+struct mc_drv_rsp_unmap_bulk_mem_payload_t {
+ uint32_t response_id;
+ uint32_t session_id;
+};
+
+struct mc_drv_rsp_unmap_bulk_mem_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_unmap_bulk_mem_payload_t payload;
+};
+
+struct mc_drv_cmd_nqconnect_payload_t {
+ uint32_t device_id;
+ uint32_t session_id;
+ uint32_t device_session_id;
+ uint32_t session_magic; /* Random data */
+};
+
+struct mc_drv_cmd_nqconnect_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_nqconnect_payload_t payload;
+};
+
+
+struct mc_drv_rsp_nqconnect_payload_t {
+ /* empty; */
+};
+
+struct mc_drv_rsp_nqconnect_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_nqconnect_payload_t payload;
+};
+
+union mc_drv_command_t {
+ struct mc_drv_command_header_t header;
+ struct mc_drv_cmd_open_device_t mc_drv_cmd_open_device;
+ struct mc_drv_cmd_close_device_t mc_drv_cmd_close_device;
+ struct mc_drv_cmd_open_session_t mc_drv_cmd_open_session;
+ struct mc_drv_cmd_close_session_t mc_drv_cmd_close_session;
+ struct mc_drv_cmd_nqconnect_t mc_drv_cmd_nqconnect;
+ struct mc_drv_cmd_notify_t mc_drv_cmd_notify;
+ struct mc_drv_cmd_map_bulk_mem_t mc_drv_cmd_map_bulk_mem;
+ struct mc_drv_cmd_unmap_bulk_mem_t mc_drv_cmd_unmap_bulk_mem;
+};
+
+union mc_drv_response_t {
+ struct mc_drv_response_header_t header;
+ struct mc_drv_rsp_open_device_t mc_drv_rsp_open_device;
+ struct mc_drv_rsp_close_device_t mc_drv_rsp_close_device;
+ struct mc_drv_rsp_open_session_t mc_drv_rsp_open_session;
+ struct mc_drv_rsp_close_session_t mc_drv_rsp_close_session;
+ struct mc_drv_rsp_nqconnect_t mc_drv_rsp_nqconnect;
+ struct mc_drv_rsp_notify_t mc_drv_rsp_notify;
+ struct mc_drv_rsp_map_bulk_mem_t mc_drv_rsp_map_bulk_mem;
+ struct mc_drv_rsp_unmap_bulk_mem_t mc_drv_rsp_unmap_bulk_mem;
+};
+
+#endif /* _MOBICORE_DRIVER_CMD_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.c b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.c
new file mode 100644
index 000000000..639564673
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include "mc_kernel_api.h"
+#include "public/mobicore_driver_api.h"
+
+#include "session.h"
+
+/*
+ * bulk_buffer_descriptor_create() - Allocate and fill a bulk buffer
+ * descriptor.
+ * @virt_addr:	virtual address of the bulk buffer
+ * @len:	length of the bulk buffer in bytes
+ * @handle:	WSM handle returned by the mapping call
+ *
+ * Return: the new descriptor, or NULL on allocation failure.
+ */
+struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
+	void *virt_addr, uint32_t len, uint32_t handle)
+{
+	struct bulk_buffer_descriptor *desc =
+		kzalloc(sizeof(*desc), GFP_KERNEL);
+
+	if (!desc) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
+
+	desc->virt_addr = virt_addr;
+	desc->len = len;
+	desc->handle = handle;
+	return desc;
+}
+
+/*
+ * session_create() - Allocate and initialise a session object.
+ * @session_id:	MobiCore session ID this object represents
+ * @instance:	owning instance pointer (stored untyped)
+ * @connection:	notification connection; ownership passes to the
+ *		session and it is freed in session_cleanup()
+ *
+ * The session starts in SESSION_STATE_INITIAL with last_error set to
+ * SESSION_ERR_NO and an empty bulk-buffer descriptor list.
+ *
+ * Return: the new session, or NULL on allocation failure.
+ */
+struct session *session_create(
+	uint32_t session_id, void *instance, struct connection *connection)
+{
+	struct session *session;
+
+	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (session == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
+	session->session_id = session_id;
+	session->instance = instance;
+	session->notification_connection = connection;
+	session->session_info.last_error = SESSION_ERR_NO;
+	session->session_info.state = SESSION_STATE_INITIAL;
+
+	INIT_LIST_HEAD(&(session->bulk_buffer_descriptors));
+	mutex_init(&(session->bulk_buffer_descriptors_lock));
+	return session;
+}
+
+/*
+ * session_cleanup() - Tear down a session.
+ * @session:	session to destroy; freed on return
+ *
+ * Unmaps and frees all remaining bulk buffer descriptors, closes the
+ * notification connection and releases the session itself.  Unmap
+ * errors are logged and otherwise ignored - nothing can be done at
+ * this point.
+ *
+ * Fix: 'ret' is now declared at function scope instead of after
+ * executable statements inside the loop (ISO C90 / kernel
+ * -Wdeclaration-after-statement violation in the original).
+ */
+void session_cleanup(struct session *session)
+{
+	struct bulk_buffer_descriptor *bulk_buf_descr;
+	struct list_head *pos, *q;
+	int ret;
+
+	/* Unmap still mapped buffers */
+	mutex_lock(&(session->bulk_buffer_descriptors_lock));
+	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
+		bulk_buf_descr =
+			list_entry(pos, struct bulk_buffer_descriptor, list);
+
+		MCDRV_DBG_VERBOSE(mc_kapi,
+				  "handle= %d",
+				  bulk_buf_descr->handle);
+
+		/* ignore any error, as we cannot do anything in this case. */
+		ret = mobicore_unmap_vmem(session->instance,
+					  bulk_buf_descr->handle);
+		if (ret != 0)
+			MCDRV_DBG_ERROR(mc_kapi,
+					"mobicore_unmap_vmem failed: %d", ret);
+
+		list_del(pos);
+		kfree(bulk_buf_descr);
+	}
+	mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+
+	/* Finally delete notification connection */
+	connection_cleanup(session->notification_connection);
+	kfree(session);
+}
+
+/*
+ * session_set_error_info() - Record @err as the session's last error.
+ * Read back via session_get_last_err().
+ */
+void session_set_error_info(struct session *session, int32_t err)
+{
+	session->session_info.last_error = err;
+}
+
+/*
+ * session_get_last_err() - Return the last error stored for @session.
+ *
+ * NOTE(review): session.h documents that the stored code is reset to
+ * SESSION_ERR_NO after being read, but this implementation only reads
+ * it - confirm which behaviour is intended.
+ */
+int32_t session_get_last_err(struct session *session)
+{
+	return session->session_info.last_error;
+}
+
+/*
+ * session_add_bulk_buf() - Register an additional bulk buffer with the
+ * session.
+ * @session:	session the buffer is attached to
+ * @buf:	virtual address of the buffer
+ * @len:	length of the buffer in bytes
+ *
+ * A virtual address can only be added once per session; a duplicate
+ * returns NULL.  Otherwise the buffer is registered through
+ * mobicore_map_vmem() and a descriptor is appended to the session's
+ * descriptor list.
+ *
+ * Fixes versus the original: the inner 'int ret' no longer shadows the
+ * outer one, 'handle' is declared at function scope (no declaration
+ * after statement), and the do {} while (0) pseudo-goto is replaced by
+ * early returns with explicit rollback.
+ *
+ * Return: the new descriptor, or NULL on duplicate address, mapping
+ * failure or allocation failure.
+ */
+struct bulk_buffer_descriptor *session_add_bulk_buf(struct session *session,
+						    void *buf, uint32_t len)
+{
+	struct bulk_buffer_descriptor *bulk_buf_descr;
+	struct bulk_buffer_descriptor *tmp;
+	struct list_head *pos;
+	uint32_t handle;
+	int ret = 0;
+
+	/*
+	 * Search bulk buffer descriptors for existing vAddr
+	 * At the moment a virtual address can only be added one time
+	 */
+	mutex_lock(&(session->bulk_buffer_descriptors_lock));
+	list_for_each(pos, &session->bulk_buffer_descriptors) {
+		tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
+		if (tmp->virt_addr == buf) {
+			ret = -1;
+			break;
+		}
+	}
+	mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+	if (ret == -1)
+		return NULL;
+
+	/* Register the memory in the kernel module */
+	ret = mobicore_map_vmem(session->instance, buf, len, &handle);
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mc_kapi,
+				"mobicore_map_vmem failed, ret=%d",
+				ret);
+		return NULL;
+	}
+
+	MCDRV_DBG_VERBOSE(mc_kapi, "handle=%d", handle);
+
+	/* Create new descriptor */
+	bulk_buf_descr = bulk_buffer_descriptor_create(buf, len, handle);
+	if (bulk_buf_descr == NULL) {
+		/* Roll back the mapping; discard the returned value */
+		(void)mobicore_unmap_vmem(session->instance, handle);
+		return NULL;
+	}
+
+	/* Add to vector of descriptors */
+	mutex_lock(&(session->bulk_buffer_descriptors_lock));
+	list_add_tail(&(bulk_buf_descr->list),
+		      &(session->bulk_buffer_descriptors));
+	mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+
+	return bulk_buf_descr;
+}
+
+/*
+ * session_remove_bulk_buf() - Unregister a bulk buffer from the session.
+ * @session:	session the buffer belongs to
+ * @virt_addr:	virtual address previously passed to
+ *		session_add_bulk_buf()
+ *
+ * Removes the matching descriptor from the session's list and unmaps
+ * the memory.  Unmap errors are logged but deliberately ignored (we
+ * cannot do anything about them), so the function still reports
+ * success in that case.
+ *
+ * Fix: the original declared an inner 'int ret' that shadowed the
+ * outer 'bool ret' return value; the unmap status is now 'err'.
+ *
+ * Return: true if a descriptor for @virt_addr was found and removed,
+ * false otherwise.
+ */
+bool session_remove_bulk_buf(struct session *session, void *virt_addr)
+{
+	bool found = true;
+	struct bulk_buffer_descriptor *bulk_buf = NULL;
+	struct bulk_buffer_descriptor *tmp;
+	struct list_head *pos, *q;
+	int err;
+
+	MCDRV_DBG_VERBOSE(mc_kapi, "Virtual Address = 0x%p",
+			  virt_addr);
+
+	/* Search and remove bulk buffer descriptor */
+	mutex_lock(&(session->bulk_buffer_descriptors_lock));
+	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
+		tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
+		if (tmp->virt_addr == virt_addr) {
+			bulk_buf = tmp;
+			list_del(pos);
+			break;
+		}
+	}
+	mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+
+	if (bulk_buf == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
+		found = false;
+	} else {
+		MCDRV_DBG_VERBOSE(mc_kapi, "Wsm handle=%d",
+				  bulk_buf->handle);
+
+		/* ignore any error, as we cannot do anything */
+		err = mobicore_unmap_vmem(session->instance,
+					  bulk_buf->handle);
+		if (err != 0)
+			MCDRV_DBG_ERROR(mc_kapi,
+					"mobicore_unmap_vmem failed: %d", err);
+
+		kfree(bulk_buf);
+	}
+
+	return found;
+}
+
+/*
+ * session_find_bulk_buf() - Look up the WSM handle for @virt_addr.
+ * @session:	session to search
+ * @virt_addr:	virtual address registered via session_add_bulk_buf()
+ *
+ * Fix: plain list_for_each replaces list_for_each_safe - nothing is
+ * removed while iterating, so the _safe variant (and its spare
+ * cursor) was unnecessary.
+ *
+ * Return: the buffer's handle, or 0 if the address is not registered.
+ */
+uint32_t session_find_bulk_buf(struct session *session, void *virt_addr)
+{
+	struct bulk_buffer_descriptor *tmp;
+	struct list_head *pos;
+	uint32_t handle = 0;
+
+	MCDRV_DBG_VERBOSE(mc_kapi, "Virtual Address = 0x%p",
+			  virt_addr);
+
+	/* Search and return buffer descriptor handle */
+	mutex_lock(&(session->bulk_buffer_descriptors_lock));
+	list_for_each(pos, &session->bulk_buffer_descriptors) {
+		tmp = list_entry(pos, struct bulk_buffer_descriptor, list);
+		if (tmp->virt_addr == virt_addr) {
+			handle = tmp->handle;
+			break;
+		}
+	}
+	mutex_unlock(&(session->bulk_buffer_descriptors_lock));
+	return handle;
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.h
new file mode 100644
index 000000000..0babd60c2
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/session.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2015 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MC_KAPI_SESSION_H_
+#define _MC_KAPI_SESSION_H_
+
+#include "common.h"
+
+#include <linux/list.h>
+#include "connection.h"
+
+
+struct bulk_buffer_descriptor {
+ void *virt_addr; /* The VA of the Bulk buffer */
+ uint32_t len; /* Length of the Bulk buffer */
+ uint32_t handle;
+
+ /* The list param for using the kernel lists*/
+ struct list_head list;
+};
+
+struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
+ void *virt_addr,
+ uint32_t len,
+ uint32_t handle
+);
+
+/*
+ * Session states.
+ * Currently unused.
+ */
+enum session_state {
+ SESSION_STATE_INITIAL,
+ SESSION_STATE_OPEN,
+ SESSION_STATE_TRUSTLET_DEAD
+};
+
+#define SESSION_ERR_NO 0 /* No session error */
+
+/*
+ * Session information structure.
+ * The information structure is used to hold the state of the session, which
+ * will limit further actions for the session.
+ * Also the last error code will be stored till it's read.
+ */
+struct session_information {
+ enum session_state state; /* Session state */
+ int32_t last_error; /* Last error of session */
+};
+
+
+struct session {
+ struct mc_instance *instance;
+
+ /* Descriptors of additional bulk buffer of a session */
+ struct list_head bulk_buffer_descriptors;
+/* lock used to prevent concurrent add/delete action on the descriptor list */
+ struct mutex bulk_buffer_descriptors_lock;
+
+ /* Information about session */
+ struct session_information session_info;
+
+ uint32_t session_id;
+ struct connection *notification_connection;
+
+ /* The list param for using the kernel lists */
+ struct list_head list;
+};
+
+struct session *session_create(
+ uint32_t session_id,
+ void *instance,
+ struct connection *connection
+);
+
+void session_cleanup(struct session *session);
+
+/*
+ * session_add_bulk_buf() - Add address information of additional bulk
+ * buffer memory to session and register virtual
+ * memory in kernel module
+ * @session: Session information structure
+ * @buf: The virtual address of bulk buffer.
+ * @len: Length of bulk buffer.
+ *
+ * The virtual address can only be added one time. If the virtual
+ * address already exist, NULL is returned.
+ *
+ * On success the actual Bulk buffer descriptor with all address information
+ * is returned, NULL if an error occurs.
+ */
+struct bulk_buffer_descriptor *session_add_bulk_buf(
+ struct session *session, void *buf, uint32_t len);
+
+/*
+ * session_remove_bulk_buf() - Remove address information of additional bulk
+ * buffer memory from session and unregister
+ * virtual memory in kernel module
+ * @session: Session information structure
+ * @buf: The virtual address of the bulk buffer
+ *
+ * Returns true on success
+ */
+bool session_remove_bulk_buf(struct session *session, void *buf);
+
+
+/*
+ * session_find_bulk_buf() - Find the handle of the bulk buffer for this
+ * session
+ *
+ * @session: Session information structure
+ * @buf: The virtual address of bulk buffer.
+ *
+ * On success the actual Bulk buffer handle is returned, 0
+ * if an error occurs.
+ */
+uint32_t session_find_bulk_buf(struct session *session, void *virt_addr);
+
+/*
+ * session_set_error_info() - Set additional error information of the last
+ * error that occurred.
+ * @session: Session information structure
+ * @err: The actual error
+ */
+void session_set_error_info(struct session *session, int32_t err);
+
+/*
+ * session_get_last_err() - Get additional error information of the last
+ * error that occurred.
+ * @session: Session information structure
+ *
+ * After request the information is set to SESSION_ERR_NO.
+ *
+ * Returns the last stored error code or SESSION_ERR_NO
+ */
+int32_t session_get_last_err(struct session *session);
+
+#endif /* _MC_KAPI_SESSION_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/wsm.h b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/wsm.h
new file mode 100644
index 000000000..b8d4b26c6
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/MobiCoreKernelApi/wsm.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * World shared memory definitions.
+ */
+#ifndef _MC_KAPI_WSM_H_
+#define _MC_KAPI_WSM_H_
+
+#include "common.h"
+#include <linux/list.h>
+
+struct wsm {
+ void *virt_addr;
+ uint32_t len;
+ uint32_t handle;
+ struct list_head list;
+};
+
+#endif /* _MC_KAPI_WSM_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/Out/Public/tui_ioctl.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/Out/Public/tui_ioctl.h
new file mode 100644
index 000000000..def13393d
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/Out/Public/tui_ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+
+
+/* Response header */
+struct tlc_tui_response_t {
+ uint32_t id;
+ uint32_t return_code;
+};
+
+/* Command IDs */
+#define TLC_TUI_CMD_NONE 0
+#define TLC_TUI_CMD_START_ACTIVITY 1
+#define TLC_TUI_CMD_STOP_ACTIVITY 2
+
+/* Return codes */
+#define TLC_TUI_OK 0
+#define TLC_TUI_ERROR 1
+#define TLC_TUI_ERR_UNKNOWN_CMD 2
+
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME "t-base-tui"
+
+#define TUI_IO_MAGIC 't'
+
+#define TUI_IO_NOTIFY _IOW(TUI_IO_MAGIC, 1, uint32_t)
+#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, uint32_t)
+#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/build_tag.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/build_tag.h
new file mode 100644
index 000000000..9eda58638
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/build_tag.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define MOBICORE_COMPONENT_BUILD_TAG \
+ "t-base-Mediatek-Armv8-Android-302C-V005-20151127_180020_32"
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/dciTui.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/dciTui.h
new file mode 100644
index 000000000..5bee85cad
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/dciTui.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DCITUI_H__
+#define __DCITUI_H__
+
+/**< Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmd_id) (((uint32_t)(cmd_id)) | RSP_ID_MASK)
+#define IS_CMD(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == 0)
+#define IS_RSP(cmd_id) ((((uint32_t)(cmd_id)) & RSP_ID_MASK) == RSP_ID_MASK)
+#define CMD_ID_FROM_RSP(rsp_id) (rsp_id & (~RSP_ID_MASK))
+
+/**
+ * Return codes of driver commands.
+ */
+#define TUI_DCI_OK 0x00030000
+#define TUI_DCI_ERR_UNKNOWN_CMD 0x00030001
+#define TUI_DCI_ERR_NOT_SUPPORTED 0x00030002
+#define TUI_DCI_ERR_INTERNAL_ERROR 0x00030003
+#define TUI_DCI_ERR_NO_RESPONSE 0x00030004
+#define TUI_DCI_ERR_BAD_PARAMETERS 0x00030005
+#define TUI_DCI_ERR_NO_EVENT 0x00030006
+#define TUI_DCI_ERR_OUT_OF_DISPLAY 0x00030007
+/* ... add more error codes when needed */
+
+
+/**
+ * Notification ID's for communication Trustlet Connector -> Driver.
+ */
+#define NOT_TUI_NONE 0
+/* NWd system event that closes the current TUI session*/
+#define NOT_TUI_CANCEL_EVENT 1
+
+
+/**
+ * Command ID's for communication Driver -> Trustlet Connector.
+ */
+#define CMD_TUI_SW_NONE 0
+/* SWd request to NWd to start the TUI session */
+#define CMD_TUI_SW_OPEN_SESSION 1
+/* SWd request to NWd to close the TUI session */
+#define CMD_TUI_SW_CLOSE_SESSION 2
+/* SWd request to NWd stop accessing display controller */
+#define CMD_TUI_SW_STOP_DISPLAY 3
+
+
+/**
+ * Maximum data length.
+ */
+#define MAX_DCI_DATA_LEN (1024*100)
+
+/* Command payload */
+struct tui_alloc_data_t {
+ uint32_t alloc_size;
+ uint32_t num_of_buff;
+};
+
+union dci_cmd_payload_t {
+ struct tui_alloc_data_t alloc_data;
+};
+
+/* Command */
+struct dci_command_t {
+ volatile uint32_t id;
+ union dci_cmd_payload_t payload;
+};
+
+/* TUI frame buffer (output from NWd) */
+typedef struct {
+ uint64_t pa;
+} tuiAllocBuffer_t;
+
+#define MAX_DCI_BUFFER_NUMBER 4
+
+/* Response */
+struct dci_response_t {
+ volatile uint32_t id; /* must be command ID | RSP_ID_MASK */
+ uint32_t return_code;
+ union {
+ tuiAllocBuffer_t alloc_buffer[MAX_DCI_BUFFER_NUMBER];
+ };
+};
+
+/* DCI buffer */
+struct tui_dci_msg_t {
+ volatile uint32_t nwd_notif; /* Notification from TlcTui to DrTui */
+ struct dci_command_t cmd_nwd; /* Command from DrTui to TlcTui */
+ struct dci_response_t nwd_rsp; /* Response from TlcTui to DrTui */
+};
+
+/**
+ * Driver UUID. Update accordingly after reserving UUID
+ */
+#define DR_TUI_UUID { { 7, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
+
+#endif /* __DCITUI_H__ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/t-base-tui.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/t-base-tui.h
new file mode 100644
index 000000000..4f34a286e
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/inc/t-base-tui.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TBASE_TUI_H__
+#define __TBASE_TUI_H__
+
+#define TRUSTEDUI_MODE_OFF 0x00
+#define TRUSTEDUI_MODE_ALL 0xff
+#define TRUSTEDUI_MODE_TUI_SESSION 0x01
+#define TRUSTEDUI_MODE_VIDEO_SECURED 0x02
+#define TRUSTEDUI_MODE_INPUT_SECURED 0x04
+
+#ifdef CONFIG_TRUSTONIC_TRUSTED_UI
+
+int trustedui_blank_inc(void);
+int trustedui_blank_dec(void);
+int trustedui_blank_get_counter(void);
+void trustedui_blank_set_counter(int counter);
+
+int trustedui_get_current_mode(void);
+void trustedui_set_mode(int mode);
+int trustedui_set_mask(int mask);
+int trustedui_clear_mask(int mask);
+
+#endif /* CONFIG_TRUSTONIC_TRUSTED_UI */
+
+#endif /* __TBASE_TUI_H__ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/main.c b/drivers/misc/mediatek/gud/302c/gud/TlcTui/main.c
new file mode 100644
index 000000000..8cb82108e
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/main.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "mobicore_driver_api.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+#include "build_tag.h"
+
+/*static int tui_dev_major_number = 122; */
+
+/*module_param(tui_dev_major_number, int, 0000); */
+/*MODULE_PARM_DESC(major, */
+/* "The device major number used to register a unique char device driver"); */
+
+/* Static variables */
+static struct cdev tui_cdev;
+
+/*
+ * tui_ioctl() - dispatch TUI char-device ioctls from user space.
+ * @f:   open file (unused)
+ * @cmd: one of TUI_IO_NOTIFY / TUI_IO_WAITCMD / TUI_IO_ACK
+ * @arg: event id (NOTIFY) or user pointer (WAITCMD/ACK)
+ *
+ * Returns 0 on success, -EINVAL on a foreign magic number, -ENOTTY for
+ * an unknown command, -EFAULT on a bad user pointer, or the error from
+ * the TlcTui layer.
+ */
+static long tui_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int __user *uarg = (int __user *)arg;
+
+	if (_IOC_TYPE(cmd) != TUI_IO_MAGIC)
+		return -EINVAL;
+
+	pr_info("t-base-tui module: ioctl 0x%x ", cmd);
+
+	switch (cmd) {
+	case TUI_IO_NOTIFY:
+		pr_info("TUI_IO_NOTIFY\n");
+
+		/* arg is the event id itself, not a pointer. */
+		if (tlc_notify_event(arg))
+			ret = 0;
+		else
+			ret = -EFAULT;
+		break;
+
+	case TUI_IO_WAITCMD: {
+		uint32_t cmd_id;
+
+		pr_info("TUI_IO_WAITCMD\n");
+
+		ret = tlc_wait_cmd(&cmd_id);
+		if (ret)
+			return ret;
+
+		/* Write command id to user */
+		pr_debug("IOCTL: sending command %d to user.\n", cmd_id);
+
+		if (copy_to_user(uarg, &cmd_id, sizeof(cmd_id)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+
+		/* Reset the value of the command, to ensure that commands sent
+		 * due to interrupted wait_for_completion are TLC_TUI_CMD_NONE.
+		 */
+		reset_global_command_id();
+
+		break;
+	}
+
+	case TUI_IO_ACK: {
+		struct tlc_tui_response_t rsp_id;
+
+		pr_info("TUI_IO_ACK\n");
+
+		/* Read user response. Bail out right away on a faulting copy:
+		 * the original fell through and handed an *uninitialized*
+		 * rsp_id to tlc_ack_cmd(), overwriting the -EFAULT. */
+		if (copy_from_user(&rsp_id, uarg, sizeof(rsp_id)))
+			return -EFAULT;
+
+		pr_debug("IOCTL: User completed command %d.\n", rsp_id.id);
+		ret = tlc_ack_cmd(&rsp_id);
+		if (ret)
+			return ret;
+		break;
+	}
+
+	default:
+		pr_info("undefined!\n");
+		return -ENOTTY;
+	}
+
+	return ret;
+}
+
+static const struct file_operations tui_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = tui_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tui_ioctl,
+#endif
+};
+
+/*--------------------------------------------------------------------------- */
+/*
+ * tlc_tui_init() - module init: register the TUI char device, create the
+ * /dev node and initialize the HAL.
+ *
+ * Returns 0 on success or a negative errno; all partially acquired
+ * resources are released on failure.
+ */
+static int __init tlc_tui_init(void)
+{
+	/* Declarations must precede statements (kernel C90 style). */
+	dev_t devno;
+	int err;
+	static struct class *tui_class;
+
+	pr_info("Loading t-base-tui module.\n");
+	pr_debug("\n=============== Running TUI Kernel TLC ===============\n");
+	pr_info("%s\n", MOBICORE_COMPONENT_BUILD_TAG);
+
+	err = alloc_chrdev_region(&devno, 0, 1, TUI_DEV_NAME);
+	if (err) {
+		/* pr_debug(KERN_ERR ...) mixed log APIs; use pr_err. */
+		pr_err("Unable to allocate Trusted UI device number\n");
+		return err;
+	}
+
+	cdev_init(&tui_cdev, &tui_fops);
+	tui_cdev.owner = THIS_MODULE;
+
+	err = cdev_add(&tui_cdev, devno, 1);
+	if (err) {
+		pr_err("Unable to add Trusted UI char device\n");
+		goto err_unregister;
+	}
+
+	/* A missing /dev node is not fatal; only create it when the class
+	 * could be created. */
+	tui_class = class_create(THIS_MODULE, "tui_cls");
+	if (!IS_ERR(tui_class))
+		device_create(tui_class, NULL, devno, NULL, TUI_DEV_NAME);
+
+	if (!hal_tui_init()) {
+		err = -EIO;	/* hal_tui_init() only reports true/false */
+		goto err_cdev_del;
+	}
+
+	return 0;
+
+err_cdev_del:
+	cdev_del(&tui_cdev);
+err_unregister:
+	unregister_chrdev_region(devno, 1);
+	return err;
+}
+
+/* Module exit: tear down the char device and the HAL. */
+static void __exit tlc_tui_exit(void)
+{
+	dev_t devno = tui_cdev.dev;
+
+	pr_info("Unloading t-base-tui module.\n");
+
+	/* Remove the cdev before releasing its device numbers (the
+	 * original did it the other way around). */
+	cdev_del(&tui_cdev);
+	unregister_chrdev_region(devno, 1);
+
+	hal_tui_exit();
+	/* NOTE(review): the class/device created in tlc_tui_init() are
+	 * never destroyed (the class is local to init) - confirm whether
+	 * this module is ever expected to be unloaded. */
+}
+
+module_init(tlc_tui_init);
+module_exit(tlc_tui_exit);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("<t-base TUI");
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/public/tui_ioctl.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/public/tui_ioctl.h
new file mode 100644
index 000000000..def13393d
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/public/tui_ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TUI_IOCTL_H_
+#define TUI_IOCTL_H_
+
+
+
+/* Response header */
+struct tlc_tui_response_t {
+ uint32_t id;
+ uint32_t return_code;
+};
+
+/* Command IDs */
+#define TLC_TUI_CMD_NONE 0
+#define TLC_TUI_CMD_START_ACTIVITY 1
+#define TLC_TUI_CMD_STOP_ACTIVITY 2
+
+/* Return codes */
+#define TLC_TUI_OK 0
+#define TLC_TUI_ERROR 1
+#define TLC_TUI_ERR_UNKNOWN_CMD 2
+
+
+/*
+ * defines for the ioctl TUI driver module function call from user space.
+ */
+#define TUI_DEV_NAME "t-base-tui"
+
+#define TUI_IO_MAGIC 't'
+
+#define TUI_IO_NOTIFY _IOW(TUI_IO_MAGIC, 1, uint32_t)
+#define TUI_IO_WAITCMD _IOR(TUI_IO_MAGIC, 2, uint32_t)
+#define TUI_IO_ACK _IOW(TUI_IO_MAGIC, 3, struct tlc_tui_response_t)
+
+#endif /* TUI_IOCTL_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.c b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.c
new file mode 100644
index 000000000..7ea5f1bc0
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include "mobicore_driver_api.h"
+#include "tui_ioctl.h"
+#include "tlcTui.h"
+#include "dciTui.h"
+#include "tui-hal.h"
+
+
+/* ------------------------------------------------------------- */
+/* Globals */
+struct tui_dci_msg_t *dci;
+DECLARE_COMPLETION(dci_comp);
+DECLARE_COMPLETION(io_comp);
+
+/* ------------------------------------------------------------- */
+/* Static */
+static const uint32_t DEVICE_ID = MC_DEVICE_ID_DEFAULT;
+static struct task_struct *thread_id;
+static uint32_t g_cmd_id = TLC_TUI_CMD_NONE;
+static struct mc_session_handle dr_session_handle = {0, 0};
+static struct tlc_tui_response_t g_user_rsp = {
+ TLC_TUI_CMD_NONE, TLC_TUI_ERR_UNKNOWN_CMD};
+/* Functions */
+
+/* ------------------------------------------------------------- */
+/*
+ * tlc_open_driver() - allocate the DCI world-shared buffer and open the
+ * session with the secure TUI driver.
+ *
+ * Returns true on success. On failure the DCI buffer is freed and the
+ * global dci pointer reset, so later NULL checks stay meaningful.
+ */
+static bool tlc_open_driver(void)
+{
+	enum mc_result mc_ret;
+	struct mc_uuid_t dr_uuid = DR_TUI_UUID;
+
+	/* Allocate WSM buffer for the DCI */
+	mc_ret = mc_malloc_wsm(DEVICE_ID, 0, sizeof(struct tui_dci_msg_t),
+			       (uint8_t **)&dci, 0);
+	if (MC_DRV_OK != mc_ret) {
+		pr_debug("ERROR %s: Allocation of DCI WSM failed: %d\n",
+			 __func__, mc_ret);
+		return false;
+	}
+
+	/* Clear the session handle */
+	memset(&dr_session_handle, 0, sizeof(dr_session_handle));
+	/* The device ID (default device) is used */
+	dr_session_handle.device_id = DEVICE_ID;
+	/* Open session with the Driver */
+	mc_ret = mc_open_session(&dr_session_handle, &dr_uuid, (uint8_t *)dci,
+				 (uint32_t)sizeof(struct tui_dci_msg_t));
+	if (MC_DRV_OK != mc_ret) {
+		pr_debug("ERROR %s: Open driver session failed: %d\n",
+			 __func__, mc_ret);
+		/* Original leaked the WSM buffer here and left dci
+		 * pointing at it. */
+		mc_free_wsm(DEVICE_ID, (uint8_t *)dci);
+		dci = NULL;
+		return false;
+	}
+
+	return true;
+}
+
+
+/* ------------------------------------------------------------- */
+/*
+ * tlc_open() - open the tbase device, then the secure driver session.
+ * Returns true when the driver session is established.
+ */
+static bool tlc_open(void)
+{
+	enum mc_result mc_ret;
+
+	/* Open the tbase device */
+	pr_debug("%s: Opening tbase device\n", __func__);
+	mc_ret = mc_open_device(DEVICE_ID);
+
+	/*
+	 * MC_DRV_ERR_INVALID_OPERATION means the device was already open,
+	 * which is acceptable; every other failure is fatal.
+	 */
+	if (mc_ret != MC_DRV_OK && mc_ret != MC_DRV_ERR_INVALID_OPERATION) {
+		pr_debug("ERROR %s: Error %d opening device\n", __func__,
+			 mc_ret);
+		return false;
+	}
+
+	pr_debug("%s: Opening driver session\n", __func__);
+	return tlc_open_driver();
+}
+
+
+/* ------------------------------------------------------------- */
+/* Block until the secure driver posts a notification on our session. */
+static void tlc_wait_cmd_from_driver(void)
+{
+	enum mc_result result;
+
+	/* Wait for a command from secure driver */
+	result = mc_wait_notification(&dr_session_handle, -1);
+	if (result == MC_DRV_OK)
+		pr_debug("tlc_wait_cmd_from_driver: Got a command\n");
+	else
+		pr_debug("ERROR %s: mc_wait_notification() failed: %d\n",
+			 __func__, result);
+}
+
+
+/*
+ * send_cmd_to_user() - hand a command to the user-space TUI activity via
+ * the ioctl thread and translate its answer into a TUI_DCI_* code.
+ * @command_id: TLC_TUI_CMD_* value to deliver.
+ *
+ * Blocks until the ioctl thread acknowledges via tlc_ack_cmd().
+ */
+static uint32_t send_cmd_to_user(uint32_t command_id)
+{
+	uint32_t ret = TUI_DCI_ERR_NO_RESPONSE;
+
+	/* Init shared variables */
+	g_cmd_id = command_id;
+	g_user_rsp.id = TLC_TUI_CMD_NONE;
+	g_user_rsp.return_code = TLC_TUI_ERR_UNKNOWN_CMD;
+
+	/* Give way to ioctl thread */
+	complete(&dci_comp);
+	pr_debug("send_cmd_to_user: give way to ioctl thread\n");
+
+	/* Wait for ioctl thread to complete */
+	wait_for_completion(&io_comp);
+	pr_debug("send_cmd_to_user: Got an answer from ioctl thread.\n");
+	reinit_completion(&io_comp);
+
+	/* Check id of the cmd processed by ioctl thread (paranoia) */
+	if (g_user_rsp.id != command_id) {
+		/* Log the values actually compared; the original printed
+		 * dci->nwd_rsp.id / RSP_ID(command_id) instead. */
+		pr_debug("ERROR %s: Wrong response id 0x%08x iso 0x%08x\n",
+			 __func__, g_user_rsp.id, command_id);
+		ret = TUI_DCI_ERR_INTERNAL_ERROR;
+	} else {
+		/* retrieve return code */
+		switch (g_user_rsp.return_code) {
+		case TLC_TUI_OK:
+			ret = TUI_DCI_OK;
+			break;
+		case TLC_TUI_ERROR:
+			ret = TUI_DCI_ERR_INTERNAL_ERROR;
+			break;
+		case TLC_TUI_ERR_UNKNOWN_CMD:
+			ret = TUI_DCI_ERR_UNKNOWN_CMD;
+			break;
+		default:
+			/* Unexpected code: keep TUI_DCI_ERR_NO_RESPONSE. */
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+/*
+ * tlc_process_cmd() - process one command received from the secure driver
+ * over the DCI: dispatch it (to user space and the HAL), write the result
+ * back into the DCI response, then notify the SWd.
+ *
+ * Must only run after tlc_open_driver() has set up dci.
+ */
+static void tlc_process_cmd(void)
+{
+	uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
+	uint32_t command_id = CMD_TUI_SW_NONE;
+
+	if (NULL == dci) {
+		pr_debug("ERROR %s: DCI has not been set up properly - exiting"\
+			"\n", __func__);
+		return;
+	} else {
+		command_id = dci->cmd_nwd.id;
+	}
+
+	/* Warn if previous response was not acknowledged */
+	if (CMD_TUI_SW_NONE == command_id) {
+		pr_debug("ERROR %s: Notified without command\n", __func__);
+		return;
+	} else {
+		if (dci->nwd_rsp.id != CMD_TUI_SW_NONE)
+			pr_debug("%s: Warning, previous response not ack\n",
+				__func__);
+	}
+
+	/* Handle command */
+	switch (command_id) {
+	case CMD_TUI_SW_OPEN_SESSION:
+		pr_debug("%s: CMD_TUI_SW_OPEN_SESSION.\n", __func__);
+
+		/* Start android TUI activity */
+		ret = send_cmd_to_user(TLC_TUI_CMD_START_ACTIVITY);
+		if (TUI_DCI_OK != ret)
+			break;
+
+		/* allocate TUI frame buffer */
+		ret = hal_tui_alloc(dci->nwd_rsp.alloc_buffer,
+			dci->cmd_nwd.payload.alloc_data.alloc_size,
+			dci->cmd_nwd.payload.alloc_data.num_of_buff);
+
+		if (TUI_DCI_OK != ret)
+			break;
+
+		/* Deactivate linux UI drivers */
+		ret = hal_tui_deactivate();
+
+		if (TUI_DCI_OK != ret) {
+			/* Roll back: release the frame buffer and tell the
+			 * user-space activity to stop. */
+			hal_tui_free();
+			send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
+			break;
+		}
+
+		break;
+
+	case CMD_TUI_SW_CLOSE_SESSION:
+		pr_debug("%s: CMD_TUI_SW_CLOSE_SESSION.\n", __func__);
+
+		/* Activate linux UI drivers */
+		ret = hal_tui_activate();
+
+		hal_tui_free();
+
+		/* Stop android TUI activity */
+		ret = send_cmd_to_user(TLC_TUI_CMD_STOP_ACTIVITY);
+		break;
+
+	default:
+		pr_debug("ERROR %s: Unknown command %d\n",
+			__func__, command_id);
+		break;
+	}
+
+	/* Fill in response to SWd, fill ID LAST */
+	pr_debug("%s: return 0x%08x to cmd 0x%08x\n",
+		__func__, ret, command_id);
+	/* The SWd polls on nwd_rsp.id, so the return code must be visible
+	 * before the id is written. */
+	dci->nwd_rsp.return_code = ret;
+	dci->nwd_rsp.id = RSP_ID(command_id);
+
+	/* Acknowledge command */
+	dci->cmd_nwd.id = CMD_TUI_SW_NONE;
+
+	/* Notify SWd */
+	pr_debug("DCI RSP NOTIFY CORE\n");
+	ret = mc_notify(&dr_session_handle);
+	if (MC_DRV_OK != ret)
+		pr_debug("ERROR %s: Notify failed: %d\n", __func__, ret);
+}
+
+
+/* ------------------------------------------------------------- */
+/* Close the session with the secure TUI driver (best effort). */
+static void tlc_close_driver(void)
+{
+	enum mc_result result = mc_close_session(&dr_session_handle);
+
+	if (result != MC_DRV_OK) {
+		pr_debug("ERROR %s: Closing driver session failed: %d\n",
+			 __func__, result);
+	}
+}
+
+
+/* ------------------------------------------------------------- */
+/* Tear down the driver session, then the tbase device (best effort). */
+static void tlc_close(void)
+{
+	enum mc_result result;
+
+	pr_debug("%s: Closing driver session\n", __func__);
+	tlc_close_driver();
+
+	/* Close the tbase device */
+	pr_debug("%s: Closing tbase\n", __func__);
+	result = mc_close_device(DEVICE_ID);
+	if (result != MC_DRV_OK) {
+		pr_debug("ERROR %s: Closing tbase device failed: %d\n",
+			 __func__, result);
+	}
+}
+
+/* Reset the pending user-space command to "none" so an interrupted
+ * wait in tlc_wait_cmd() never re-delivers a stale command id. */
+void reset_global_command_id(void)
+{
+	g_cmd_id = TLC_TUI_CMD_NONE;
+}
+
+/* ------------------------------------------------------------- */
+/*
+ * tlc_notify_event() - forward an NWd event (e.g. cancel) to the secure
+ * driver through the DCI. Returns true when the notification was sent.
+ */
+bool tlc_notify_event(uint32_t event_type)
+{
+	enum mc_result result;
+
+	if (!dci) {
+		pr_debug("ERROR tlc_notify_event: DCI has not been set up "\
+			"properly - exiting\n");
+		return false;
+	}
+
+	/* Prepare notification message in DCI */
+	pr_debug("tlc_notify_event: event_type = %d\n", event_type);
+	dci->nwd_notif = event_type;
+
+	/* Signal the Driver */
+	pr_debug("DCI EVENT NOTIFY CORE\n");
+	result = mc_notify(&dr_session_handle);
+	if (result != MC_DRV_OK) {
+		pr_debug("ERROR tlc_notify_event: mc_notify failed: %d\n",
+			result);
+		return false;
+	}
+
+	return true;
+}
+
+/* ------------------------------------------------------------- */
+/**
+ * main_thread() - TlcTui worker thread body: open the driver session,
+ * then loop forever waiting for DCI commands and processing them.
+ * @uarg: unused kthread argument.
+ *
+ * Returns 1 if the driver session could not be opened; otherwise the
+ * loop never exits (see NOTE below).
+ */
+int main_thread(void *uarg)
+{
+	pr_debug("main_thread: TlcTui start!\n");
+
+	/* Open session on the driver */
+	if (!tlc_open()) {
+		pr_debug("ERROR main_thread: open driver failed!\n");
+		return 1;
+	}
+
+	/* TlcTui main thread loop */
+	for (;;) {
+		/* Wait for a command from the DrTui on DCI*/
+		tlc_wait_cmd_from_driver();
+		/* Something has been received, process it. */
+		tlc_process_cmd();
+	}
+
+	/* Close tlc. Note that this frees the DCI pointer.
+	 * Do not use this pointer after tlc_close().
+	 * NOTE(review): unreachable - the loop above never breaks, and the
+	 * thread ignores kthread_should_stop(); confirm this is intended. */
+	tlc_close();
+
+	return 0;
+}
+
+/*
+ * tlc_wait_cmd() - called from the TUI_IO_WAITCMD ioctl: lazily start the
+ * DCI main thread, then block until a command is available.
+ * @cmd_id: out parameter receiving the TLC_TUI_CMD_* id.
+ *
+ * Returns 0 on success, -EFAULT if the thread cannot be started, or
+ * -ERESTARTSYS when the wait is interrupted.
+ */
+int tlc_wait_cmd(uint32_t *cmd_id)
+{
+	/* Create the TlcTui Main thread and start secure driver (only
+	 * 1st time) */
+	if (dr_session_handle.session_id == 0) {
+		thread_id = kthread_run(main_thread, NULL, "dci_thread");
+		/* kthread_run() returns ERR_PTR() on failure, never NULL,
+		 * so the original "!thread_id" check could not fire. */
+		if (IS_ERR(thread_id)) {
+			pr_err("Unable to start Trusted UI main thread\n");
+			thread_id = NULL;
+			return -EFAULT;
+		}
+	}
+
+	/* Wait for signal from DCI handler */
+	/* In case of an interrupted sys call, return with -EINTR */
+	if (wait_for_completion_interruptible(&dci_comp)) {
+		pr_debug("interrupted by system\n");
+		return -ERESTARTSYS;
+	}
+	reinit_completion(&dci_comp);
+
+	*cmd_id = g_cmd_id;
+	return 0;
+}
+
+/* tlc_ack_cmd() - store the user-space response and wake the DCI thread
+ * blocked in send_cmd_to_user(). Always returns 0. */
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id)
+{
+	g_user_rsp = *rsp_id;
+
+	/* Send signal to DCI */
+	complete(&io_comp);
+
+	return 0;
+}
+
+/** @} */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.h
new file mode 100644
index 000000000..bd8eb3036
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tlcTui.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef TLCTUI_H_
+#define TLCTUI_H_
+
+void reset_global_command_id(void);
+int tlc_wait_cmd(uint32_t *cmd_id);
+int tlc_ack_cmd(struct tlc_tui_response_t *rsp_id);
+bool tlc_notify_event(uint32_t event_type);
+
+#endif /* TLCTUI_H_ */
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/trustedui.c b/drivers/misc/mediatek/gud/302c/gud/TlcTui/trustedui.c
new file mode 100644
index 000000000..91e27ac26
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/trustedui.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * File : trustedui.c
+ * Created : 26-02-2010
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+//#include <linux/t-base-tui.h>
+#include <t-base-tui.h>
+
+static int trustedui_mode = TRUSTEDUI_MODE_OFF;
+static int trustedui_blank_counter;
+
+static DEFINE_SPINLOCK(trustedui_lock);
+
+/* Atomically increment the blank counter; returns the new value. */
+int trustedui_blank_inc(void)
+{
+	unsigned long irq_state;
+	int updated;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_blank_counter += 1;
+	updated = trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return updated;
+}
+EXPORT_SYMBOL(trustedui_blank_inc);
+
+/* Atomically decrement the blank counter; returns the new value. */
+int trustedui_blank_dec(void)
+{
+	unsigned long irq_state;
+	int updated;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_blank_counter -= 1;
+	updated = trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return updated;
+}
+EXPORT_SYMBOL(trustedui_blank_dec);
+
+/* Read the blank counter under the lock and return the snapshot. */
+int trustedui_blank_get_counter(void)
+{
+	unsigned long irq_state;
+	int snapshot;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	snapshot = trustedui_blank_counter;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return snapshot;
+}
+EXPORT_SYMBOL(trustedui_blank_get_counter);
+
+/* Overwrite the blank counter, serialized against the other accessors. */
+void trustedui_blank_set_counter(int counter)
+{
+	unsigned long irq_state;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_blank_counter = counter;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+}
+EXPORT_SYMBOL(trustedui_blank_set_counter);
+
+/* Return a locked snapshot of the current TRUSTEDUI_MODE_* value. */
+int trustedui_get_current_mode(void)
+{
+	unsigned long irq_state;
+	int current_mode;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	current_mode = trustedui_mode;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return current_mode;
+}
+EXPORT_SYMBOL(trustedui_get_current_mode);
+
+/* Replace the TUI mode wholesale, serialized by the mode lock. */
+void trustedui_set_mode(int mode)
+{
+	unsigned long irq_state;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_mode = mode;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+}
+EXPORT_SYMBOL(trustedui_set_mode);
+
+
+/* OR @mask into the TUI mode; returns the combined mode. */
+int trustedui_set_mask(int mask)
+{
+	unsigned long irq_state;
+	int new_mode;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_mode |= mask;
+	new_mode = trustedui_mode;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return new_mode;
+}
+EXPORT_SYMBOL(trustedui_set_mask);
+
+/* Clear the bits of @mask from the TUI mode; returns the new mode. */
+int trustedui_clear_mask(int mask)
+{
+	unsigned long irq_state;
+	int new_mode;
+
+	spin_lock_irqsave(&trustedui_lock, irq_state);
+	trustedui_mode &= ~mask;
+	new_mode = trustedui_mode;
+	spin_unlock_irqrestore(&trustedui_lock, irq_state);
+
+	return new_mode;
+}
+EXPORT_SYMBOL(trustedui_clear_mask);
+
+MODULE_AUTHOR("Trustonic Limited");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("<t-base TUI");
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal.h b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal.h
new file mode 100644
index 000000000..778b49338
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _TUI_HAL_H_
+#define _TUI_HAL_H_
+
+#include <linux/types.h>
+
+uint32_t hal_tui_init(void);
+void hal_tui_exit(void);
+uint32_t hal_tui_alloc(tuiAllocBuffer_t allocbuffer[MAX_DCI_BUFFER_NUMBER],
+ size_t allocsize, uint32_t number);
+void hal_tui_free(void);
+uint32_t hal_tui_deactivate(void);
+uint32_t hal_tui_activate(void);
+
+#endif
diff --git a/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal_mt6735.c b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal_mt6735.c
new file mode 100644
index 000000000..7b93981f6
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/TlcTui/tui-hal_mt6735.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fb.h>
+
+#include <t-base-tui.h>
+
+#include "tui_ioctl.h"
+#include "dciTui.h"
+#include "tlcTui.h"
+#include "tui-hal.h"
+#include <linux/delay.h>
+
+#include <mach/mt_clkmgr.h>
+
+
+#define TUI_MEMPOOL_SIZE 0
+
+/* Extra memory size required for TUI driver */
+#define TUI_EXTRA_MEM_SIZE (0x200000)
+
+struct tui_mempool {
+ void *va;
+ unsigned long pa;
+ size_t size;
+};
+
+/* for TUI EINT mapping to Security World */
+extern void gt1x_power_reset(void);
+extern int mt_eint_set_deint(int eint_num, int irq_num);
+extern int mt_eint_clr_deint(int eint_num);
+extern int tpd_reregister_from_tui(void);
+extern int tpd_enter_tui(void);
+extern int tpd_exit_tui(void);
+extern int i2c_tui_enable_clock(void);
+extern int i2c_tui_disable_clock(void);
+#ifndef CONFIG_CMA
+extern int secmem_api_alloc_pa(u32 alignment, u32 size, u32 *refcount, u32 *sec_handle,
+ const uint8_t *owner, uint32_t id);
+#endif
+extern int secmem_api_unref_pa(u32 sec_handle, const uint8_t *owner, uint32_t id);
+extern int tui_region_offline(phys_addr_t *pa, unsigned long *size);
+extern int tui_region_online(void);
+static struct tui_mempool g_tui_mem_pool;
+static u32 g_tui_secmem_handle;
+extern int display_enter_tui(void);
+extern int display_exit_tui(void);
+
+
+/* basic implementation of a memory pool for TUI framebuffer. This
+ * implementation is using kmalloc, for the purpose of demonstration only.
+ * A real implementation might prefer using more advanced allocator, like ION,
+ * in order not to exhaust memory available to kmalloc
+ */
+static bool allocate_tui_memory_pool(struct tui_mempool *pool, size_t size)
+{
+ bool ret = false;
+ void *tui_mem_pool = NULL;
+
+ pr_info("%s %s:%d\n", __func__, __FILE__, __LINE__);
+ if (!size) {
+ pr_debug("TUI frame buffer: nothing to allocate.");
+ return true;
+ }
+
+ tui_mem_pool = kmalloc(size, GFP_KERNEL);
+ if (!tui_mem_pool) {
+ pr_debug("ERROR Could not allocate TUI memory pool");
+ } else if (ksize(tui_mem_pool) < size) {
+ pr_debug("ERROR TUI memory pool allocated size is too small."\
+ " required=%zd allocated=%zd",
+ size, ksize(tui_mem_pool));
+ kfree(tui_mem_pool);
+ } else {
+ pool->va = tui_mem_pool;
+ pool->pa = virt_to_phys(tui_mem_pool);
+ pool->size = ksize(tui_mem_pool);
+ ret = true;
+ }
+ return ret;
+}
+
+static void free_tui_memory_pool(struct tui_mempool *pool)
+{
+ kfree(pool->va);
+ memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * hal_tui_init() - integrator specific initialization for kernel module
+ *
+ * This function is called when the kernel module is initialized, either at
+ * boot time, if the module is built statically in the kernel, or when the
+ * kernel is dynamically loaded if the module is built as a dynamic kernel
+ * module. This function may be used by the integrator, for instance, to get a
+ * memory pool that will be used to allocate the secure framebuffer and work
+ * buffer for TUI sessions.
+ *
+ * Return: must return 0 on success, or non-zero on error. If the function
+ * returns an error, the module initialization will fail.
+ */
+uint32_t hal_tui_init(void)
+{
+ /* Allocate memory pool for the framebuffer
+ */
+ if (!allocate_tui_memory_pool(&g_tui_mem_pool, TUI_MEMPOOL_SIZE))
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+
+ return TUI_DCI_OK;
+}
+
+/**
+ * hal_tui_exit() - integrator specific exit code for kernel module
+ *
+ * This function is called when the kernel module exit. It is called when the
+ * kernel module is unloaded, for a dynamic kernel module, and never called for
+ * a module built into the kernel. It can be used to free any resources
+ * allocated by hal_tui_init().
+ */
+void hal_tui_exit(void)
+{
+ /* delete memory pool if any */
+ if (g_tui_mem_pool.va)
+ free_tui_memory_pool(&g_tui_mem_pool);
+}
+
+/**
+ * hal_tui_alloc() - allocator for secure framebuffer and working buffer
+ * @allocbuffer: output parameter that the allocator fills with the physical
+ * addresses of the allocated buffers
+ * @allocsize: size of the buffer to allocate. All the buffer are of the
+ * same size
+ * @number: Number to allocate.
+ *
+ * This function is called when the module receives a CMD_TUI_SW_OPEN_SESSION
+ * message from the secure driver. The function must allocate 'number'
+ * buffer(s) of physically contiguous memory, where the length of each buffer
+ * is at least 'allocsize' bytes. The physical address of each buffer must be
+ * stored in the array of structure 'allocbuffer' which is provided as
+ * arguments.
+ *
+ * Physical address of the first buffer must be put in allocate[0].pa , the
+ * second one on allocbuffer[1].pa, and so on. The function must return 0 on
+ * success, non-zero on error. For integrations where the framebuffer is not
+ * allocated by the Normal World, this function should do nothing and return
+ * success (zero).
+ */
+uint32_t hal_tui_alloc(
+ tuiAllocBuffer_t *allocbuffer, size_t allocsize, uint32_t number)
+{
+ uint32_t ret = TUI_DCI_ERR_INTERNAL_ERROR;
+#ifndef CONFIG_CMA
+ u32 refcount = 0;
+ u32 sec_pa = 0;
+#else
+ phys_addr_t pa = 0;
+ unsigned long size = 0;
+#endif
+
+ if (!allocbuffer) {
+ pr_debug("%s(%d): allocbuffer is null\n", __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ pr_debug("%s(%d): Requested size=0x%zx x %u chunks\n",
+ __func__, __LINE__, allocsize, number);
+
+ if ((size_t)allocsize == 0) {
+ pr_debug("%s(%d): Nothing to allocate\n", __func__, __LINE__);
+ return TUI_DCI_OK;
+ }
+
+ if (number != 2) {
+ pr_debug("%s(%d): Unexpected number of buffers requested\n",
+ __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+#ifndef CONFIG_CMA
+ ret = secmem_api_alloc_pa(4096, allocsize*number+TUI_EXTRA_MEM_SIZE, &refcount,
+ &sec_pa, __func__, __LINE__);
+ pr_err("%s: sec_pa=%x ret=%d", __func__, sec_pa, (int)ret);
+ if (ret) {
+ pr_err("%s(%d): secmem_api_alloc failed! ret=%d\n",
+ __func__, __LINE__, ret);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+#else
+ ret = tui_region_offline(&pa, &size);
+ pr_debug("tui pa=0x%x, size=0x%lx", (uint32_t)pa, size);
+ if (ret) {
+ pr_err("%s(%d): tui_region_offline failed!\n",
+ __func__, __LINE__);
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+#endif
+
+ if (ret == 0) {
+#ifndef CONFIG_CMA
+ g_tui_secmem_handle = sec_pa;
+ allocbuffer[0].pa = (uint64_t) sec_pa;
+ allocbuffer[1].pa = (uint64_t) (sec_pa + allocsize);
+#else
+ g_tui_secmem_handle = (u32)pa;
+ allocbuffer[0].pa = (uint64_t) pa;
+ allocbuffer[1].pa = (uint64_t) (pa + allocsize);
+#endif
+ } else {
+ return TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ pr_debug("tui-hal allocasize=%ld number=%d, extra=%d\n", allocsize, number, TUI_EXTRA_MEM_SIZE);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[0].pa);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[1].pa);
+
+ return TUI_DCI_OK;
+
+#if 0
+ if ((size_t)(allocsize*number) <= g_tui_mem_pool.size) {
+ /* requested buffer fits in the memory pool */
+ allocbuffer[0].pa = (uint64_t) g_tui_mem_pool.pa;
+ allocbuffer[1].pa = (uint64_t) (g_tui_mem_pool.pa +
+ g_tui_mem_pool.size/2);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[0].pa);
+ pr_debug("%s(%d): allocated at %llx\n", __func__, __LINE__,
+ allocbuffer[1].pa);
+ ret = TUI_DCI_OK;
+ } else {
+ /* requested buffer is bigger than the memory pool, return an
+ error */
+ pr_debug("%s(%d): Memory pool too small\n", __func__, __LINE__);
+ ret = TUI_DCI_ERR_INTERNAL_ERROR;
+ }
+
+ return ret;
+#endif
+}
+
+/**
+ * hal_tui_free() - free memory allocated by hal_tui_alloc()
+ *
+ * This function is called at the end of the TUI session, when the TUI module
+ * receives the CMD_TUI_SW_CLOSE_SESSION message. The function should free the
+ * buffers allocated by hal_tui_alloc(...).
+ */
+void hal_tui_free(void)
+{
+ pr_info("[TUI-HAL] hal_tui_free()\n");
+ if (g_tui_secmem_handle) {
+#ifndef CONFIG_CMA
+ secmem_api_unref_pa(g_tui_secmem_handle, __func__, __LINE__);
+#else
+ tui_region_online();
+#endif
+ g_tui_secmem_handle = 0;
+ }
+}
+
+/**
+ * hal_tui_deactivate() - deactivate Normal World display and input
+ *
+ * This function should stop the Normal World display and, if necessary, Normal
+ * World input. It is called when a TUI session is opening, before the Secure
+ * World takes control of display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+
+uint32_t hal_tui_deactivate(void)
+{
+ int ret = TUI_DCI_OK, tmp;
+ pr_info("[TUI-HAL] hal_tui_deactivate()\n");
+ /* Set linux TUI flag */
+ trustedui_set_mask(TRUSTEDUI_MODE_TUI_SESSION);
+ pr_info("TDDP/[TUI-HAL] %s()\n", __func__);
+ /*
+ * Stop NWd display here. After this function returns, SWd will take
+ * control of the display and input. Therefore the NWd should no longer
+ * access it
+ * This can be done by calling the fb_blank(FB_BLANK_POWERDOWN) function
+ * on the appropriate framebuffer device
+ */
+
+ tpd_enter_tui();
+#if 0
+ enable_clock(MT_CG_PERI_I2C0, "i2c");
+ enable_clock(MT_CG_PERI_I2C1, "i2c");
+ enable_clock(MT_CG_PERI_I2C2, "i2c");
+ enable_clock(MT_CG_PERI_I2C3, "i2c");
+ enable_clock(MT_CG_PERI_APDMA, "i2c");
+#endif
+ i2c_tui_enable_clock();
+
+ //gt1x_power_reset();
+
+ tmp = display_enter_tui();
+ if(tmp) {
+ pr_debug("TDDP/[TUI-HAL] %s() failed because display\n", __func__);
+ ret = TUI_DCI_ERR_OUT_OF_DISPLAY;
+ }
+
+
+ trustedui_set_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+ TRUSTEDUI_MODE_INPUT_SECURED);
+
+ pr_info("TDDP/[TUI-HAL] %s()\n", __func__);
+
+ return ret;
+}
+
+/**
+ * hal_tui_activate() - restore Normal World display and input after a TUI
+ * session
+ *
+ * This function should enable Normal World display and, if necessary, Normal
+ * World input. It is called after a TUI session, after the Secure World has
+ * released the display and input.
+ *
+ * Return: must return 0 on success, non-zero otherwise.
+ */
+uint32_t hal_tui_activate(void)
+{
+ pr_info("[TUI-HAL] hal_tui_activate()\n");
+ /* Protect NWd */
+ trustedui_clear_mask(TRUSTEDUI_MODE_VIDEO_SECURED|
+ TRUSTEDUI_MODE_INPUT_SECURED);
+
+ pr_info("TDDP %s()\n", __func__);
+
+ /*
+ * Restart NWd display here. TUI session has ended, and therefore the
+ * SWd will no longer use display and input.
+ * This can be done by calling the fb_blank(FB_BLANK_UNBLANK) function
+ * on the appropriate framebuffer device
+ */
+ /* Clear linux TUI flag */
+
+ tpd_exit_tui();
+#if 0
+ disable_clock(MT_CG_PERI_I2C0, "i2c");
+ disable_clock(MT_CG_PERI_I2C1, "i2c");
+ disable_clock(MT_CG_PERI_I2C2, "i2c");
+ disable_clock(MT_CG_PERI_I2C3, "i2c");
+ disable_clock(MT_CG_PERI_APDMA, "i2c");
+#endif
+ i2c_tui_disable_clock();
+
+ display_exit_tui();
+
+
+ trustedui_set_mode(TRUSTEDUI_MODE_OFF);
+
+ return TUI_DCI_OK;
+}
+
+int __weak tui_region_offline(phys_addr_t *pa, unsigned long *size)
+{
+ return -1;
+}
+
+int __weak tui_region_online(void)
+{
+ return 0;
+}
+
+int __weak tpd_reregister_from_tui(void)
+{
+ return 0;
+}
+
+int __weak tpd_enter_tui(void)
+{
+ return 0;
+}
+
+int __weak tpd_exit_tui(void)
+{
+ return 0;
+}
+
+int __weak display_enter_tui(void)
+{
+ return 0;
+}
+
+int __weak display_exit_tui(void)
+{
+ return 0;
+}
+
+int __weak i2c_tui_enable_clock(void)
+{
+ return 0;
+}
+
+int __weak i2c_tui_disable_clock(void)
+{
+ return 0;
+}
diff --git a/drivers/misc/mediatek/gud/302c/gud/build_tag.h b/drivers/misc/mediatek/gud/302c/gud/build_tag.h
new file mode 100644
index 000000000..9eda58638
--- /dev/null
+++ b/drivers/misc/mediatek/gud/302c/gud/build_tag.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2013-2014 TRUSTONIC LIMITED
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define MOBICORE_COMPONENT_BUILD_TAG \
+ "t-base-Mediatek-Armv8-Android-302C-V005-20151127_180020_32"
diff --git a/drivers/misc/mediatek/gud/Kconfig b/drivers/misc/mediatek/gud/Kconfig
new file mode 100644
index 000000000..fabdcd50e
--- /dev/null
+++ b/drivers/misc/mediatek/gud/Kconfig
@@ -0,0 +1,68 @@
+#
+# TRUSTONIC TEE configuration
+#
+config TRUSTONIC_TEE_SUPPORT
+ bool "Enable Trustonic TEE Support"
+ default n
+ ---help---
+ Enable Trustonic TEE Support
+
+config TRUSTONIC_TEE_VERSION
+ string "TRUSTONIC TEE Version"
+ depends on TRUSTONIC_TEE_SUPPORT
+ default "302c" if (ARCH_MT6735 || ARCH_MT6735M || ARCH_MT6753 || ARCH_MT6580)
+
+config MT_TRUSTONIC_TEE_DEBUGFS
+ bool "Enable Trustonic TEE debugfs"
+ default n
+ ---help---
+ Enable Trustonic TEE debugfs
+
+#
+# MobiCore configuration
+#
+config MOBICORE_DRIVER
+ tristate "MobiCore Driver"
+ depends on TRUSTONIC_TEE_SUPPORT
+ default n
+ ---help---
+ Enable Linux Kernel MobiCore Support
+
+config MOBICORE_DEBUG
+ bool "MobiCore Module debug mode"
+ depends on MOBICORE_DRIVER
+ default n
+ ---help---
+ Enable Debug mode in the MobiCore Driver.
+ It enables printing information about mobicore operations
+
+config MOBICORE_VERBOSE
+ bool "MobiCore Module verbose debug mode"
+ depends on MOBICORE_DEBUG
+ default n
+ ---help---
+ Enable Verbose Debug mode in the MobiCore Driver.
+ It enables printing extra information about mobicore operations
+ Beware: this is only useful for debugging deep in the driver because
+ it prints too many logs
+
+config MOBICORE_API
+ tristate "Linux MobiCore API"
+ depends on MOBICORE_DRIVER
+ default n
+ ---help---
+ Enable Linux Kernel MobiCore API
+
+config TRUSTONIC_TRUSTED_UI
+ tristate "<t-base TUI"
+ depends on TRUSTONIC_TEE_SUPPORT
+ default n
+ ---help---
+ Enable <t-base Trusted User Interface
+
+config TRUSTONIC_TRUSTED_UI_FB_BLANK
+ bool "<t-base TUI with fb_blank"
+ depends on TRUSTONIC_TRUSTED_UI
+ default n
+ ---help---
+ Blank the framebuffer before starting a TUI session
diff --git a/drivers/misc/mediatek/gud/Makefile b/drivers/misc/mediatek/gud/Makefile
index 5975a0a82..df7d19c0f 100755..100644
--- a/drivers/misc/mediatek/gud/Makefile
+++ b/drivers/misc/mediatek/gud/Makefile
@@ -1,2 +1,13 @@
-
-obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += $(subst ",,$(CONFIG_MTK_PLATFORM))/
+ifeq ($(CONFIG_TRUSTONIC_TEE_SUPPORT),y)
+ ifeq (,$(filter $(CONFIG_MTK_PLATFORM), "mt6582" "mt6592"))
+ # armv8
+ ifeq ($(CONFIG_TRUSTONIC_TEE_VERSION), "")
+ obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += 302a/
+ else
+ obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += $(subst ",,$(CONFIG_TRUSTONIC_TEE_VERSION))/
+ endif
+ else
+ # armv7
+ obj-$(CONFIG_TRUSTONIC_TEE_SUPPORT) += $(subst ",,$(CONFIG_MTK_PLATFORM))/
+ endif
+endif
diff --git a/drivers/misc/mediatek/gud/Makefile.include b/drivers/misc/mediatek/gud/Makefile.include
new file mode 100644
index 000000000..aa0a4fd1a
--- /dev/null
+++ b/drivers/misc/mediatek/gud/Makefile.include
@@ -0,0 +1,17 @@
+ifeq ($(CONFIG_TRUSTONIC_TEE_SUPPORT),y)
+ ifeq (,$(filter $(CONFIG_MTK_PLATFORM), "mt6582" "mt6592"))
+ # armv8
+ ifeq ($(CONFIG_TRUSTONIC_TEE_VERSION), "")
+ # use default version
+ ccflags-y += -I$(srctree)/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/include \
+ -I$(srctree)/drivers/misc/mediatek/gud/302a/gud/MobiCoreKernelApi/public
+ else
+ ccflags-y += -I$(srctree)/drivers/misc/mediatek/gud/$(CONFIG_TRUSTONIC_TEE_VERSION)/gud/MobiCoreKernelApi/include \
+ -I$(srctree)/drivers/misc/mediatek/gud/$(CONFIG_TRUSTONIC_TEE_VERSION)/gud/MobiCoreKernelApi/public
+ endif
+ else
+ # armv7
+ ccflags-y += -I$(srctree)/drivers/misc/mediatek/gud/$(MTK_PLATFORM)/gud/MobiCoreKernelApi/include \
+ -I$(srctree)/drivers/misc/mediatek/gud/$(MTK_PLATFORM)/gud/MobiCoreKernelApi/public
+ endif
+endif
diff --git a/drivers/misc/mediatek/m4u/2.0/Makefile b/drivers/misc/mediatek/m4u/2.0/Makefile
new file mode 100644
index 000000000..906d9f2fa
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/Makefile
@@ -0,0 +1,17 @@
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/mmp/
+include $(srctree)/drivers/misc/mediatek/gud/Makefile.include
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat/$(MTK_PLATFORM)/include/trustzone/m4u
+
+ifeq ($(CONFIG_ARCH_MT6735),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6735/
+endif
+ifeq ($(CONFIG_ARCH_MT6735M),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6735m/
+endif
+ifeq ($(CONFIG_ARCH_MT6753),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6753/
+endif
+
+obj-y += m4u.o m4u_mva.o m4u_pgtable.o m4u_debug.o
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u.c b/drivers/misc/mediatek/m4u/2.0/m4u.c
new file mode 100644
index 000000000..f1920f285
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u.c
@@ -0,0 +1,2670 @@
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <asm/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+/* #include <linux/earlysuspend.h> */
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <mach/sync_write.h>
+#include <mach/mt_clkmgr.h>
+#include <mach/irqs.h>
+#include <asm/cacheflush.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/dma-direction.h>
+#include <asm/page.h>
+#include <linux/proc_fs.h>
+
+#include "m4u_priv.h"
+#include "m4u.h"
+#include "m4u_hw.h"
+
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+
+#include "mobicore_driver_api.h"
+#include "tz_m4u.h"
+#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
+#include <linux/sectrace.h>
+#endif
+int m4u_tee_en = 0;
+
+#endif
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#endif
+
+static m4u_buf_info_t gMvaNode_unknown = {
+ .va = 0,
+ .mva = 0,
+ .size = 0,
+ .port = M4U_PORT_UNKNOWN,
+};
+
+
+
+
+/* -------------------------------------Global variables------------------------------------------------// */
+#ifdef M4U_PROFILE
+MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
+#endif
+
+#define M4U_DEV_NAME "m4u"
+struct m4u_device *gM4uDev;
+
+static int m4u_buf_show(void *priv, unsigned int mva_start, unsigned int mva_end, void *data)
+{
+ m4u_buf_info_t *pMvaInfo = priv;
+
+ M4U_PRINT_LOG_OR_SEQ(data, "0x%-8x, 0x%-8x, 0x%lx, 0x%-8x, 0x%x, %s, 0x%x, 0x%x, 0x%x\n",
+ pMvaInfo->mva, pMvaInfo->mva+pMvaInfo->size-1, pMvaInfo->va,
+ pMvaInfo->size, pMvaInfo->prot, m4u_get_port_name(pMvaInfo->port),
+ pMvaInfo->flags, mva_start, mva_end);
+
+ return 0;
+}
+
+
+int m4u_dump_buf_info(struct seq_file *seq)
+{
+
+ M4U_PRINT_LOG_OR_SEQ(seq, "\ndump mva allocated info ========>\n");
+ M4U_PRINT_LOG_OR_SEQ(seq,
+ "mva_start mva_end va size prot module flags debug1 debug2\n");
+
+ mva_foreach_priv((void *) m4u_buf_show, seq);
+
+ M4U_PRINT_LOG_OR_SEQ(seq, " dump mva allocated info done ========>\n");
+ return 0;
+}
+
+#ifdef M4U_PROFILE
+static void m4u_profile_init(void)
+{
+ MMP_Event M4U_Event;
+
+ MMProfileEnable(1);
+ M4U_Event = MMProfileRegisterEvent(MMP_RootEvent, "M4U");
+ /* register events */
+ M4U_MMP_Events[M4U_MMP_ALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "Alloc MVA");
+ M4U_MMP_Events[M4U_MMP_DEALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "DeAlloc MVA");
+ M4U_MMP_Events[M4U_MMP_CONFIG_PORT] = MMProfileRegisterEvent(M4U_Event, "Config Port");
+ M4U_MMP_Events[M4U_MMP_M4U_ERROR] = MMProfileRegisterEvent(M4U_Event, "M4U ERROR");
+ M4U_MMP_Events[M4U_MMP_CACHE_SYNC] = MMProfileRegisterEvent(M4U_Event, "M4U_CACHE_SYNC");
+ M4U_MMP_Events[M4U_MMP_TOGGLE_CG] = MMProfileRegisterEvent(M4U_Event, "M4U_Toggle_CG");
+
+ /* enable events by default */
+ MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], 1);
+ MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], 1);
+ MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], 1);
+ MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_M4U_ERROR], 1);
+ MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], 1);
+ /* MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], 0); */
+ MMProfileStart(1);
+}
+#endif
+
+/* get ref count on all pages in sgtable */
+int m4u_get_sgtable_pages(struct sg_table *table)
+{
+ return 0;
+}
+
+/* put ref count on all pages in sgtable */
+int m4u_put_sgtable_pages(struct sg_table *table)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ if (page) {
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+ }
+ return 0;
+}
+
+static m4u_buf_info_t *m4u_alloc_buf_info(void)
+{
+ m4u_buf_info_t *pList = NULL;
+
+ pList = kzalloc(sizeof(m4u_buf_info_t), GFP_KERNEL);
+ if (pList == NULL) {
+ M4UMSG("m4u_client_add_buf(), pList=0x%p\n", pList);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&(pList->link));
+ return pList;
+}
+
+static int m4u_free_buf_info(m4u_buf_info_t *pList)
+{
+ kfree(pList);
+ return 0;
+}
+
+static int m4u_client_add_buf(m4u_client_t *client, m4u_buf_info_t *pList)
+{
+ mutex_lock(&(client->dataMutex));
+ list_add(&(pList->link), &(client->mvaList));
+ mutex_unlock(&(client->dataMutex));
+
+ return 0;
+}
+
+/*
+static int m4u_client_del_buf(m4u_client_t *client, m4u_buf_info_t *pList)
+{
+ mutex_lock(&(client->dataMutex));
+ list_del(&(pList->link));
+ mutex_unlock(&(client->dataMutex));
+
+ return 0;
+}
+*/
+
+/***********************************************************/
+/** find or delete a buffer from client list
+* @param client -- client to be searched
+* @param mva -- mva to be searched
+* @param del -- should we del this buffer from client?
+*
+* @return buffer_info if found, NULL on fail
+* @remark
+* @see
+* @to-do we need to add multi domain support here.
+* @author K Zhang @date 2013/11/14
+************************************************************/
+static m4u_buf_info_t *m4u_client_find_buf(m4u_client_t *client, unsigned int mva, int del)
+{
+ struct list_head *pListHead;
+ m4u_buf_info_t *pList = NULL;
+ m4u_buf_info_t *ret = NULL;
+
+ if (client == NULL) {
+ M4UERR("m4u_delete_from_garbage_list(), client is NULL!\n");
+ m4u_dump_buf_info(NULL);
+ return NULL;
+ }
+
+ mutex_lock(&(client->dataMutex));
+ list_for_each(pListHead, &(client->mvaList)) {
+ pList = container_of(pListHead, m4u_buf_info_t, link);
+ if (pList->mva == mva)
+ break;
+ }
+ if (pListHead == &(client->mvaList)) {
+ ret = NULL;
+ } else {
+ if (del)
+ list_del(pListHead);
+ ret = pList;
+ }
+
+ mutex_unlock(&(client->dataMutex));
+
+ return ret;
+}
+
+/*
+//dump buf info in client
+static void m4u_client_dump_buf(m4u_client_t *client, const char *pMsg)
+{
+ m4u_buf_info_t *pList;
+ struct list_head *pListHead;
+
+ M4UMSG("print mva list [%s] ================================>\n", pMsg);
+ mutex_lock(&(client->dataMutex));
+ list_for_each(pListHead, &(client->mvaList))
+ {
+ pList = container_of(pListHead, m4u_buf_info_t, link);
+ M4UMSG("port=%s, va=0x%x, size=0x%x, mva=0x%x, prot=%d\n",
+ m4u_get_port_name(pList->port), pList->va, pList->size, pList->mva, pList->prot);
+ }
+ mutex_unlock(&(client->dataMutex));
+
+ M4UMSG("print mva list done ==========================>\n");
+}
+*/
+
+m4u_client_t *m4u_create_client(void)
+{
+ m4u_client_t *client;
+
+ client = kmalloc(sizeof(m4u_client_t), GFP_ATOMIC);
+ if (!client)
+ return NULL;
+
+ mutex_init(&(client->dataMutex));
+ mutex_lock(&(client->dataMutex));
+ client->open_pid = current->pid;
+ client->open_tgid = current->tgid;
+ INIT_LIST_HEAD(&(client->mvaList));
+ mutex_unlock(&(client->dataMutex));
+
+ return client;
+}
+
+int m4u_destroy_client(m4u_client_t *client)
+{
+ m4u_buf_info_t *pMvaInfo;
+ unsigned int mva, size;
+ M4U_PORT_ID port;
+
+ while (1) {
+ mutex_lock(&(client->dataMutex));
+ if (list_empty(&client->mvaList)) {
+ mutex_unlock(&(client->dataMutex));
+ break;
+ }
+ pMvaInfo = container_of(client->mvaList.next, m4u_buf_info_t, link);
+ M4UMSG("warnning: clean garbage at m4u close: module=%s,va=0x%lx,mva=0x%x,size=%d\n",
+ m4u_get_port_name(pMvaInfo->port), pMvaInfo->va, pMvaInfo->mva,
+ pMvaInfo->size);
+
+ port = pMvaInfo->port;
+ mva = pMvaInfo->mva;
+ size = pMvaInfo->size;
+
+ mutex_unlock(&(client->dataMutex));
+
+ m4u_reclaim_notify(port, mva, size);
+
+ /* m4u_dealloc_mva will lock client->dataMutex again */
+ m4u_dealloc_mva(client, port, mva);
+ }
+
+ kfree(client);
+
+ return 0;
+}
+
+static int m4u_dump_mmaps(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ M4ULOG_MID("addr=0x%lx, name=%s, pid=0x%x,", addr, current->comm, current->pid);
+
+ vma = find_vma(current->mm, addr);
+
+ if (vma && (addr >= vma->vm_start)) {
+ M4ULOG_MID("find vma: 0x%16lx-0x%16lx, flags=0x%lx\n",
+ (vma->vm_start), (vma->vm_end), vma->vm_flags);
+ return 0;
+ }
+
+ M4UMSG("cannot find vma for addr 0x%lx\n", addr);
+ return -1;
+}
+
+/* to-do: need modification to support 4G DRAM */
+static phys_addr_t m4u_user_v2p(unsigned long va)
+{
+ unsigned long pageOffset = (va & (PAGE_SIZE - 1));
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ phys_addr_t pa;
+
+ if (NULL == current) {
+ M4UMSG("warning: m4u_user_v2p, current is NULL!\n");
+ return 0;
+ }
+ if (NULL == current->mm) {
+ M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s\n",
+ current->tgid, current->comm);
+ return 0;
+ }
+
+ pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
+ if (pgd_none(*pgd) || pgd_bad(*pgd)) {
+ M4UMSG("m4u_user_v2p(), va=0x%lx, pgd invalid!\n", va);
+ return 0;
+ }
+
+ pud = pud_offset(pgd, va);
+ if (pud_none(*pud) || pud_bad(*pud)) {
+ M4UMSG("m4u_user_v2p(), va=0x%lx, pud invalid!\n", va);
+ return 0;
+ }
+
+ pmd = pmd_offset(pud, va);
+ if (pmd_none(*pmd) || pmd_bad(*pmd)) {
+ M4UMSG("m4u_user_v2p(), va=0x%lx, pmd invalid!\n", va);
+ return 0;
+ }
+
+ pte = pte_offset_map(pmd, va);
+ if (pte_present(*pte)) {
+ /* pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; */
+ pa = (pte_val(*pte) & (PHYS_MASK) & (~((phys_addr_t) 0xfff))) | pageOffset;
+ pte_unmap(pte);
+ return pa;
+ }
+
+ pte_unmap(pte);
+
+ M4UMSG("m4u_user_v2p(), va=0x%lx, pte invalid!\n", va);
+ return 0;
+}
+
+
+static int m4u_fill_sgtable_user(struct vm_area_struct *vma, unsigned long va, int page_num,
+ struct scatterlist **pSg, int has_page)
+{
+ unsigned long va_align;
+ phys_addr_t pa = 0;
+ int i, ret;
+ struct scatterlist *sg = *pSg;
+ struct page *pages;
+
+ va_align = round_down(va, PAGE_SIZE);
+
+ for (i = 0; i < page_num; i++) {
+ int fault_cnt;
+ unsigned long va_tmp = va_align+i*PAGE_SIZE;
+
+ pa = 0;
+
+ for (fault_cnt = 0; fault_cnt < 3000; fault_cnt++) {
+ if (has_page) {
+ ret = get_user_pages(current, current->mm, va_tmp, 1,
+ (vma->vm_flags & VM_WRITE), 0, &pages, NULL);
+
+ if (ret == 1)
+ pa = (page_to_pfn(pages) << PAGE_SHIFT) | (va_tmp & ~PAGE_MASK);
+ } else {
+ pa = m4u_user_v2p(va_tmp);
+ if (!pa) {
+ handle_mm_fault(current->mm, vma, va_tmp,
+ (vma->vm_flags&VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+ }
+ }
+
+ if (pa) {
+ /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
+ break;
+ }
+ cond_resched();
+ }
+
+ if (!pa || !sg) {
+ M4UMSG("%s: fail va=0x%lx,page_num=0x%x,fail_va=0x%lx,pa=0x%lx,sg=0x%p,i=%d\n",
+ __func__, va, page_num, va_tmp, (unsigned long)pa, sg, i);
+
+ show_pte(current->mm, va_tmp);
+ m4u_dump_mmaps(va);
+ m4u_dump_mmaps(va_tmp);
+ return -1;
+ }
+
+ if (fault_cnt > 2) {
+ M4UINFO("warning: handle_mm_fault for %d times\n", fault_cnt);
+ show_pte(current->mm, va_tmp);
+ m4u_dump_mmaps(va_tmp);
+ }
+ /* debug check... */
+ if ((pa & (PAGE_SIZE - 1)) != 0) {
+ M4ULOG_MID("pa error, pa: 0x%lx, va: 0x%lx, align: 0x%lx\n",
+ (unsigned long)pa, va_tmp, va_align);
+ }
+
+ if (has_page) {
+ struct page *page;
+
+ page = phys_to_page(pa);
+ /* M4UMSG("page=0x%x, pfn=%d\n", page, __phys_to_pfn(pa)); */
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ #ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+ #endif
+ } else {
+ sg_dma_address(sg) = pa;
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+ sg = sg_next(sg);
+ }
+ *pSg = sg;
+ return 0;
+}
+
+static int m4u_create_sgtable_user(unsigned long va_align, struct sg_table *table)
+{
+ int ret = 0;
+ struct vm_area_struct *vma;
+ struct scatterlist *sg = table->sgl;
+ unsigned int left_page_num = table->nents;
+ unsigned long va = va_align;
+
+ down_read(&current->mm->mmap_sem);
+
+ while (left_page_num) {
+ unsigned int vma_page_num;
+
+ vma = find_vma(current->mm, va);
+ if (vma == NULL || vma->vm_start > va) {
+ M4UMSG("cannot find vma: va=0x%lx, vma=0x%p\n", va, vma);
+ m4u_dump_mmaps(va);
+ ret = -1;
+ goto out;
+ } else {
+ /* M4ULOG_MID("%s va: 0x%lx, vma->vm_start=0x%lx, vma->vm_end=0x%lx\n",
+ __func__, va, vma->vm_start, vma->vm_end); */
+ }
+
+ vma_page_num = (vma->vm_end - va) / PAGE_SIZE;
+ vma_page_num = min(vma_page_num, left_page_num);
+
+ if ((vma->vm_flags) & VM_PFNMAP) {
+ /* ion va or ioremap vma has this flag */
+ /* VM_PFNMAP: Page-ranges managed without "struct page", just pure PFN */
+ ret = m4u_fill_sgtable_user(vma, va, vma_page_num, &sg, 0);
+ M4ULOG_MID("alloc_mva VM_PFNMAP va=0x%lx, page_num=0x%x\n", va,
+ vma_page_num);
+ } else {
+ /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
+ ret = m4u_fill_sgtable_user(vma, va, vma_page_num, &sg, 1);
+ }
+ if (ret) {
+ /* Add one line comment for avoid kernel coding style, WARNING:BRACES: */
+ goto out;
+ }
+
+ left_page_num -= vma_page_num;
+ va += vma_page_num * PAGE_SIZE;
+ }
+
+out:
+ up_read(&current->mm->mmap_sem);
+ return ret;
+}
+
+/*
+ * Fill every entry of @table with the page backing one PAGE_SIZE slice of
+ * a vmalloc'd buffer that starts at @va_align (page aligned).
+ * Shared by the user-space-vmalloc and kernel-space-vmalloc branches of
+ * m4u_create_sgtable(), which previously duplicated this loop.
+ * Returns 0 on success, -EFAULT if any page lookup fails.
+ */
+static int m4u_fill_sgtable_vmalloc(struct sg_table *table, unsigned long va_align)
+{
+	struct scatterlist *sg;
+	struct page *page;
+	int i;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		page = vmalloc_to_page((void *)(va_align + i * PAGE_SIZE));
+		if (!page) {
+			M4UMSG("vmalloc_to_page fail, va=0x%lx\n",
+			       va_align + i * PAGE_SIZE);
+			return -EFAULT;
+		}
+		sg_set_page(sg, page, PAGE_SIZE, 0);
+	}
+	return 0;
+}
+
+/* make a sgtable for virtual buffer */
+/*
+ * Build an sg_table describing the physical pages behind the virtual
+ * buffer [va, va+size). The source may be user memory, vmalloc memory or
+ * kmalloc/linear-mapped kernel memory; the branch is chosen from the
+ * address range. Returns the table or ERR_PTR(-ENOMEM/-EFAULT).
+ * Caller releases it with m4u_destroy_sgtable().
+ */
+struct sg_table *m4u_create_sgtable(unsigned long va, unsigned int size)
+{
+	struct sg_table *table;
+	int ret, i, page_num;
+	unsigned long va_align;
+	phys_addr_t pa;
+	struct scatterlist *sg;
+	struct page *page;
+
+	page_num = M4U_GET_PAGE_NUM(va, size);
+	va_align = round_down(va, PAGE_SIZE);
+
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		M4UMSG("%s table kmalloc fail: va=0x%lx, size=0x%x, page_num=%d\n",
+		       __func__, va, size, page_num);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = sg_alloc_table(table, page_num, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		M4UMSG("%s alloc_sgtable fail: va=0x%lx, size=0x%x, page_num=%d\n",
+		       __func__, va, size, page_num);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	M4ULOG_LOW("%s va=0x%lx, PAGE_OFFSET=0x%lx, VMALLOC_START=0x%lx, VMALLOC_END=0x%lx\n",
+		   __func__, va, PAGE_OFFSET, VMALLOC_START, VMALLOC_END);
+
+	if (va < PAGE_OFFSET) {	/* from user space */
+		if (va >= VMALLOC_START && va <= VMALLOC_END) {	/* vmalloc */
+			M4ULOG_MID(" from user space vmalloc, va = 0x%lx", va);
+			if (m4u_fill_sgtable_vmalloc(table, va_align))
+				goto err;
+		} else {
+			/* ordinary user pages: walk the VMAs and pin them */
+			ret = m4u_create_sgtable_user(va_align, table);
+			if (ret) {
+				M4UMSG("%s error va=0x%lx, size=%d\n", __func__, va, size);
+				goto err;
+			}
+		}
+	} else {		/* from kernel space */
+		if (va >= VMALLOC_START && va <= VMALLOC_END) {	/* vmalloc */
+			M4ULOG_MID(" from kernel space vmalloc, va = 0x%lx", va);
+			if (m4u_fill_sgtable_vmalloc(table, va_align))
+				goto err;
+		} else {	/* kmalloc to-do: use one entry sgtable. */
+			/* linear mapping: physical address is a direct translation */
+			for_each_sg(table->sgl, sg, table->nents, i) {
+				pa = virt_to_phys((void *)(va_align + i * PAGE_SIZE));
+				page = phys_to_page(pa);
+				sg_set_page(sg, page, PAGE_SIZE, 0);
+			}
+		}
+	}
+
+	return table;
+
+err:
+	sg_free_table(table);
+	kfree(table);
+	return ERR_PTR(-EFAULT);
+}
+
+/*
+ * Release an sg_table created by m4u_create_sgtable().
+ * NULL and ERR_PTR values are silently ignored. Always returns 0.
+ */
+int m4u_destroy_sgtable(struct sg_table *table)
+{
+	if (IS_ERR_OR_NULL(table))
+		return 0;
+
+	sg_free_table(table);
+	kfree(table);
+	return 0;
+}
+
+/* #define __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__ */
+
+/*
+ * Allocate an MVA (m4u/IOMMU virtual address) for a buffer and map it in
+ * the page table of the domain owning @port.
+ *
+ * The buffer is given either by @va (an sg_table is built here from the
+ * virtual range) or by a caller-supplied @sg_table; supplying both is only
+ * warned about, and @va then takes precedence.
+ *
+ * On success returns 0 and stores the new MVA in *pMva; on failure
+ * returns a negative errno and sets *pMva to 0.
+ */
+int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
+		  unsigned long va, struct sg_table *sg_table,
+		  unsigned int size, unsigned int prot, unsigned int flags, unsigned int *pMva)
+{
+	int ret;
+	m4u_buf_info_t *pMvaInfo;
+	unsigned int mva = 0, mva_align, size_align;
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagStart, va, size);
+
+
+	if (va && sg_table) {
+		M4UMSG("%s, va or sg_table are both valid: va=0x%lx, sg=0x%p\n", __func__,
+		       va, sg_table);
+	}
+	if (va) {
+		/* build our own sg_table from the virtual range */
+		sg_table = m4u_create_sgtable(va, size);
+		if (IS_ERR_OR_NULL(sg_table)) {
+			M4UMSG("%s, cannot create sg: larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x\n"
+			       , __func__, m4u_port_2_larb_id(port), m4u_get_port_name(port),
+			       va, sg_table, size, prot, flags);
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+
+	/* here we get correct sg_table for this buffer */
+
+	pMvaInfo = m4u_alloc_buf_info();
+	if (!pMvaInfo) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	pMvaInfo->va = va;
+	pMvaInfo->port = port;
+	pMvaInfo->size = size;
+	pMvaInfo->prot = prot;
+	pMvaInfo->flags = flags;
+	pMvaInfo->sg_table = sg_table;
+
+	/* FIX_MVA: *pMva carries the requested start address on input */
+	if (flags & M4U_FLAGS_FIX_MVA)
+		mva = m4u_do_mva_alloc_fix(*pMva, size, pMvaInfo);
+	else
+		mva = m4u_do_mva_alloc(va, size, pMvaInfo);
+
+	if (mva == 0) {
+		m4u_aee_print("alloc mva fail: larb=%d,module=%s,size=%d\n",
+			      m4u_port_2_larb_id(port), m4u_get_port_name(port), size);
+		m4u_dump_buf_info(NULL);
+		ret = -EINVAL;
+		goto err1;
+	} else
+		M4ULOG_LOW("%s,mva = 0x%x\n", __func__, mva);
+
+	/* take a reference on every page while it is mapped */
+	m4u_get_sgtable_pages(sg_table);
+
+	mva_align = round_down(mva, PAGE_SIZE);
+	size_align = PAGE_ALIGN(mva + size - mva_align);
+
+	ret = m4u_map_sgtable(m4u_get_domain_by_port(port), mva_align, sg_table,
+			      size_align, pMvaInfo->prot);
+	if (ret < 0) {
+		M4UMSG("error to map sgtable\n");
+		goto err2;
+	}
+
+	pMvaInfo->mva = mva;
+	pMvaInfo->mva_align = mva_align;
+	pMvaInfo->size_align = size_align;
+	*pMva = mva;
+
+	if (flags & M4U_FLAGS_SEQ_ACCESS)
+		pMvaInfo->seq_id = m4u_insert_seq_range(port, mva, mva + size - 1);
+
+	m4u_client_add_buf(client, pMvaInfo);
+
+	M4ULOG_MID("%s: pMvaInfo=0x%p, larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x,mva=0x%x\n",
+		   __func__, pMvaInfo, m4u_port_2_larb_id(port), m4u_get_port_name(port), va, sg_table,
+		   size, prot, flags, mva);
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, mva);
+
+#ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
+	/* map this mva to kernel va just for debug */
+	{
+		unsigned long kernel_va;
+		unsigned int kernel_size;
+		int ret;
+
+		ret = m4u_mva_map_kernel(mva, size, &kernel_va, &kernel_size);
+		if (ret)
+			M4UMSG("error to map kernel va: mva=0x%x, size=%d\n", mva, size);
+		else {
+			pMvaInfo->mapped_kernel_va_for_debug = kernel_va;
+			M4ULOG_MID("[kernel_va_debug] map va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n",
+				   mva, kernel_va, size);
+		}
+	}
+#endif
+
+	return 0;
+
+	/* unwind in reverse order of acquisition */
+err2:
+	m4u_do_mva_free(mva, size);
+
+err1:
+	m4u_free_buf_info(pMvaInfo);
+
+err:
+	/* only destroy the sg_table if we created it here (va != 0) */
+	if (va)
+		m4u_destroy_sgtable(sg_table);
+
+	*pMva = 0;
+
+	M4UMSG("error: larb=%d,module=%s,va=0x%lx,size=%d,prot=0x%x,flags=0x%x, mva=0x%x\n",
+	       m4u_port_2_larb_id(port), m4u_get_port_name(port), va, size, prot, flags, mva);
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, 0);
+	return ret;
+}
+
+/* interface for ion */
+static m4u_client_t *ion_m4u_client;
+
+/*
+ * ion entry point: allocate an MVA for an ion buffer described by
+ * @sg_table, using the driver-wide lazily-created ion client.
+ * Returns whatever m4u_alloc_mva() returns, or -1 if the shared client
+ * cannot be created.
+ */
+int m4u_alloc_mva_sg(int eModuleID,
+		     struct sg_table *sg_table,
+		     const unsigned int BufSize,
+		     int security, int cache_coherent, unsigned int *pRetMVABuf)
+{
+	unsigned int prot = M4U_PROT_READ | M4U_PROT_WRITE;
+
+	/* create the shared ion client on first use */
+	if (ion_m4u_client == NULL) {
+		m4u_client_t *client = m4u_create_client();
+
+		if (IS_ERR_OR_NULL(client))
+			return -1;
+		ion_m4u_client = client;
+	}
+
+	if (cache_coherent)
+		prot |= M4U_PROT_SHARE | M4U_PROT_CACHE;
+	if (security)
+		prot |= M4U_PROT_SEC;
+
+	return m4u_alloc_mva(ion_m4u_client, eModuleID, 0, sg_table, BufSize, prot, 0, pRetMVABuf);
+}
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size);
+
+/*
+ * Mark an already-allocated MVA as shareable with the secure world, so
+ * m4u_dealloc_mva() will later ask the TEE to unmap it as well.
+ * Returns 0 on success, -1 if the MVA is unknown.
+ */
+int m4u_register_mva_share(int eModuleID, unsigned int mva)
+{
+	m4u_buf_info_t *info = mva_get_priv(mva);
+
+	if (info == NULL) {
+		M4UMSG("%s cannot find mva: module=%s, mva=0x%x\n", __func__,
+		       m4u_get_port_name(eModuleID), mva);
+		return -1;
+	}
+
+	info->flags |= M4U_FLAGS_SEC_SHAREABLE;
+	return 0;
+}
+#endif
+
+
+/*
+ * ion entry point: release an MVA previously obtained via
+ * m4u_alloc_mva_sg(). @sg_table and @BufSize are unused; the MVA alone
+ * identifies the buffer. Returns -1 if the shared ion client was never
+ * created.
+ */
+int m4u_dealloc_mva_sg(int eModuleID, struct sg_table *sg_table,
+		       const unsigned int BufSize, const unsigned int MVA)
+{
+	if (ion_m4u_client != NULL)
+		return m4u_dealloc_mva(ion_m4u_client, eModuleID, MVA);
+
+	m4u_aee_print("ion_m4u_client==NULL !! oops oops~~~~\n");
+	return -1;
+}
+
+/* should not hold client->dataMutex here. */
+/*
+ * Release an MVA allocated by m4u_alloc_mva(): unmap it from the domain's
+ * page table, drop page references taken at alloc time, free the MVA
+ * range, destroy a driver-created sg_table, and release the buf-info.
+ * Returns 0 on success, -EINVAL if any teardown step failed (teardown
+ * still runs to completion either way).
+ */
+int m4u_dealloc_mva(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva)
+{
+	m4u_buf_info_t *pMvaInfo;
+	int ret, is_err = 0;
+	unsigned int size;
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, port, mva);
+
+	/* third arg 1: detach the buffer from the client's list */
+	pMvaInfo = m4u_client_find_buf(client, mva, 1);
+	if (unlikely(!pMvaInfo)) {
+		M4UMSG("error: m4u_dealloc_mva no mva found in client! module=%s, mva=0x%x\n",
+		       m4u_get_port_name(port), mva);
+		m4u_dump_buf_info(NULL);
+		/* NOTE(review): FlagStart with the 0x5a5a5a5a marker looks like it
+		 * was meant to be FlagEnd — confirm against MMProfile usage. */
+		MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, 0x5a5a5a5a, mva);
+		return -EINVAL;
+	}
+
+	pMvaInfo->flags |= M4U_FLAGS_MVA_IN_FREE;
+
+	M4ULOG_MID("m4u_dealloc_mva: larb=%d,module=%s,mva=0x%x, size=%d\n",
+		   m4u_port_2_larb_id(port), m4u_get_port_name(port), mva, pMvaInfo->size);
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+	/* buffer was shared with the secure world: unmap it there first */
+	if (pMvaInfo->flags & M4U_FLAGS_SEC_SHAREABLE)
+		m4u_unmap_nonsec_buffer(mva, pMvaInfo->size);
+#endif
+
+	ret = m4u_unmap(m4u_get_domain_by_port(port), pMvaInfo->mva_align, pMvaInfo->size_align);
+	if (ret) {
+		is_err = 1;
+		M4UMSG("m4u_unmap fail\n");
+	}
+
+	if (0 != pMvaInfo->va) {
+		/* non ion buffer*/
+		/* pages pinned at alloc time only for plain user-space memory;
+		 * vmalloc/kernel pages were never get_page()'d */
+		if (pMvaInfo->va < PAGE_OFFSET) {	/* from user space */
+			if (!(pMvaInfo->va >= VMALLOC_START && pMvaInfo->va <= VMALLOC_END)) {	/* non vmalloc */
+				m4u_put_sgtable_pages(pMvaInfo->sg_table);
+			}
+		}
+	}
+
+	ret = m4u_do_mva_free(mva, pMvaInfo->size);
+	if (ret) {
+		is_err = 1;
+		M4UMSG("do_mva_free fail\n");
+	}
+
+	if (pMvaInfo->va) {	/* buffer is allocated by va */
+		m4u_destroy_sgtable(pMvaInfo->sg_table);
+	}
+
+	if (pMvaInfo->flags & M4U_FLAGS_SEQ_ACCESS) {
+		if (pMvaInfo->seq_id > 0)
+			m4u_invalid_seq_range_by_id(port, pMvaInfo->seq_id);
+	}
+
+	if (is_err) {
+		m4u_aee_print("%s fail: port=%s, mva=0x%x, size=0x%x, va=0x%lx\n", __func__,
+			      m4u_get_port_name(port), mva, pMvaInfo->size, pMvaInfo->va);
+		ret = -EINVAL;
+	} else
+		ret = 0;
+
+	/* cache size before pMvaInfo is freed below */
+	size = pMvaInfo->size;
+
+#ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
+	/* unmap kernel va for debug */
+	{
+		if (pMvaInfo->mapped_kernel_va_for_debug) {
+			M4ULOG_MID("[kernel_va_debug] unmap va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n",
+				   pMvaInfo->mva, pMvaInfo->mapped_kernel_va_for_debug, pMvaInfo->size);
+			m4u_mva_unmap_kernel(pMvaInfo->mva, pMvaInfo->size,
+					     pMvaInfo->mapped_kernel_va_for_debug);
+		}
+	}
+#endif
+
+	m4u_free_buf_info(pMvaInfo);
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagEnd, size, mva);
+
+	return ret;
+}
+
+/*
+ * Flush the entire inner (all CPUs) and outer cache hierarchy.
+ * Heavyweight whole-cache operation; always returns 0.
+ */
+int m4u_dma_cache_flush_all(void)
+{
+	smp_inner_dcache_flush_all();
+	outer_flush_all();
+	return 0;
+}
+
+static struct vm_struct *cache_map_vm_struct;
+/*
+ * Reserve one page worth of kernel virtual address space that
+ * m4u_cache_map_page_va() reuses to temporarily map arbitrary pages.
+ * Returns 0 on success, -ENOMEM if no vmalloc area is available.
+ */
+static int m4u_cache_sync_init(void)
+{
+	cache_map_vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+
+	return cache_map_vm_struct ? 0 : -ENOMEM;
+}
+
+/*
+ * Temporarily map @page at the reserved kernel VA (cache_map_vm_struct).
+ * Returns the kernel address of the mapping, or NULL on failure.
+ * Caller must hold gM4u_cache_sync_user_lock and undo the mapping with
+ * m4u_cache_unmap_page_va().
+ */
+static void *m4u_cache_map_page_va(struct page *page)
+{
+	struct page **pages = &page;
+
+	if (map_vm_area(cache_map_vm_struct, PAGE_KERNEL, pages)) {
+		M4UMSG("error to map page\n");
+		return NULL;
+	}
+
+	return cache_map_vm_struct->addr;
+}
+
+/*
+ * Undo m4u_cache_map_page_va(): unmap the single reserved kernel page.
+ * @va is unused — the mapping always lives at cache_map_vm_struct->addr.
+ */
+static void m4u_cache_unmap_page_va(unsigned int va)
+{
+	unmap_kernel_range((unsigned long)cache_map_vm_struct->addr, PAGE_SIZE);
+}
+
+
+/*
+ * Perform a ranged cache maintenance operation on a kernel-mapped buffer.
+ * CLEAN maps to dmac_map_area(TO_DEVICE), INVALID to
+ * dmac_unmap_area(FROM_DEVICE), FLUSH to dmac_flush_range().
+ * Unknown sync types are ignored. Always returns 0.
+ */
+static int __m4u_cache_sync_kernel(const void *start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
+{
+	switch (sync_type) {
+	case M4U_CACHE_CLEAN_BY_RANGE:
+		dmac_map_area((void *)start, size, DMA_TO_DEVICE);
+		break;
+	case M4U_CACHE_INVALID_BY_RANGE:
+		dmac_unmap_area((void *)start, size, DMA_FROM_DEVICE);
+		break;
+	case M4U_CACHE_FLUSH_BY_RANGE:
+		dmac_flush_range((void *)start, (void *)(start + size));
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Resolve the struct page backing the user virtual address @va of the
+ * current process. On translation failure, dump diagnostics and return
+ * NULL.
+ */
+static struct page *m4u_cache_get_page(unsigned long va)
+{
+	unsigned long page_start = va & (~M4U_PAGE_MASK);
+	phys_addr_t pa = m4u_user_v2p(page_start);
+
+	if (pa == 0) {
+		M4UMSG("error m4u_get_phys user_v2p return 0 on va=0x%lx\n", page_start);
+		m4u_dump_mmaps(page_start);
+		show_pte(current->mm, va);
+		return NULL;
+	}
+
+	return phys_to_page(pa);
+}
+
+/* lock to protect cache_map_vm_struct */
+static DEFINE_MUTEX(gM4u_cache_sync_user_lock);
+
+/*
+ * Cache-sync a user-space range [start, start+size) of the current
+ * process. Each page is resolved to its struct page, temporarily mapped
+ * into the single reserved kernel page (serialized by
+ * gM4u_cache_sync_user_lock), synced via __m4u_cache_sync_kernel(), and
+ * unmapped again. Returns 0 on success, -1 on any failure.
+ */
+static int __m4u_cache_sync_user(unsigned long start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
+{
+	unsigned long map_size, map_start, map_end;
+	unsigned long end = start + size;
+	struct page *page;
+	unsigned long map_va, map_va_align;
+	int ret = 0;
+
+	mutex_lock(&gM4u_cache_sync_user_lock);
+
+	/* lazy (re)initialisation of the reserved mapping area */
+	if (!cache_map_vm_struct) {
+		M4UMSG(" error: cache_map_vm_struct is NULL, retry\n");
+		m4u_cache_sync_init();
+	}
+	if (!cache_map_vm_struct) {
+		M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n");
+		ret = -1;
+		goto out;
+	}
+
+	/* walk the range one page at a time; first/last chunks may be partial */
+	map_start = start;
+	while (map_start < end) {
+		map_end = min(((map_start & (~M4U_PAGE_MASK)) + PAGE_SIZE), end);
+		map_size = map_end - map_start;
+
+		page = m4u_cache_get_page(map_start);
+		if (!page) {
+			ret = -1;
+			goto out;
+		}
+
+		map_va = (unsigned long)m4u_cache_map_page_va(page);
+		if (!map_va) {
+			ret = -1;
+			goto out;
+		}
+
+		/* keep the user address' in-page offset in the kernel alias */
+		map_va_align = map_va | (map_start & (PAGE_SIZE - 1));
+
+		__m4u_cache_sync_kernel((void *)map_va_align, map_size, sync_type);
+
+		m4u_cache_unmap_page_va(map_va);
+		map_start = map_end;
+	}
+
+out:
+	mutex_unlock(&gM4u_cache_sync_user_lock);
+
+	return ret;
+}
+
+/*
+ * Ranged cache sync for a buffer known to the m4u: inner caches are
+ * handled by virtual address (user vs kernel path chosen from @va), and,
+ * when an outer cache is configured, the outer cache is maintained by
+ * physical address using the buffer's sg_table @table.
+ * Returns the inner-sync result (0 or -1).
+ */
+int m4u_cache_sync_by_range(unsigned long va, unsigned int size,
+			    M4U_CACHE_SYNC_ENUM sync_type, struct sg_table *table)
+{
+	int ret = 0;
+
+	if (va < PAGE_OFFSET) {	/* from user space */
+		ret = __m4u_cache_sync_user(va, size, sync_type);
+	} else {
+		ret = __m4u_cache_sync_kernel((void *)va, size, sync_type);
+	}
+
+#ifdef CONFIG_OUTER_CACHE
+	{
+		struct scatterlist *sg;
+		int i;
+
+		/* outer cache works on physical ranges, one sg entry at a time */
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			unsigned int len = sg_dma_len(sg);
+			phys_addr_t phys_addr = get_sg_phys(sg);
+
+			if (sync_type == M4U_CACHE_CLEAN_BY_RANGE)
+				outer_clean_range(phys_addr, phys_addr + len);
+			else if (sync_type == M4U_CACHE_INVALID_BY_RANGE)
+				outer_inv_range(phys_addr, phys_addr + len);
+			else if (sync_type == M4U_CACHE_FLUSH_BY_RANGE)
+				outer_flush_range(phys_addr, phys_addr + len);
+		}
+	}
+#endif
+
+	return ret;
+}
+
+/**
+ notes: only mva allocated by m4u_alloc_mva can use this function.
+ if buffer is allocated by ion, please use ion_cache_sync
+**/
+/*
+ * Public cache-sync entry point for buffers whose MVA came from
+ * m4u_alloc_mva() (ion buffers use ion_cache_sync instead — see the note
+ * above this function).
+ *
+ * Ranged sync types (< M4U_CACHE_CLEAN_ALL) validate @va/@size/@mva
+ * against the recorded buffer before syncing; the *_ALL types operate on
+ * the whole cache hierarchy and ignore the buffer arguments.
+ * Returns 0 on success, -1 on lookup/validation failure or for the
+ * rejected M4U_CACHE_INVALID_ALL.
+ */
+int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
+		   unsigned long va, unsigned int size, unsigned int mva,
+		   M4U_CACHE_SYNC_ENUM sync_type)
+{
+	int ret = 0;
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagStart, va, mva);
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagPulse, size, ((sync_type)<<24) | port);
+
+	M4ULOG_MID("cache_sync port=%s, va=0x%lx, size=0x%x, mva=0x%x, type=%d\n",
+		   m4u_get_port_name(port), va, size, mva, sync_type);
+
+	if (sync_type < M4U_CACHE_CLEAN_ALL) {
+		m4u_buf_info_t *pMvaInfo = NULL;
+
+		if (client)
+			pMvaInfo = m4u_client_find_buf(client, mva, 0);
+
+		/* some user may sync mva from other client (eg. ovl may not know
+		 * who allocated this buffer, but he need to sync cache). */
+		/* we make a workaround here by query mva from mva manager */
+		if (!pMvaInfo)
+			pMvaInfo = mva_get_priv(mva);
+
+		if (!pMvaInfo) {
+			M4UMSG("cache sync fail, cannot find buf: mva=0x%x, client=0x%p\n", mva,
+			       client);
+			m4u_dump_buf_info(NULL);
+			MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, 0, 0);
+			return -1;
+		}
+
+		/* refuse partial or mismatched syncs: args must match the record */
+		if ((pMvaInfo->size != size) || (pMvaInfo->va != va)) {
+			M4UMSG("cache_sync fail: expect mva=0x%x,size=0x%x,va=0x%lx, but mva=0x%x,size=0x%x,va=0x%lx\n",
+			       pMvaInfo->mva, pMvaInfo->size, pMvaInfo->va, mva, size, va);
+			MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd,
+				       pMvaInfo->va, pMvaInfo->mva);
+			return -1;
+		}
+
+		if ((va | size) & (L1_CACHE_BYTES - 1)) {	/* va size should be cache line align */
+			M4UMSG("warning: cache_sync not align: va=0x%lx,size=0x%x,align=0x%x\n",
+			       va, size, L1_CACHE_BYTES);
+		}
+
+		ret = m4u_cache_sync_by_range(va, size, sync_type, pMvaInfo->sg_table);
+	} else {
+		/* All cache operation */
+		if (sync_type == M4U_CACHE_CLEAN_ALL) {
+			smp_inner_dcache_flush_all();
+			outer_clean_all();
+		} else if (sync_type == M4U_CACHE_INVALID_ALL) {
+			M4UMSG("no one can use invalid all!\n");
+			return -1;
+		} else if (sync_type == M4U_CACHE_FLUSH_ALL) {
+			smp_inner_dcache_flush_all();
+			outer_flush_all();
+		}
+	}
+
+	MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, size, mva);
+	return ret;
+}
+
+/*
+ * Thin wrapper translating the m4u DMA direction enum to the
+ * corresponding dmac_map_area() call. Unknown directions are ignored.
+ */
+void m4u_dma_map_area(void *start, size_t size, M4U_DMA_DIR dir)
+{
+	switch (dir) {
+	case M4U_DMA_FROM_DEVICE:
+		dmac_map_area(start, size, DMA_FROM_DEVICE);
+		break;
+	case M4U_DMA_TO_DEVICE:
+		dmac_map_area(start, size, DMA_TO_DEVICE);
+		break;
+	case M4U_DMA_BIDIRECTIONAL:
+		dmac_map_area(start, size, DMA_BIDIRECTIONAL);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Thin wrapper translating the m4u DMA direction enum to the
+ * corresponding dmac_unmap_area() call. Unknown directions are ignored.
+ */
+void m4u_dma_unmap_area(void *start, size_t size, M4U_DMA_DIR dir)
+{
+	switch (dir) {
+	case M4U_DMA_FROM_DEVICE:
+		dmac_unmap_area(start, size, DMA_FROM_DEVICE);
+		break;
+	case M4U_DMA_TO_DEVICE:
+		dmac_unmap_area(start, size, DMA_TO_DEVICE);
+		break;
+	case M4U_DMA_BIDIRECTIONAL:
+		dmac_unmap_area(start, size, DMA_BIDIRECTIONAL);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Perform a dmac map/unmap cache operation over every page of a buffer
+ * identified by @mva, by temporarily mapping each page through the
+ * reserved kernel page (serialized by gM4u_cache_sync_user_lock).
+ *
+ * Fix: the original returned -EFAULT from inside the sg walk without
+ * releasing gM4u_cache_sync_user_lock, leaking the mutex; all post-lock
+ * error paths now exit through a single unlock label.
+ *
+ * Returns 0 on success, -1 on lookup/validation failure, -ENOMEM when no
+ * mapping area exists, -EFAULT on page-resolution failure.
+ */
+long m4u_dma_op(m4u_client_t *client, M4U_PORT_ID port,
+		unsigned long va, unsigned int size, unsigned int mva,
+		M4U_DMA_TYPE dma_type, M4U_DMA_DIR dma_dir)
+{
+	struct scatterlist *sg;
+	int i, j;
+	struct sg_table *table = NULL;
+	int npages = 0;
+	long ret = 0;
+	unsigned long start = -1;
+
+	m4u_buf_info_t *pMvaInfo = NULL;
+
+	if (client)
+		pMvaInfo = m4u_client_find_buf(client, mva, 0);
+
+	/* some user may sync mva from other client
+	   (eg. ovl may not know who allocated this buffer,
+	   but he need to sync cache).
+	   we make a workaround here by query mva from mva manager */
+	if (!pMvaInfo)
+		pMvaInfo = mva_get_priv(mva);
+
+	if (!pMvaInfo) {
+		M4UMSG("m4u dma fail, cannot find buf: mva=0x%x, client=0x%p.\n", mva, client);
+		return -1;
+	}
+
+	if ((pMvaInfo->size != size) || (pMvaInfo->va != va)) {
+		M4UMSG("m4u dma fail: expect mva=0x%x,size=0x%x,va=0x%lx, but mva=0x%x,size=0x%x,va=0x%lx\n",
+		       pMvaInfo->mva, pMvaInfo->size, pMvaInfo->va, mva, size, va);
+		return -1;
+	}
+
+	if ((va|size) & (L1_CACHE_BYTES-1))	/* va size should be cache line align */
+		M4UMSG("warning: cache_sync not align: va=0x%lx,size=0x%x,align=0x%x\n",
+		       va, size, L1_CACHE_BYTES);
+
+	table = pMvaInfo->sg_table;
+	npages = M4U_GET_PAGE_NUM(va, size);
+
+	mutex_lock(&gM4u_cache_sync_user_lock);
+
+	if (!cache_map_vm_struct) {
+		M4UMSG(" error: cache_map_vm_struct is NULL, retry\n");
+		m4u_cache_sync_init();
+	}
+
+	if (!cache_map_vm_struct) {
+		M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+
+		if (!page) {
+			/* PFN-mapped entry: recover the page from the dma address */
+			phys_addr_t pa = sg_dma_address(sg);
+
+			if (!pa) {
+				M4UMSG("m4u_dma_op fail, VM_PFNMAP, no page.\n");
+				ret = -EFAULT;
+				goto out;
+			}
+			page = phys_to_page(pa);
+			if (!pfn_valid(page_to_pfn(page))) {
+				M4UMSG("m4u_dma_op fail, VM_PFNMAP, no page, va = 0x%lx, size = 0x%x, npages = 0x%x.\n",
+				       va, size, npages);
+				ret = -EFAULT;
+				goto out;
+			}
+		}
+
+		BUG_ON(i >= npages);
+		for (j = 0; j < npages_this_entry; j++) {
+			start = (unsigned long) m4u_cache_map_page_va(page++);
+
+			if (IS_ERR_OR_NULL((void *) start)) {
+				M4UMSG("cannot do cache sync: ret=%lu\n", start);
+				ret = -EFAULT;
+				goto out;
+			}
+
+			if (dma_type == M4U_DMA_MAP_AREA)
+				m4u_dma_map_area((void *)start, PAGE_SIZE, dma_dir);
+			else if (dma_type == M4U_DMA_UNMAP_AREA)
+				m4u_dma_unmap_area((void *)start, PAGE_SIZE, dma_dir);
+
+			m4u_cache_unmap_page_va(start);
+		}
+	}
+
+out:
+	mutex_unlock(&gM4u_cache_sync_user_lock);
+
+	return ret;
+}
+
+/* Stub: register/state dump is not implemented here; always returns 0. */
+int m4u_dump_info(int m4u_index)
+{
+	return 0;
+}
+
+/*
+ * Report the page-directory base (kernel VA and PA) and its size for the
+ * domain owning @port. @client is unused.
+ */
+void m4u_get_pgd(m4u_client_t *client, M4U_PORT_ID port, void **pgd_va, void **pgd_pa,
+		 unsigned int *size)
+{
+	m4u_domain_t *dom = m4u_get_domain_by_port(port);
+
+	*pgd_va = dom->pgd;
+	*pgd_pa = (void *)(uintptr_t)dom->pgd_pa;
+	*size = M4U_PGD_SIZE;
+}
+
+/*
+ * Translate @mva to a physical address by walking the page table of the
+ * domain owning @port. @client is unused.
+ */
+unsigned long m4u_mva_to_pa(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva)
+{
+	return m4u_get_pte(m4u_get_domain_by_port(port), mva);
+}
+
+/*
+ * Look up the recorded start address and size of the allocation that
+ * contains @mva. Returns 0 on success, -1 on NULL output pointers,
+ * -2 (with outputs zeroed) when the MVA is unknown.
+ */
+int m4u_query_mva_info(unsigned int mva, unsigned int size, unsigned int *real_mva,
+		       unsigned int *real_size)
+{
+	m4u_buf_info_t *info;
+
+	if (real_mva == NULL || real_size == NULL)
+		return -1;
+
+	info = mva_get_priv(mva);
+	if (info == NULL) {
+		M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __func__, mva, size);
+		*real_mva = 0;
+		*real_size = 0;
+		return -2;
+	}
+
+	*real_mva = info->mva;
+	*real_size = info->size;
+	return 0;
+}
+EXPORT_SYMBOL(m4u_query_mva_info);
+
+/***********************************************************/
+/** map mva buffer to kernel va buffer
+* this function should ONLY used for DEBUG
+************************************************************/
+/*
+ * DEBUG-ONLY (see banner above): map the pages behind [mva, mva+size)
+ * into a contiguous kernel VA range with vmap(). On success *map_va
+ * points at the byte corresponding to @mva (vmap base + in-page offset)
+ * and *map_size is @size; undo with m4u_mva_unmap_kernel().
+ * Returns 0 on success, -1 on lookup/alloc/short-table failure, -2 on
+ * vmap failure.
+ */
+int m4u_mva_map_kernel(unsigned int mva, unsigned int size, unsigned long *map_va,
+		       unsigned int *map_size)
+{
+	m4u_buf_info_t *pMvaInfo;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, j, k, ret = 0;
+	struct page **pages;
+	unsigned int page_num;
+	void *kernel_va;
+	unsigned int kernel_size;
+
+	pMvaInfo = mva_get_priv(mva);
+
+	if (!pMvaInfo || pMvaInfo->size < size) {
+		M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __func__, mva, size);
+		if (pMvaInfo)
+			M4UMSG("pMvaInfo: mva=0x%x, size=0x%x\n", pMvaInfo->mva, pMvaInfo->size);
+		return -1;
+	}
+
+	table = pMvaInfo->sg_table;
+
+	page_num = M4U_GET_PAGE_NUM(mva, size);
+	pages = vmalloc(sizeof(struct page *) * page_num);
+	if (pages == NULL) {
+		M4UMSG("mva_map_kernel:error to vmalloc for %d\n",
+		       (unsigned int)sizeof(struct page *) * page_num);
+		return -1;
+	}
+
+	/* flatten the sg_table into a plain page array for vmap() */
+	k = 0;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page_start;
+		int pages_in_this_sg = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		/* no dma address recorded: fall back to the sg entry length */
+		if (0 == sg_dma_address(sg))
+			pages_in_this_sg = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+#endif
+		page_start = sg_page(sg);
+		for (j = 0; j < pages_in_this_sg; j++) {
+			pages[k++] = page_start++;
+			if (k >= page_num)
+				goto get_pages_done;
+		}
+	}
+
+get_pages_done:
+	if (k < page_num) {
+		/* this should not happen, because we have checked the size before. */
+		M4UMSG("mva_map_kernel:only get %d pages: mva=0x%x, size=0x%x, pg_num=%d\n", k, mva,
+		       size, page_num);
+		ret = -1;
+		goto error_out;
+	}
+
+	kernel_va = 0;
+	kernel_size = 0;
+	kernel_va = vmap(pages, page_num, VM_MAP, PAGE_KERNEL);
+	if (kernel_va == 0 || (unsigned long)kernel_va & M4U_PAGE_MASK) {
+		M4UMSG("mva_map_kernel:vmap fail: page_num=%d, kernel_va=0x%p\n", page_num,
+		       kernel_va);
+		ret = -2;
+		goto error_out;
+	}
+
+	/* point at mva's byte, not the page boundary (GNU void* arithmetic) */
+	kernel_va += ((unsigned long)mva & (M4U_PAGE_MASK));
+
+	*map_va = (unsigned long)kernel_va;
+	*map_size = size;
+
+error_out:
+	vfree(pages);
+	M4ULOG_LOW("mva_map_kernel:mva=0x%x,size=0x%x,map_va=0x%lx,map_size=0x%x\n",
+		   mva, size, *map_va, *map_size);
+
+	return ret;
+}
+EXPORT_SYMBOL(m4u_mva_map_kernel);
+
+/*
+ * Undo m4u_mva_map_kernel(): vunmap() the page-aligned base of @map_va
+ * (the in-page offset added at map time is masked off). Always returns 0.
+ */
+int m4u_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned long map_va)
+{
+	M4ULOG_LOW("mva_unmap_kernel:mva=0x%x,size=0x%x,va=0x%lx\n", mva, size, map_va);
+	vunmap((void *)(map_va & (~M4U_PAGE_MASK)));
+	return 0;
+}
+EXPORT_SYMBOL(m4u_mva_unmap_kernel);
+
+/*
+ * Device-file open: create a per-fd m4u client and stash it in
+ * file->private_data. Returns -ENOMEM if the client cannot be created.
+ */
+static int MTK_M4U_open(struct inode *inode, struct file *file)
+{
+	m4u_client_t *client = m4u_create_client();
+
+	if (IS_ERR_OR_NULL(client)) {
+		M4UMSG("createclientfail\n");
+		return -ENOMEM;
+	}
+
+	file->private_data = client;
+	return 0;
+}
+
+/* Device-file release: tear down the per-fd client created in MTK_M4U_open. */
+static int MTK_M4U_release(struct inode *inode, struct file *file)
+{
+	m4u_client_t *client = file->private_data;
+
+	m4u_destroy_client(client);
+	return 0;
+}
+
+/* file_operations .flush hook: nothing to do; always succeeds. */
+static int MTK_M4U_flush(struct file *filp, fl_owner_t a_id)
+{
+	return 0;
+}
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+
+#define TPLAY_DEV_NAME "tz_m4u"
+
+#define M4U_DRV_UUID {{0x90, 0x73, 0xF0, 0x3A, 0x96, 0x18, 0x38, 0x3B, 0xB1, 0x85, 0x6E, 0xB3, 0xF9, 0x90, 0xBA, 0xBD} }
+static const struct mc_uuid_t m4u_drv_uuid = M4U_DRV_UUID;
+static struct mc_session_handle m4u_dci_session;
+static m4u_msg_t *m4u_dci_msg;
+static DEFINE_MUTEX(m4u_dci_mutex);
+
+#define M4U_TL_UUID {{0x98, 0xfb, 0x95, 0xbc, 0xb4, 0xbf, 0x42, 0xd2, 0x64, 0x73, 0xea, 0xe4, 0x86, 0x90, 0xd7, 0xea} }
+static const struct mc_uuid_t m4u_tl_uuid = M4U_TL_UUID;
+static struct mc_session_handle m4u_tci_session;
+static m4u_msg_t *m4u_tci_msg;
+static DEFINE_MUTEX(m4u_tci_mutex);
+
+/*
+ * Open the m4u trustlet (TCI) session: allocate world-shared memory for
+ * the TCI message buffer, then open the session against it.
+ * Returns 0 on success, -1 on failure.
+ *
+ * Fix: on mc_open_session() failure the WSM buffer allocated just above
+ * was leaked; it is now freed and the pointer cleared.
+ */
+static int m4u_open_trustlet(uint32_t deviceId)
+{
+
+	enum mc_result mcRet;
+
+	/* Initialize session handle data */
+	memset(&m4u_tci_session, 0, sizeof(m4u_tci_session));
+
+	mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_tci_msg, 0);
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: mc_malloc_wsm tci fail: %d\n", mcRet);
+		return -1;
+	}
+
+	/* Open session the trustlet */
+	m4u_tci_session.device_id = deviceId;
+	mcRet = mc_open_session(&m4u_tci_session,
+				&m4u_tl_uuid,
+				(uint8_t *) m4u_tci_msg,
+				(uint32_t) sizeof(m4u_msg_t));
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
+		/* don't leak the WSM buffer when the session cannot be opened */
+		mc_free_wsm(deviceId, (uint8_t *) m4u_tci_msg);
+		m4u_tci_msg = NULL;
+		return -1;
+	}
+
+	M4UMSG("tz_m4u: open TCI session success\n");
+
+	return 0;
+}
+
+/*
+ * Tear down the trustlet (TCI) session: free the world-shared message
+ * buffer and close the session. Returns 0 on success, -1 on failure.
+ * NOTE(review): the WSM buffer is freed *before* the session that uses
+ * it is closed — this looks reversed relative to the usual teardown
+ * order; confirm against the MobiCore driver API before changing.
+ */
+int m4u_close_trustlet(uint32_t deviceId)
+{
+	enum mc_result mcRet;
+
+	mcRet = mc_free_wsm(deviceId, (uint8_t *) m4u_tci_msg);
+	if (mcRet) {
+		M4UMSG("tz_m4u: free tci struct fail: %d\n", mcRet);
+		return -1;
+	}
+
+	/* Close session */
+	mcRet = mc_close_session(&m4u_tci_session);
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: mc_close_session returned: %d\n", mcRet);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Send the command already staged in @m4u_msg to the secure world over
+ * @m4u_session and block until the response notification arrives.
+ * Returns MC_DRV_OK (0) on success, -1 for a NULL message buffer, or the
+ * failing mc_* result code.
+ */
+static int m4u_exec_cmd(struct mc_session_handle *m4u_session, m4u_msg_t *m4u_msg)
+{
+	enum mc_result ret;
+
+	if (m4u_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		return -1;
+	}
+
+	M4UMSG("Notify %x\n", m4u_msg->cmd);
+
+	ret = mc_notify(m4u_session);
+	if (ret != MC_DRV_OK) {
+		m4u_aee_print("tz_m4u Notify failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = mc_wait_notification(m4u_session, MC_INFINITE_TIMEOUT);
+	if (ret != MC_DRV_OK) {
+		m4u_aee_print("Wait for response notification failed: 0x%x\n", ret);
+		return ret;
+	}
+
+	M4UMSG("get_resp %x\n", m4u_msg->cmd);
+	return ret;
+}
+
+/*
+ * Run CMD_M4UTL_INIT in the trustlet: hand the secure world the
+ * physical address of the non-secure page table (and the L2 setting) so
+ * it can initialise its side. sec_pt_pa is deliberately 0 here.
+ * Returns the trustlet's rsp on success, a negative/driver error code
+ * otherwise. Serialized by m4u_tci_mutex.
+ */
+static int __m4u_sec_init(void)
+{
+	int ret;
+	void *pgd_va;
+	unsigned long pt_pa_nonsec;
+	unsigned int size;
+
+	mutex_lock(&m4u_tci_mutex);
+	if (NULL == m4u_tci_msg) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+		goto out;
+	}
+
+	m4u_get_pgd(NULL, 0, &pgd_va, (void *)&pt_pa_nonsec, &size);
+
+	m4u_tci_msg->cmd = CMD_M4UTL_INIT;
+	m4u_tci_msg->init_param.nonsec_pt_pa = pt_pa_nonsec;
+	m4u_tci_msg->init_param.l2_en = gM4U_L2_enable;
+	m4u_tci_msg->init_param.sec_pt_pa = 0;	/* m4u_alloc_sec_pt_for_debug(); */
+	M4UMSG("%s call m4u_exec_cmd CMD_M4UTL_INIT, nonsec_pt_pa: 0x%lx\n", __func__,
+	       pt_pa_nonsec);
+	ret = m4u_exec_cmd(&m4u_tci_session, m4u_tci_msg);
+	if (ret) {
+		M4UMSG("m4u exec command fail\n");
+		ret = -1;
+		goto out;
+	}
+
+	ret = m4u_tci_msg->rsp;
+out:
+	mutex_unlock(&m4u_tci_mutex);
+	return ret;
+}
+
+/* ------------------------------------------------------------- */
+#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
+/*
+ * sectrace callback: ask the secure driver to map the systrace buffer
+ * [pa, pa+size). Returns the driver's rsp on success, an error code or
+ * -1 otherwise. Serialized by m4u_dci_mutex.
+ */
+static int dr_map(unsigned long pa, size_t size)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("error: m4u_dci_msg==null\n");
+		ret = -1;
+	} else {
+		memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
+		m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_MAP;
+		m4u_dci_msg->systrace_param.pa = pa;
+		m4u_dci_msg->systrace_param.size = size;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * sectrace callback: ask the secure driver to unmap the systrace buffer
+ * [pa, pa+size). Returns the driver's rsp on success, an error code or
+ * -1 otherwise. Serialized by m4u_dci_mutex.
+ */
+static int dr_unmap(unsigned long pa, size_t size)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("error: m4u_dci_msg==null\n");
+		ret = -1;
+	} else {
+		memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
+		m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_UNMAP;
+		m4u_dci_msg->systrace_param.pa = pa;
+		m4u_dci_msg->systrace_param.size = size;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * sectrace callback: trigger a systrace transaction in the secure
+ * driver (no payload). Returns the driver's rsp on success, an error
+ * code or -1 otherwise. Serialized by m4u_dci_mutex.
+ */
+static int dr_transact(void)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("error: m4u_dci_msg==null\n");
+		ret = -1;
+	} else {
+		memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
+		m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_TRANSACT;
+		m4u_dci_msg->systrace_param.pa = 0;
+		m4u_dci_msg->systrace_param.size = 0;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+#endif
+/* ------------------------------------------------------------- */
+
+/*
+ * One-time initialisation of the secure (TEE) side of the m4u driver:
+ * open the MobiCore device, allocate the DCI message buffer, open the
+ * DCI session, then briefly open the trustlet (TCI) session to run
+ * CMD_M4UTL_INIT before closing it again. Idempotent via m4u_tee_en.
+ * Returns 0 on success (or if already initialised), -1 on failure.
+ *
+ * Fix: the settle-delay loop incremented a *volatile* local 'j' that was
+ * never initialised (reading an uninitialised variable is UB); it is now
+ * initialised to 0.
+ */
+int m4u_sec_init(void)
+{
+	uint32_t deviceId = MC_DEVICE_ID_DEFAULT;
+	enum mc_result mcRet;
+
+	if (m4u_tee_en) {
+		M4UMSG("warning: m4u secure has been inited, %d\n", m4u_tee_en);
+		return 0;
+	}
+
+	M4UMSG("call m4u_sec_init in nornal m4u driver\n");
+
+	/* Initialize session handle data */
+	memset(&m4u_dci_session, 0, sizeof(m4u_dci_session));
+
+	/* Open MobiCore device */
+	mcRet = mc_open_device(deviceId);
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: error mc_open_device returned: %d\n", mcRet);
+		/* MC_DRV_ERR_INVALID_OPERATION: device already open - carry on */
+		if (mcRet != MC_DRV_ERR_INVALID_OPERATION)
+			return -1;
+	}
+
+	/* Allocating WSM for DCI */
+	mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_dci_msg, 0);
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: mc_malloc_wsm returned: %d\n", mcRet);
+		return -1;
+	}
+
+	/* Open session the trustlet */
+	m4u_dci_session.device_id = deviceId;
+	mcRet = mc_open_session(&m4u_dci_session,
+				&m4u_drv_uuid,
+				(uint8_t *) m4u_dci_msg,
+				(uint32_t) sizeof(m4u_msg_t));
+	if (MC_DRV_OK != mcRet) {
+		M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
+		return -1;
+	}
+
+	M4UMSG("tz_m4u: open DCI session returned: %d\n", mcRet);
+
+	{
+		/* crude busy-wait settle delay (volatile defeats optimisation) */
+		volatile int i, j = 0;
+
+		for (i = 0; i < 10000000; i++)
+			j++;
+	}
+
+	m4u_open_trustlet(deviceId);
+	__m4u_sec_init();
+#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
+	{
+		union callback_func callback;
+
+		callback.dr.map = dr_map;
+		callback.dr.unmap = dr_unmap;
+		callback.dr.transact = dr_transact;
+		init_sectrace("M4U", if_dci, usage_dr, 64, &callback);
+	}
+#endif
+	m4u_close_trustlet(deviceId);
+
+	m4u_tee_en = 1;
+
+	return 0;
+}
+
+/*
+ * Forward a (non-secure) port configuration to the secure-world driver
+ * over the DCI session. Returns the driver's rsp on success, -1 on
+ * failure or a missing DCI buffer. Serialized by m4u_dci_mutex.
+ */
+int m4u_config_port_tee(M4U_PORT_STRUCT *pM4uPort)	/* native */
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("error: m4u_dci_msg==null\n");
+		ret = -1;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_CFG_PORT;
+		m4u_dci_msg->port_param.port = pM4uPort->ePortID;
+		m4u_dci_msg->port_param.virt = pM4uPort->Virtuality;
+		m4u_dci_msg->port_param.direction = pM4uPort->Direction;
+		m4u_dci_msg->port_param.distance = pM4uPort->Distance;
+		m4u_dci_msg->port_param.sec = 0;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Push a whole port-configuration array to the secure-world driver over
+ * the DCI session. Returns the driver's rsp on success, -1 on failure
+ * or a missing DCI buffer. Serialized by m4u_dci_mutex.
+ */
+int m4u_config_port_array_tee(unsigned char *port_array)	/* native */
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("error: m4u_dci_msg==null\n");
+		ret = -1;
+	} else {
+		memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
+		memcpy(m4u_dci_msg->port_array_param.m4u_port_array, port_array,
+		       sizeof(m4u_dci_msg->port_array_param.m4u_port_array));
+
+		m4u_dci_msg->cmd = CMD_M4U_CFG_PORT_ARRAY;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Ask the secure-world driver to unmap a buffer that was shared into the
+ * secure page table (see m4u_register_mva_share). Returns the driver's
+ * rsp, -1 on command failure, or MC_DRV_ERR_NO_FREE_MEMORY if the DCI
+ * buffer is missing. Serialized by m4u_dci_mutex.
+ */
+static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_UNMAP_NONSEC_BUFFER;
+		m4u_dci_msg->buf_param.mva = mva;
+		m4u_dci_msg->buf_param.size = size;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Ask the secure-world driver to back up the registers of local
+ * arbiter @larb_idx. Returns the driver's rsp, -1 on command failure,
+ * or MC_DRV_ERR_NO_FREE_MEMORY if the DCI buffer is missing.
+ * Serialized by m4u_dci_mutex.
+ */
+int m4u_larb_backup_sec(unsigned int larb_idx)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_LARB_BACKUP;
+		m4u_dci_msg->larb_param.larb_idx = larb_idx;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Ask the secure-world driver to restore the registers of local
+ * arbiter @larb_idx. Returns the driver's rsp, -1 on command failure,
+ * or MC_DRV_ERR_NO_FREE_MEMORY if the DCI buffer is missing.
+ * Serialized by m4u_dci_mutex.
+ */
+int m4u_larb_restore_sec(unsigned int larb_idx)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_LARB_RESTORE;
+		m4u_dci_msg->larb_param.larb_idx = larb_idx;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Ask the secure-world driver to back up the m4u registers (used around
+ * suspend). Returns the driver's rsp, -1 on command failure, or
+ * MC_DRV_ERR_NO_FREE_MEMORY if the DCI buffer is missing.
+ * Serialized by m4u_dci_mutex.
+ */
+static int m4u_reg_backup_sec(void)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_REG_BACKUP;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/*
+ * Ask the secure-world driver to restore the m4u registers (used around
+ * resume). Returns the driver's rsp, -1 on command failure, or
+ * MC_DRV_ERR_NO_FREE_MEMORY if the DCI buffer is missing.
+ * Serialized by m4u_dci_mutex.
+ */
+static int m4u_reg_restore_sec(void)
+{
+	int ret;
+
+	mutex_lock(&m4u_dci_mutex);
+	if (m4u_dci_msg == NULL) {
+		M4UMSG("%s TCI/DCI error\n", __func__);
+		ret = MC_DRV_ERR_NO_FREE_MEMORY;
+	} else {
+		m4u_dci_msg->cmd = CMD_M4U_REG_RESTORE;
+
+		ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+		if (ret) {
+			M4UMSG("m4u exec command fail\n");
+			ret = -1;
+		} else {
+			ret = m4u_dci_msg->rsp;
+		}
+	}
+	mutex_unlock(&m4u_dci_mutex);
+	return ret;
+}
+
+/* static void m4u_early_suspend(struct early_suspend *h)
+{
+ M4UMSG("m4u_early_suspend +, %d\n", m4u_tee_en);
+
+ if (m4u_tee_en)
+ m4u_reg_backup_sec();
+ M4UMSG("m4u_early_suspend -\n");
+}
+
+static void m4u_late_resume(struct early_suspend *h)
+{
+ M4UMSG("m4u_late_resume +, %d\n", m4u_tee_en);
+
+ if (m4u_tee_en)
+ m4u_reg_restore_sec();
+
+ M4UMSG("m4u_late_resume -\n");
+}
+
+static struct early_suspend mtk_m4u_early_suspend_driver = {
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 251,
+ .suspend = m4u_early_suspend,
+ .resume = m4u_late_resume,
+}; */
+
+/*
+ * Screen-off hook (driven from the FB notifier below, replacing the old
+ * earlysuspend framework): back up secure registers while the TEE m4u
+ * path is active so they survive the power-down.
+ */
+static void m4u_early_suspend(void)
+{
+ M4UMSG("m4u_early_suspend +, %d\n", m4u_tee_en);
+
+ if (m4u_tee_en)
+ m4u_reg_backup_sec();
+ M4UMSG("m4u_early_suspend -\n");
+}
+
+/*
+ * Screen-on hook (FB notifier): restore the secure registers that
+ * m4u_early_suspend() saved, when the TEE m4u path is active.
+ */
+static void m4u_late_resume(void)
+{
+ M4UMSG("m4u_late_resume +, %d\n", m4u_tee_en);
+
+ if (m4u_tee_en)
+ m4u_reg_restore_sec();
+
+ M4UMSG("m4u_late_resume -\n");
+}
+
+static struct notifier_block m4u_fb_notifier;
+/*
+ * Framebuffer blank/unblank notifier: maps FB blank states onto the
+ * suspend/resume helpers above.  Registered from MTK_M4U_Init() when
+ * M4U_TEE_SERVICE_ENABLE is set.  Only FB_EVENT_BLANK is of interest.
+ */
+static int m4u_fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ int blank;
+
+ M4UMSG("m4u_fb_notifier_callback %ld, %d\n", event , FB_EVENT_BLANK);
+
+ if (event != FB_EVENT_BLANK)
+ return 0;
+
+ /* for FB_EVENT_BLANK, evdata->data points at the blank mode */
+ blank = *(int *)evdata->data;
+
+ switch (blank) {
+ case FB_BLANK_UNBLANK:
+ case FB_BLANK_NORMAL:
+ m4u_late_resume();
+ break;
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* partial blanks: nothing to do */
+ break;
+ case FB_BLANK_POWERDOWN:
+ m4u_early_suspend();
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if 1
+/*
+ * Ask the secure world to map a non-secure buffer [mva, mva+size) into
+ * its tables.  NOTE(review): the "port" parameter is currently unused by
+ * this path — only mva/size are forwarded.  Same locking and error
+ * convention as the other DCI helpers above.
+ */
+int m4u_map_nonsec_buf(int port, unsigned int mva, unsigned int size)
+{
+ int ret;
+
+ mutex_lock(&m4u_dci_mutex);
+
+ if (NULL == m4u_dci_msg) {
+ M4UMSG("%s TCI/DCI error\n", __func__);
+ ret = MC_DRV_ERR_NO_FREE_MEMORY;
+ goto out;
+ }
+
+ m4u_dci_msg->cmd = CMD_M4U_MAP_NONSEC_BUFFER;
+ m4u_dci_msg->buf_param.mva = mva;
+ m4u_dci_msg->buf_param.size = size;
+
+ ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
+ if (ret) {
+ M4UMSG("m4u exec command fail\n");
+ ret = -1;
+ goto out;
+ }
+ ret = m4u_dci_msg->rsp;
+
+out:
+ mutex_unlock(&m4u_dci_mutex);
+ return ret;
+}
+#endif
+
+#endif
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+static DEFINE_MUTEX(gM4u_sec_init);
+#endif
+
+/*
+ * Main ioctl dispatcher for the m4u character device (/proc/m4u or the
+ * misc device).  Each command copies its argument struct in from user
+ * space, calls the matching core helper, and for ALLOC_MVA copies the
+ * resulting MVA back out.  Port configuration is serialized against
+ * secure-world init via gM4u_sec_init in TEE builds.
+ */
+static long MTK_M4U_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ M4U_MOUDLE_STRUCT m4u_module;
+ M4U_PORT_STRUCT m4u_port;
+ M4U_PORT_ID PortID;
+ M4U_PORT_ID ModuleID;
+ M4U_CACHE_STRUCT m4u_cache_data;
+ M4U_DMA_STRUCT m4u_dma_data;
+ m4u_client_t *client = filp->private_data;
+
+ switch (cmd) {
+ case MTK_M4U_T_POWER_ON:
+ ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_POWER_ON,copy_from_user failed,%d\n", ret);
+ return -EFAULT;
+ }
+ ret = m4u_power_on(ModuleID);
+ break;
+
+ case MTK_M4U_T_POWER_OFF:
+ ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_POWER_OFF,copy_from_user failed,%d\n", ret);
+ return -EFAULT;
+ }
+ ret = m4u_power_off(ModuleID);
+ break;
+
+ case MTK_M4U_T_ALLOC_MVA:
+ ret = copy_from_user(&m4u_module, (void *)arg, sizeof(M4U_MOUDLE_STRUCT));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = m4u_alloc_mva(client, m4u_module.port, m4u_module.BufAddr, NULL,
+ m4u_module.BufSize, m4u_module.prot, m4u_module.flags,
+ &(m4u_module.MVAStart));
+
+ if (ret)
+ return ret;
+
+ /* hand the allocated MVA back to the caller */
+ ret = copy_to_user(&(((M4U_MOUDLE_STRUCT *) arg)->MVAStart),
+ &(m4u_module.MVAStart), sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+ break;
+
+ case MTK_M4U_T_DEALLOC_MVA:
+ {
+ ret = copy_from_user(&m4u_module, (void *)arg, sizeof(M4U_MOUDLE_STRUCT));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_DEALLOC_MVA,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = m4u_dealloc_mva(client, m4u_module.port, m4u_module.MVAStart);
+ if (ret)
+ return ret;
+ }
+ break;
+
+ case MTK_M4U_T_DUMP_INFO:
+ /* validates the argument only; the dump itself is a no-op here */
+ ret = copy_from_user(&ModuleID, (void *)arg, sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_Invalid_TLB_Range,copy_from_user failed,%d\n", ret);
+ return -EFAULT;
+ }
+
+ break;
+
+ case MTK_M4U_T_CACHE_SYNC:
+ ret = copy_from_user(&m4u_cache_data, (void *)arg, sizeof(M4U_CACHE_STRUCT));
+ if (ret) {
+ M4UMSG("m4u_cache_sync,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = m4u_cache_sync(client, m4u_cache_data.port, m4u_cache_data.va,
+ m4u_cache_data.size, m4u_cache_data.mva,
+ m4u_cache_data.eCacheSync);
+ break;
+
+ case MTK_M4U_T_DMA_OP:
+ ret = copy_from_user(&m4u_dma_data, (void *) arg,
+ sizeof(M4U_DMA_STRUCT));
+ if (ret) {
+ M4UMSG("m4u dma map/unmap area,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = m4u_dma_op(client, m4u_dma_data.port, m4u_dma_data.va,
+ m4u_dma_data.size, m4u_dma_data.mva,
+ m4u_dma_data.eDMAType, m4u_dma_data.eDMADir);
+ break;
+
+ case MTK_M4U_T_CONFIG_PORT:
+ ret = copy_from_user(&m4u_port, (void *)arg, sizeof(M4U_PORT_STRUCT));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_CONFIG_PORT,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+#ifdef M4U_TEE_SERVICE_ENABLE
+ /* don't reconfigure ports while secure init is in flight */
+ mutex_lock(&gM4u_sec_init);
+#endif
+ ret = m4u_config_port(&m4u_port);
+#ifdef M4U_TEE_SERVICE_ENABLE
+ mutex_unlock(&gM4u_sec_init);
+#endif
+ break;
+ case MTK_M4U_T_MONITOR_START:
+ ret = copy_from_user(&PortID, (void *)arg, sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_MONITOR_START,copy_from_user failed,%d\n", ret);
+ return -EFAULT;
+ }
+ ret = m4u_monitor_start(m4u_port_2_m4u_id(PortID));
+
+ break;
+ case MTK_M4U_T_MONITOR_STOP:
+ ret = copy_from_user(&PortID, (void *)arg, sizeof(unsigned int));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_MONITOR_STOP,copy_from_user failed,%d\n", ret);
+ return -EFAULT;
+ }
+ ret = m4u_monitor_stop(m4u_port_2_m4u_id(PortID));
+ break;
+ case MTK_M4U_T_CACHE_FLUSH_ALL:
+ m4u_dma_cache_flush_all();
+ break;
+
+ case MTK_M4U_T_CONFIG_PORT_ARRAY:
+ {
+ struct m4u_port_array port_array;
+
+ ret = copy_from_user(&port_array, (void *)arg, sizeof(struct m4u_port_array));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_CONFIG_PORT,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+#ifdef M4U_TEE_SERVICE_ENABLE
+ mutex_lock(&gM4u_sec_init);
+#endif
+ ret = m4u_config_port_array(&port_array);
+#ifdef M4U_TEE_SERVICE_ENABLE
+ mutex_unlock(&gM4u_sec_init);
+#endif
+ }
+ break;
+ case MTK_M4U_T_CONFIG_MAU:
+ {
+ M4U_MAU_STRUCT rMAU;
+
+ ret = copy_from_user(&rMAU, (void *)arg, sizeof(M4U_MAU_STRUCT));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_CONFIG_MAU,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = config_mau(rMAU);
+ }
+ break;
+ case MTK_M4U_T_CONFIG_TF:
+ {
+ M4U_TF_STRUCT rM4UTF;
+
+ ret = copy_from_user(&rM4UTF, (void *)arg, sizeof(M4U_TF_STRUCT));
+ if (ret) {
+ M4UMSG("MTK_M4U_T_CONFIG_TF,copy_from_user failed:%d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = m4u_enable_tf(rM4UTF.port, rM4UTF.fgEnable);
+ }
+ break;
+#ifdef M4U_TEE_SERVICE_ENABLE
+ case MTK_M4U_T_SEC_INIT:
+ {
+ M4UMSG("MTK M4U ioctl : MTK_M4U_T_SEC_INIT command!! 0x%x\n", cmd);
+ mutex_lock(&gM4u_sec_init);
+ ret = m4u_sec_init();
+ mutex_unlock(&gM4u_sec_init);
+ }
+ break;
+#endif
+ default:
+ /* M4UMSG("MTK M4U ioctl:No such command!!\n"); */
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+/*
+ * 32-bit-ABI mirrors of the native ioctl argument structs.  Only the
+ * pointer-sized fields (BufAddr/va) differ in width; the compat layer
+ * below converts field-by-field.  Layouts must track the native structs.
+ */
+typedef struct {
+ compat_uint_t port;
+ compat_ulong_t BufAddr;
+ compat_uint_t BufSize;
+ compat_uint_t prot;
+ compat_uint_t MVAStart;
+ compat_uint_t MVAEnd;
+ compat_uint_t flags;
+} COMPAT_M4U_MOUDLE_STRUCT;
+
+typedef struct {
+ compat_uint_t port;
+ compat_uint_t eCacheSync;
+ compat_ulong_t va;
+ compat_uint_t size;
+ compat_uint_t mva;
+} COMPAT_M4U_CACHE_STRUCT;
+
+typedef struct {
+ compat_uint_t port;
+ compat_uint_t eDMAType;
+ compat_uint_t eDMADir;
+ compat_ulong_t va;
+ compat_uint_t size;
+ compat_uint_t mva;
+} COMPAT_M4U_DMA_STRUCT;
+
+/* compat ioctl numbers: same magic/sequence numbers as the native ones */
+#define COMPAT_MTK_M4U_T_ALLOC_MVA _IOWR(MTK_M4U_MAGICNO, 4, int)
+#define COMPAT_MTK_M4U_T_DEALLOC_MVA _IOW(MTK_M4U_MAGICNO, 5, int)
+#define COMPAT_MTK_M4U_T_CACHE_SYNC _IOW(MTK_M4U_MAGICNO, 10, int)
+#define COMPAT_MTK_M4U_T_DMA_OP _IOW(MTK_M4U_MAGICNO, 29, int)
+
+
+/*
+ * Copy a 32-bit M4U module struct from user space into the 64-bit
+ * layout (also in user space, via compat_alloc_user_space).  Errors
+ * from get_user/put_user are OR-ed; nonzero return means -EFAULT-class
+ * failure somewhere in the sequence.
+ */
+static int compat_get_m4u_module_struct(COMPAT_M4U_MOUDLE_STRUCT __user *data32,
+ M4U_MOUDLE_STRUCT __user *data)
+{
+ compat_uint_t u;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(u, &(data32->port));
+ err |= put_user(u, &(data->port));
+ err |= get_user(l, &(data32->BufAddr));
+ err |= put_user(l, &(data->BufAddr));
+ err |= get_user(u, &(data32->BufSize));
+ err |= put_user(u, &(data->BufSize));
+ err |= get_user(u, &(data32->prot));
+ err |= put_user(u, &(data->prot));
+ err |= get_user(u, &(data32->MVAStart));
+ err |= put_user(u, &(data->MVAStart));
+ err |= get_user(u, &(data32->MVAEnd));
+ err |= put_user(u, &(data->MVAEnd));
+ err |= get_user(u, &(data32->flags));
+ err |= put_user(u, &(data->flags));
+
+ return err;
+}
+
+/*
+ * Reverse of compat_get_m4u_module_struct(): copy the 64-bit layout back
+ * into the 32-bit user struct (needed after ALLOC_MVA fills MVAStart).
+ */
+static int compat_put_m4u_module_struct(COMPAT_M4U_MOUDLE_STRUCT __user *data32,
+ M4U_MOUDLE_STRUCT __user *data)
+{
+ compat_uint_t u;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(u, &(data->port));
+ err |= put_user(u, &(data32->port));
+ err |= get_user(l, &(data->BufAddr));
+ err |= put_user(l, &(data32->BufAddr));
+ err |= get_user(u, &(data->BufSize));
+ err |= put_user(u, &(data32->BufSize));
+ err |= get_user(u, &(data->prot));
+ err |= put_user(u, &(data32->prot));
+ err |= get_user(u, &(data->MVAStart));
+ err |= put_user(u, &(data32->MVAStart));
+ err |= get_user(u, &(data->MVAEnd));
+ err |= put_user(u, &(data32->MVAEnd));
+ err |= get_user(u, &(data->flags));
+ err |= put_user(u, &(data32->flags));
+
+ return err;
+}
+
+/*
+ * Widen a 32-bit cache-sync argument struct into the native layout.
+ * No "put" counterpart exists because CACHE_SYNC returns nothing.
+ */
+static int compat_get_m4u_cache_struct(COMPAT_M4U_CACHE_STRUCT __user *data32,
+ M4U_CACHE_STRUCT __user *data)
+{
+ compat_uint_t u;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(u, &(data32->port));
+ err |= put_user(u, &(data->port));
+ err |= get_user(u, &(data32->eCacheSync));
+ err |= put_user(u, &(data->eCacheSync));
+ err |= get_user(l, &(data32->va));
+ err |= put_user(l, &(data->va));
+ err |= get_user(u, &(data32->size));
+ err |= put_user(u, &(data->size));
+ err |= get_user(u, &(data32->mva));
+ err |= put_user(u, &(data->mva));
+
+ return err;
+}
+
+/*
+ * Widen a 32-bit DMA-op argument struct into the native layout.
+ * Like CACHE_SYNC, DMA_OP is write-only, so no put counterpart.
+ */
+static int compat_get_m4u_dma_struct(
+ COMPAT_M4U_DMA_STRUCT __user *data32,
+ M4U_DMA_STRUCT __user *data)
+{
+ compat_uint_t u;
+ compat_ulong_t l;
+ int err;
+
+ err = get_user(u, &(data32->port));
+ err |= put_user(u, &(data->port));
+ err |= get_user(u, &(data32->eDMAType));
+ err |= put_user(u, &(data->eDMAType));
+ err |= get_user(u, &(data32->eDMADir));
+ err |= put_user(u, &(data->eDMADir));
+ err |= get_user(l, &(data32->va));
+ err |= put_user(l, &(data->va));
+ err |= get_user(u, &(data32->size));
+ err |= put_user(u, &(data->size));
+ err |= get_user(u, &(data32->mva));
+ err |= put_user(u, &(data->mva));
+
+ return err;
+}
+
+/*
+ * 32-bit compat entry point: translates the four commands whose argument
+ * structs contain a pointer-sized field, then forwards to the native
+ * unlocked_ioctl with a widened struct allocated on the user stack via
+ * compat_alloc_user_space().  Commands with layout-identical arguments
+ * are passed straight through with compat_ptr().
+ */
+long MTK_M4U_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_MTK_M4U_T_ALLOC_MVA:
+ {
+ COMPAT_M4U_MOUDLE_STRUCT __user *data32;
+ M4U_MOUDLE_STRUCT __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_m4u_module_struct(data32, data);
+ if (err)
+ return err;
+
+ ret = filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_ALLOC_MVA, (unsigned long)data);
+
+ /* copy MVAStart (and friends) back to the 32-bit struct */
+ err = compat_put_m4u_module_struct(data32, data);
+
+ if (err)
+ return err;
+
+ return ret;
+ }
+ case COMPAT_MTK_M4U_T_DEALLOC_MVA:
+ {
+ COMPAT_M4U_MOUDLE_STRUCT __user *data32;
+ M4U_MOUDLE_STRUCT __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_m4u_module_struct(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_DEALLOC_MVA,
+ (unsigned long)data);
+ }
+ case COMPAT_MTK_M4U_T_CACHE_SYNC:
+ {
+ COMPAT_M4U_CACHE_STRUCT __user *data32;
+ M4U_CACHE_STRUCT __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(M4U_CACHE_STRUCT));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_m4u_cache_struct(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_CACHE_SYNC,
+ (unsigned long)data);
+ }
+ case COMPAT_MTK_M4U_T_DMA_OP:
+ {
+ COMPAT_M4U_DMA_STRUCT __user *data32;
+ M4U_DMA_STRUCT __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(M4U_DMA_STRUCT));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_m4u_dma_struct(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_DMA_OP,
+ (unsigned long)data);
+}
+ /* the remaining commands have identical 32/64-bit layouts */
+ case MTK_M4U_T_POWER_ON:
+ case MTK_M4U_T_POWER_OFF:
+ case MTK_M4U_T_DUMP_INFO:
+ case MTK_M4U_T_CONFIG_PORT:
+ case MTK_M4U_T_MONITOR_START:
+ case MTK_M4U_T_MONITOR_STOP:
+ case MTK_M4U_T_CACHE_FLUSH_ALL:
+ case MTK_M4U_T_CONFIG_PORT_ARRAY:
+ case MTK_M4U_T_SEC_INIT:
+ return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#else
+
+#define MTK_M4U_COMPAT_ioctl NULL
+
+#endif
+
+/* file_operations for the m4u char interface (proc node or misc dev) */
+static const struct file_operations m4u_fops = {
+ .owner = THIS_MODULE,
+ .open = MTK_M4U_open,
+ .release = MTK_M4U_release,
+ .flush = MTK_M4U_flush,
+ .unlocked_ioctl = MTK_M4U_ioctl,
+ .compat_ioctl = MTK_M4U_COMPAT_ioctl,
+ /* .mmap = NULL; */
+};
+
+/*
+ * Platform-driver probe: resolve the instance index from the DT
+ * "cell-index" property, map registers and the IRQ for this m4u
+ * instance, and for instance 0 initialize the page-table domain and
+ * (TEE builds) reserve the secure MVA range below M4U_NONSEC_MVA_START.
+ */
+static int m4u_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+
+ M4UINFO("m4u_probe 0\n");
+
+ if (pdev->dev.of_node) {
+ int err;
+
+ err = of_property_read_u32(node, "cell-index", &pdev->id);
+ if (err)
+ M4UMSG("[DTS] get m4u platform_device id fail!!\n");
+ }
+ M4UINFO("m4u_probe 1, pdev id = %d name = %s\n", pdev->id, pdev->name);
+
+ gM4uDev->pDev[pdev->id] = &pdev->dev;
+ gM4uDev->m4u_base[pdev->id] = (unsigned long)of_iomap(node, 0);
+ gM4uDev->irq_num[pdev->id] = irq_of_parse_and_map(node, 0);
+
+ M4UMSG("m4u_probe 2, of_iomap: 0x%lx, irq_num: %d, pDev: %p\n",
+ gM4uDev->m4u_base[pdev->id], gM4uDev->irq_num[pdev->id], gM4uDev->pDev[pdev->id]);
+
+ if (0 == pdev->id) {
+ m4u_domain_init(gM4uDev, &gMvaNode_unknown);
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ {
+ m4u_buf_info_t *pMvaInfo;
+ unsigned int mva;
+
+ pMvaInfo = m4u_alloc_buf_info();
+ /* BUGFIX: the original tested "if (!pMvaInfo)" and then
+  * dereferenced the pointer, i.e. it wrote through NULL
+  * exactly when the allocation failed.  Fill in the
+  * bookkeeping only on success. */
+ if (pMvaInfo) {
+ pMvaInfo->port = M4U_PORT_UNKNOWN;
+ pMvaInfo->size = M4U_NONSEC_MVA_START - 0x100000;
+ } else {
+ M4UMSG("m4u_probe: alloc buf info for sec mva fail\n");
+ }
+
+ mva = m4u_do_mva_alloc(0, M4U_NONSEC_MVA_START - 0x100000, pMvaInfo);
+ M4UINFO("reserve sec mva: 0x%x\n", mva);
+ }
+#endif
+
+ }
+
+ m4u_hw_init(gM4uDev, pdev->id);
+
+ M4UINFO("m4u_probe 3 finish...\n");
+
+ return 0;
+}
+
+/*
+ * Platform-driver remove: tear down the hardware for this instance and
+ * unregister the char interface (misc device or proc node).
+ */
+static int m4u_remove(struct platform_device *pdev)
+{
+ m4u_hw_deinit(gM4uDev, pdev->id);
+
+#ifndef __M4U_USE_PROC_NODE
+ misc_deregister(&(gM4uDev->dev));
+#else
+ if (gM4uDev->m4u_dev_proc_entry)
+ proc_remove(gM4uDev->m4u_dev_proc_entry);
+#endif
+
+ return 0;
+}
+
+/* Legacy platform suspend hook: save non-secure m4u registers. */
+static int m4u_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ m4u_reg_backup();
+ M4UINFO("M4U backup in suspend\n");
+
+ return 0;
+}
+
+/* Legacy platform resume hook: restore registers saved in m4u_suspend(). */
+static int m4u_resume(struct platform_device *pdev)
+{
+ m4u_reg_restore();
+ M4UINFO("M4U restore in resume\n");
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+#ifdef CONFIG_PM
+/*---------------------------------------------------------------------------*/
+/* dev_pm_ops adapter: forward to the legacy platform suspend hook. */
+static int m4u_pm_suspend(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+
+ BUG_ON(pdev == NULL);
+
+ return m4u_suspend(pdev, PMSG_SUSPEND);
+}
+
+/* dev_pm_ops adapter: forward to the legacy platform resume hook. */
+static int m4u_pm_resume(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+
+ BUG_ON(pdev == NULL);
+
+ return m4u_resume(pdev);
+}
+
+/*
+ * Hibernation restore (noirq phase): re-arm every m4u IRQ as
+ * level-low triggered, since the trigger type is lost across restore.
+ */
+static int m4u_pm_restore_noirq(struct device *device)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_M4U_NUM; i++) {
+ irq_set_irq_type(gM4uDev->irq_num[i], IRQF_TRIGGER_LOW);
+ }
+
+ return 0;
+}
+
+/*---------------------------------------------------------------------------*/
+#else /*CONFIG_PM */
+/*---------------------------------------------------------------------------*/
+#define m4u_pm_suspend NULL
+#define m4u_pm_resume NULL
+#define m4u_pm_restore_noirq NULL
+/*---------------------------------------------------------------------------*/
+#endif /*CONFIG_PM */
+/*---------------------------------------------------------------------------*/
+/* DT match table: both the MM m4u and the peripheral-system IOMMU. */
+static const struct of_device_id iommu_of_ids[] = {
+ {.compatible = "mediatek,m4u",},
+ {.compatible = "mediatek,perisys_iommu",},
+ {}
+};
+
+/* All PM transitions route through the same backup/restore pair. */
+const struct dev_pm_ops m4u_pm_ops = {
+ .suspend = m4u_pm_suspend,
+ .resume = m4u_pm_resume,
+ .freeze = m4u_pm_suspend,
+ .thaw = m4u_pm_resume,
+ .poweroff = m4u_pm_suspend,
+ .restore = m4u_pm_resume,
+ .restore_noirq = m4u_pm_restore_noirq,
+};
+
+/* Platform driver: legacy suspend/resume kept alongside dev_pm_ops. */
+static struct platform_driver m4uDrv = {
+ .probe = m4u_probe,
+ .remove = m4u_remove,
+ .suspend = m4u_suspend,
+ .resume = m4u_resume,
+ .driver = {
+ .name = "m4u",
+ .of_match_table = iommu_of_ids,
+#ifdef CONFIG_PM
+ .pm = &m4u_pm_ops,
+#endif
+ .owner = THIS_MODULE,
+ }
+};
+
+#if 0
+static u64 m4u_dmamask = ~(u32) 0;
+
+static struct platform_device mtk_m4u_dev = {
+ .name = M4U_DEV_NAME,
+ .id = 0,
+ .dev = {
+ .dma_mask = &m4u_dmamask,
+ .coherent_dma_mask = 0xffffffffUL}
+};
+#endif
+
+#define __M4U_USE_PROC_NODE
+
+/*
+ * Module init: allocate the global m4u device, expose the char interface
+ * (misc device or /proc/m4u), set up debugfs, register the platform
+ * driver and, in TEE builds, hook the framebuffer notifier used for
+ * secure register backup/restore.  Returns 0 or a negative errno.
+ */
+static int __init MTK_M4U_Init(void)
+{
+ int ret = 0;
+
+ gM4uDev = kzalloc(sizeof(struct m4u_device), GFP_KERNEL);
+
+ M4UINFO("MTK_M4U_Init kzalloc: %p\n", gM4uDev);
+
+ if (!gM4uDev) {
+ M4UMSG("kmalloc for m4u_device fail\n");
+ return -ENOMEM;
+ }
+#ifndef __M4U_USE_PROC_NODE
+ gM4uDev->dev.minor = MISC_DYNAMIC_MINOR;
+ gM4uDev->dev.name = M4U_DEV_NAME;
+ gM4uDev->dev.fops = &m4u_fops;
+ gM4uDev->dev.parent = NULL;
+
+ ret = misc_register(&(gM4uDev->dev));
+ M4UINFO("misc_register, minor: %d\n", gM4uDev->dev.minor);
+ if (ret) {
+ M4UMSG("failed to register misc device.\n");
+ /* BUGFIX: don't leak the device struct on failure */
+ kfree(gM4uDev);
+ gM4uDev = NULL;
+ return ret;
+ }
+#else
+ gM4uDev->m4u_dev_proc_entry = proc_create("m4u", 0, NULL, &m4u_fops);
+ if (!(gM4uDev->m4u_dev_proc_entry)) {
+ M4UMSG("m4u:failed to register m4u in proc/m4u_device.\n");
+ /* BUGFIX: the original returned "ret" here, which is still 0,
+  * so a proc_create() failure was reported as success; also
+  * free gM4uDev instead of leaking it. */
+ kfree(gM4uDev);
+ gM4uDev = NULL;
+ return -ENOMEM;
+ }
+#endif
+
+ m4u_debug_init(gM4uDev);
+
+ M4UINFO("M4U platform_driver_register start\n");
+
+ if (platform_driver_register(&m4uDrv)) {
+ M4UMSG("failed to register M4U driver");
+ return -ENODEV;
+ }
+ M4UINFO("M4U platform_driver_register finsish\n");
+
+#if 0
+ retval = platform_device_register(&mtk_m4u_dev);
+ if (retval != 0)
+ return retval;
+#endif
+
+#ifdef M4U_PROFILE
+ m4u_profile_init();
+#endif
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ /* blank/unblank drives secure register backup/restore */
+ m4u_fb_notifier.notifier_call = m4u_fb_notifier_callback;
+ ret = fb_register_client(&m4u_fb_notifier);
+ if (ret)
+ M4UMSG("m4u register fb_notifier failed! ret(%d)\n", ret);
+ else
+ M4UMSG("m4u register fb_notifier OK!\n");
+#endif
+
+ return 0;
+}
+
+/*
+ * Late initcall: drop the SMI clocks that were left on for boot-time
+ * display, once everyone who needs them has had a chance to take refs.
+ */
+static int __init mtk_m4u_late_init(void)
+{
+
+#if !defined(CONFIG_MTK_LEGACY)
+ smi_common_clock_off();
+ smi_larb0_clock_off();
+#endif
+
+ return 0;
+}
+
+/* Module exit: unregister the platform driver (m4u_remove does the rest). */
+static void __exit MTK_M4U_Exit(void)
+{
+ platform_driver_unregister(&m4uDrv);
+}
+
+subsys_initcall(MTK_M4U_Init);
+late_initcall(mtk_m4u_late_init);
+module_exit(MTK_M4U_Exit);
+
+MODULE_DESCRIPTION("MTKM4Udriver");
+MODULE_AUTHOR("MTK80347 <Xiang.Xu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_debug.c b/drivers/misc/mediatek/m4u/2.0/m4u_debug.c
new file mode 100644
index 000000000..c2629a750
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_debug.c
@@ -0,0 +1,1457 @@
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/module.h>
+
+#include "m4u_debug.h"
+#include "m4u_priv.h"
+
+
+/* global variables */
+int gM4U_log_to_uart = 2;  /* log routing knob consumed by M4ULOG macros */
+int gM4U_log_level = 2;    /* verbosity threshold for m4u logging */
+
+/* seed MVA used by debug/test paths; 0 until a test sets it */
+unsigned int gM4U_seed_mva = 0;
+
+/*
+ * Debug self-test: allocate a buffer via one of three backends
+ * (1 = kmalloc, 2 = vmalloc, 3 = anonymous mmap), run it through a full
+ * m4u MVA alloc/dealloc cycle on the DISP_OVL0 port, then free it.
+ * Returns 0 on success, -1 if the MVA allocation failed.
+ */
+int m4u_test_alloc_dealloc(int id, unsigned int size)
+{
+ m4u_client_t *client;
+ unsigned long va = 0;
+ unsigned int mva;
+ int ret;
+ unsigned long populate;
+
+ if (id == 1)
+ va = (unsigned long)kmalloc(size, GFP_KERNEL);
+ else if (id == 2)
+ va = (unsigned long)vmalloc(size);
+ else if (id == 3) {
+ down_write(&current->mm->mmap_sem);
+ va = do_mmap_pgoff(NULL, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, 0, &populate);
+ up_write(&current->mm->mmap_sem);
+ }
+
+ M4UINFO("test va=0x%lx,size=0x%x\n", va, size);
+
+ client = m4u_create_client();
+ if (IS_ERR_OR_NULL(client))
+ M4UMSG("create client fail!\n");
+
+ ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, va, NULL, size,
+ M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
+ if (ret) {
+ M4UMSG("alloc mva fail:va=0x%lx,size=0x%x,ret=%d\n", va, size, ret);
+ /* BUGFIX: don't leak the client on the failure path */
+ m4u_destroy_client(client);
+ return -1;
+ }
+ m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
+
+ ret = m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
+ m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
+
+ if (id == 1)
+ kfree((void *)va);
+ else if (id == 2)
+ vfree((void *)va);
+ else if (id == 3) {
+ /* BUGFIX: do_munmap() modifies the address space and must be
+  * called with mmap_sem held for WRITE, not read (the mmap
+  * above correctly takes the write lock already). */
+ down_write(&current->mm->mmap_sem);
+ ret = do_munmap(current->mm, va, size);
+ up_write(&current->mm->mmap_sem);
+ if (ret)
+ M4UMSG("do_munmap failed\n");
+ }
+
+/* clean */
+ m4u_destroy_client(client);
+ return 0;
+}
+
+/*
+ * Reclaim-callback used by m4u_test_reclaim(): just logs the reclaimed
+ * range (plus the optional user cookie) and reports it handled.
+ */
+m4u_callback_ret_t m4u_test_callback(int alloc_port, unsigned int mva,
+ unsigned int size, void *data)
+{
+ if (NULL != data)
+ M4UMSG("test callback port=%d, mva=0x%x, size=0x%x, data=0x%x\n", alloc_port, mva, size, *(int *)data);
+ else
+ M4UMSG("test callback port=%d, mva=0x%x, size=0x%x\n", alloc_port, mva, size);
+
+ return M4U_CALLBACK_HANDLED;
+}
+
+/*
+ * Debug self-test for the reclaim path: allocate ten vmalloc buffers of
+ * growing size (size, 2*size, ... 10*size), map each to an MVA on
+ * DISP_OVL0 with a reclaim callback registered, then free the backing
+ * memory and destroy the client so reclaim fires.  Returns 0, or -1 if
+ * any MVA allocation fails (earlier buffers are leaked in that case —
+ * acceptable for a debug-only path).
+ */
+int m4u_test_reclaim(unsigned int size)
+{
+ m4u_client_t *client;
+ unsigned int *va[10];
+ unsigned int buf_size;
+ unsigned int mva;
+ int ret, i;
+
+ /* register callback */
+ m4u_register_reclaim_callback(M4U_PORT_DISP_OVL0, m4u_test_callback, NULL);
+
+ client = m4u_create_client();
+ if (IS_ERR_OR_NULL(client))
+ M4UMSG("createclientfail!\n");
+
+ buf_size = size;
+ for (i = 0; i < 10; i++) {
+ va[i] = vmalloc(buf_size);
+
+ ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)va[i],
+ NULL, buf_size, M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
+ if (ret) {
+ M4UMSG("alloc using kmalloc fail:va=0x%p,size=0x%x\n", va[i], buf_size);
+ return -1;
+ }
+ M4UINFO("alloc mva:va=0x%p,mva=0x%x,size=0x%x\n", va[i], mva, buf_size);
+
+ buf_size += size;
+ }
+
+ for (i = 0; i < 10; i++)
+ vfree((void *)va[i]);
+
+ m4u_dump_buf_info(NULL);
+ m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
+
+ /* destroying the client releases the MVAs, exercising reclaim */
+ m4u_destroy_client(client);
+
+ m4u_unregister_reclaim_callback(M4U_PORT_DISP_OVL0);
+
+ return 0;
+}
+
+/*
+ * Debug self-test for m4u_mva_map_kernel(): mmap a 1MB user buffer,
+ * fill it with a known pattern, map it to an MVA, re-map that MVA into
+ * kernel VA space and verify the pattern reads back, then tear it all
+ * down.  Returns 0 on success, -1 on alloc/map failure.
+ */
+static int m4u_test_map_kernel(void)
+{
+ m4u_client_t *client;
+ unsigned long va;
+ unsigned int size = 1024 * 1024;
+ unsigned int mva;
+ unsigned long kernel_va;
+ unsigned int kernel_size;
+ int i;
+ int ret;
+ unsigned long populate;
+
+ down_write(&current->mm->mmap_sem);
+ va = do_mmap_pgoff(NULL, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, 0, &populate);
+ up_write(&current->mm->mmap_sem);
+
+ M4UINFO("test va=0x%lx,size=0x%x\n", va, size);
+
+ /* seed a recognizable pattern to verify through the kernel mapping */
+ for (i = 0; i < size; i += 4)
+ *(int *)(va + i) = i;
+
+ client = m4u_create_client();
+ if (IS_ERR_OR_NULL(client))
+ M4UMSG("createclientfail!\n");
+
+ ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, va, NULL, size, M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
+ if (ret) {
+ M4UMSG("alloc using kmalloc fail:va=0x%lx,size=0x%x\n", va, size);
+ /* BUGFIX: release the client instead of leaking it */
+ m4u_destroy_client(client);
+ return -1;
+ }
+
+ ret = m4u_mva_map_kernel(mva, size, &kernel_va, &kernel_size);
+ if (ret) {
+ M4UMSG("map kernel fail!\n");
+ /* BUGFIX: undo the MVA allocation and client on failure */
+ m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
+ m4u_destroy_client(client);
+ return -1;
+ }
+ for (i = 0; i < size; i += 4) {
+ if (*(int *)(kernel_va + i) != i) {
+ M4UMSG("wawawa, get map value fail! i=%d, map=%d\n", i,
+ *(int *)(kernel_va + i));
+ }
+ }
+
+ ret = m4u_mva_unmap_kernel(mva, size, kernel_va);
+
+ ret = m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
+ /* BUGFIX: do_munmap() requires mmap_sem held for WRITE (it changes
+  * the address space); the original took the read lock here. */
+ down_write(&current->mm->mmap_sem);
+ ret = do_munmap(current->mm, va, size);
+ up_write(&current->mm->mmap_sem);
+ if (ret)
+ M4UMSG("do_munmap failed\n");
+
+ m4u_destroy_client(client);
+ return 0;
+}
+
+/*
+ * Debug self-test: drive the display path (OVL0 -> WDMA0) through m4u
+ * with two vmalloc'd 64x64x3 buffers mapped under the given protection
+ * flags, with the performance monitor running.  NOTE(review): the MVAs
+ * and buffers are deallocated only via client destruction / vfree; the
+ * MVA dealloc calls present in m4u_test_tf() are absent here.
+ */
+int m4u_test_ddp(unsigned int prot)
+{
+ unsigned int *pSrc, *pDst;
+ unsigned int src_pa, dst_pa;
+ unsigned int size = 64 * 64 * 3;
+ M4U_PORT_STRUCT port;
+ m4u_client_t *client = m4u_create_client();
+
+ pSrc = vmalloc(size);
+ pDst = vmalloc(size);
+
+ m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL,
+ size, prot, 0, &src_pa);
+
+ m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pDst, NULL,
+ size, prot, 0, &dst_pa);
+
+ M4UINFO("pSrc=0x%p, pDst=0x%p, src_pa=0x%x, dst_pa=0x%x\n", pSrc, pDst, src_pa, dst_pa);
+
+ /* route both display ports through the m4u (Virtuality = 1) */
+ port.ePortID = M4U_PORT_DISP_OVL0;
+ port.Direction = 0;
+ port.Distance = 1;
+ port.domain = 3;
+ port.Security = 0;
+ port.Virtuality = 1;
+ m4u_config_port(&port);
+
+ port.ePortID = M4U_PORT_DISP_WDMA0;
+ m4u_config_port(&port);
+
+ m4u_monitor_start(0);
+ __ddp_mem_test(pSrc, src_pa, pDst, dst_pa, !(prot & M4U_PROT_CACHE));
+ m4u_monitor_stop(0);
+
+ vfree(pSrc);
+ vfree(pDst);
+
+ m4u_destroy_client(client);
+ return 0;
+}
+
+/*
+ * Translation-fault callback used by m4u_test_tf(): log the faulting
+ * port/MVA (and cookie) and report the fault handled.  Runs in fault
+ * context, hence the warnings below.
+ */
+m4u_callback_ret_t test_fault_callback(int port, unsigned int mva, void *data)
+{
+ if (NULL != data)
+ M4UMSG("fault call port=%d, mva=0x%x, data=0x%x\n", port, mva, *(int *)data);
+ else
+ M4UMSG("fault call port=%d, mva=0x%x\n", port, mva);
+
+ /* DO NOT print too much logs here !!!! */
+ /* Do NOT use any lock hear !!!! */
+ /* DO NOT do any other things except print !!! */
+ /* DO NOT make any mistake here (or reboot will happen) !!! */
+ return M4U_CALLBACK_HANDLED;
+}
+
+/*
+ * Debug self-test for translation faults: like m4u_test_ddp() but the
+ * destination MVA is mapped at only half the buffer size, so the WDMA
+ * write runs off the mapping and triggers the fault callbacks
+ * registered above.
+ */
+int m4u_test_tf(unsigned int prot)
+{
+ unsigned int *pSrc, *pDst;
+ unsigned int src_pa, dst_pa;
+ unsigned int size = 64 * 64 * 3;
+ M4U_PORT_STRUCT port;
+ m4u_client_t *client = m4u_create_client();
+ int data = 88;
+
+ m4u_register_fault_callback(M4U_PORT_DISP_OVL0, test_fault_callback, &data);
+ m4u_register_fault_callback(M4U_PORT_DISP_WDMA0, test_fault_callback, &data);
+
+ pSrc = vmalloc(size);
+ pDst = vmalloc(size);
+
+ m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL,
+ size, prot, 0, &src_pa);
+
+ /* deliberately undersized mapping: this is what provokes the fault */
+ m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pDst, NULL,
+ size / 2, prot, 0, &dst_pa);
+
+ M4UINFO("pSrc=0x%p, pDst=0x%p, src_pa=0x%x, dst_pa=0x%x\n", pSrc, pDst, src_pa, dst_pa);
+
+ port.ePortID = M4U_PORT_DISP_OVL0;
+ port.Direction = 0;
+ port.Distance = 1;
+ port.domain = 3;
+ port.Security = 0;
+ port.Virtuality = 1;
+ m4u_config_port(&port);
+
+ port.ePortID = M4U_PORT_DISP_WDMA0;
+ m4u_config_port(&port);
+
+ m4u_monitor_start(0);
+ __ddp_mem_test(pSrc, src_pa, pDst, dst_pa, !!(prot & M4U_PROT_CACHE));
+ m4u_monitor_stop(0);
+
+ m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, src_pa);
+ m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, dst_pa);
+
+ vfree(pSrc);
+ vfree(pDst);
+
+ m4u_destroy_client(client);
+
+ return 0;
+}
+
+#if 0
+#include <mtk/ion_drv.h>
+
+/*
+ * ION-backed variant of m4u_test_ddp(): allocate the two test buffers
+ * from the multimedia ION heap, configure them for OVL0, and run the
+ * display memory test.  Currently compiled out (#if 0) — kept for
+ * reference; the early-porting FIX-ME below documents why.
+ */
+void m4u_test_ion(void)
+{
+ unsigned int *pSrc, *pDst;
+ unsigned long src_pa, dst_pa;
+ unsigned int size = 64 * 64 * 3, tmp_size;
+ M4U_PORT_STRUCT port;
+ struct ion_mm_data mm_data;
+ struct ion_client *ion_client;
+ struct ion_handle *src_handle, *dst_handle;
+
+ /* FIX-ME: modified for linux-3.10 early porting */
+ /* ion_client = ion_client_create(g_ion_device, 0xffffffff, "test"); */
+ ion_client = ion_client_create(g_ion_device, "test");
+
+ src_handle = ion_alloc(ion_client, size, 0, ION_HEAP_MULTIMEDIA_MASK, 0);
+ dst_handle = ion_alloc(ion_client, size, 0, ION_HEAP_MULTIMEDIA_MASK, 0);
+
+ pSrc = ion_map_kernel(ion_client, src_handle);
+ pDst = ion_map_kernel(ion_client, dst_handle);
+
+ mm_data.config_buffer_param.kernel_handle = src_handle;
+ mm_data.config_buffer_param.eModuleID = M4U_PORT_DISP_OVL0;
+ mm_data.config_buffer_param.security = 0;
+ mm_data.config_buffer_param.coherent = 0;
+ mm_data.mm_cmd = ION_MM_CONFIG_BUFFER;
+ if (ion_kernel_ioctl(ion_client, ION_CMD_MULTIMEDIA, (unsigned long)&mm_data) < 0)
+ M4UMSG("ion_test_drv: Config buffer failed.\n");
+
+ mm_data.config_buffer_param.kernel_handle = dst_handle;
+ if (ion_kernel_ioctl(ion_client, ION_CMD_MULTIMEDIA, (unsigned long)&mm_data) < 0)
+ M4UMSG("ion_test_drv: Config buffer failed.\n");
+
+ ion_phys(ion_client, src_handle, &src_pa, (size_t *)&tmp_size);
+ ion_phys(ion_client, dst_handle, &dst_pa, (size_t *)&tmp_size);
+
+ M4UMSG("ion alloced: pSrc=0x%p, pDst=0x%p, src_pa=0x%lu, dst_pa=0x%lu\n", pSrc, pDst, src_pa, dst_pa);
+
+ port.ePortID = M4U_PORT_DISP_OVL0;
+ port.Direction = 0;
+ port.Distance = 1;
+ port.domain = 3;
+ port.Security = 0;
+ port.Virtuality = 1;
+ m4u_config_port(&port);
+
+ port.ePortID = M4U_PORT_DISP_WDMA0;
+ m4u_config_port(&port);
+
+ m4u_monitor_start(0);
+ __ddp_mem_test(pSrc, src_pa, pDst, dst_pa, 0);
+ m4u_monitor_stop(0);
+
+ ion_free(ion_client, src_handle);
+ ion_free(ion_client, dst_handle);
+
+ ion_client_destroy(ion_client);
+}
+#else
+#define m4u_test_ion(...)
+#endif
+
+static int m4u_debug_set(void *data, u64 val)
+{
+ m4u_domain_t *domain = data;
+
+ M4UMSG("m4u_debug_set:val=%llu\n", val);
+
+ switch (val) {
+ case 1:
+ { /* map4kpageonly */
+ struct sg_table table;
+ struct sg_table *sg_table = &table;
+ struct scatterlist *sg;
+ int i;
+ struct page *page;
+ int page_num = 512;
+ unsigned int mva = 0x4000;
+
+ page = alloc_pages(GFP_KERNEL, get_order(page_num));
+ sg_alloc_table(sg_table, page_num, GFP_KERNEL);
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
+ sg_set_page(sg, page + i, PAGE_SIZE, 0);
+ m4u_map_sgtable(domain, mva, sg_table, page_num * PAGE_SIZE, M4U_PROT_WRITE | M4U_PROT_READ);
+ m4u_dump_pgtable(domain, NULL);
+ m4u_unmap(domain, mva, page_num * PAGE_SIZE);
+ m4u_dump_pgtable(domain, NULL);
+
+ sg_free_table(sg_table);
+ __free_pages(page, get_order(page_num));
+ }
+ break;
+ case 2:
+ { /* map64kpageonly */
+ struct sg_table table;
+ struct sg_table *sg_table = &table;
+ struct scatterlist *sg;
+ int i;
+ int page_num = 51;
+ unsigned int page_size = SZ_64K;
+ unsigned int mva = SZ_64K;
+
+ sg_alloc_table(sg_table, page_num, GFP_KERNEL);
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+ sg_dma_address(sg) = page_size * (i + 1);
+ sg_dma_len(sg) = page_size;
+ }
+
+ m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
+ m4u_dump_pgtable(domain, NULL);
+ m4u_unmap(domain, mva, page_num * page_size);
+ m4u_dump_pgtable(domain, NULL);
+ sg_free_table(sg_table);
+ }
+ break;
+ case 3:
+ { /* map1Mpageonly */
+ struct sg_table table;
+ struct sg_table *sg_table = &table;
+ struct scatterlist *sg;
+ int i;
+ int page_num = 37;
+ unsigned int page_size = SZ_1M;
+ unsigned int mva = SZ_1M;
+
+ sg_alloc_table(sg_table, page_num, GFP_KERNEL);
+
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+ sg_dma_address(sg) = page_size * (i + 1);
+ sg_dma_len(sg) = page_size;
+ }
+ m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
+ m4u_dump_pgtable(domain, NULL);
+ m4u_unmap(domain, mva, page_num * page_size);
+ m4u_dump_pgtable(domain, NULL);
+
+ sg_free_table(sg_table);
+ }
+ break;
+ case 4:
+ { /* map16Mpageonly */
+ struct sg_table table;
+ struct sg_table *sg_table = &table;
+ struct scatterlist *sg;
+ int i;
+ int page_num = 2;
+ unsigned int page_size = SZ_16M;
+ unsigned int mva = SZ_16M;
+
+ sg_alloc_table(sg_table, page_num, GFP_KERNEL);
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+ sg_dma_address(sg) = page_size * (i + 1);
+ sg_dma_len(sg) = page_size;
+ }
+ m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
+ m4u_dump_pgtable(domain, NULL);
+ m4u_unmap(domain, mva, page_num * page_size);
+ m4u_dump_pgtable(domain, NULL);
+ sg_free_table(sg_table);
+ }
+ break;
+ case 5:
+ { /* mapmiscpages */
+ struct sg_table table;
+ struct sg_table *sg_table = &table;
+ struct scatterlist *sg;
+ unsigned int mva = 0x4000;
+ unsigned int size = SZ_16M * 2;
+
+ sg_alloc_table(sg_table, 1, GFP_KERNEL);
+ sg = sg_table->sgl;
+ sg_dma_address(sg) = 0x4000;
+ sg_dma_len(sg) = size;
+
+ m4u_map_sgtable(domain, mva, sg_table, size, M4U_PROT_WRITE | M4U_PROT_READ);
+ m4u_dump_pgtable(domain, NULL);
+ m4u_unmap(domain, mva, size);
+ m4u_dump_pgtable(domain, NULL);
+ sg_free_table(sg_table);
+ }
+ break;
+ case 6:
+ m4u_test_alloc_dealloc(1, SZ_4M);
+ break;
+ case 7:
+ m4u_test_alloc_dealloc(2, SZ_4M);
+ break;
+ case 8:
+ m4u_test_alloc_dealloc(3, SZ_4M);
+ break;
+ case 9: /* m4u_alloc_mvausingkmallocbuffer */
+ {
+ m4u_test_reclaim(SZ_16K);
+ m4u_mvaGraph_dump();
+ }
+ break;
+ case 10:
+ {
+ unsigned int mva;
+
+ mva = m4u_do_mva_alloc_fix(0x90000000, 0x10000000, NULL);
+ M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
+ mva = m4u_do_mva_alloc_fix(0xb0000000, 0x10000000, NULL);
+ M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
+ mva = m4u_do_mva_alloc_fix(0xa0000000, 0x10000000, NULL);
+ M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
+ mva = m4u_do_mva_alloc_fix(0xa4000000, 0x10000000, NULL);
+ M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
+ m4u_mvaGraph_dump();
+ m4u_do_mva_free(0x90000000, 0x10000000);
+ m4u_do_mva_free(0xa0000000, 0x10000000);
+ m4u_do_mva_free(0xb0000000, 0x10000000);
+ m4u_mvaGraph_dump();
+ }
+ break;
+ case 11: /* map unmap kernel */
+ m4u_test_map_kernel();
+ break;
+ case 12:
+ ddp_mem_test();
+ break;
+ case 13:
+ m4u_test_ddp(M4U_PROT_READ|M4U_PROT_WRITE);
+ break;
+ case 14:
+ m4u_test_tf(M4U_PROT_READ|M4U_PROT_WRITE);
+ break;
+ case 15:
+ m4u_test_ion();
+ break;
+ case 16:
+ m4u_dump_main_tlb(0, 0);
+ break;
+ case 17:
+ m4u_dump_pfh_tlb(0);
+ break;
+ case 18:
+ m4u_dump_main_tlb(1, 0);
+ break;
+ case 19:
+ m4u_dump_pfh_tlb(1);
+ break;
+ case 20:
+ {
+ M4U_PORT_STRUCT rM4uPort;
+ int i;
+
+ rM4uPort.Virtuality = 1;
+ rM4uPort.Security = 0;
+ rM4uPort.Distance = 1;
+ rM4uPort.Direction = 0;
+ rM4uPort.domain = 3;
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++) {
+ rM4uPort.ePortID = i;
+ m4u_config_port(&rM4uPort);
+ }
+ }
+ break;
+ case 21:
+ {
+ M4U_PORT_STRUCT rM4uPort;
+ int i;
+
+ rM4uPort.Virtuality = 0;
+ rM4uPort.Security = 0;
+ rM4uPort.Distance = 1;
+ rM4uPort.Direction = 0;
+ rM4uPort.domain = 3;
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++) {
+ rM4uPort.ePortID = i;
+ m4u_config_port(&rM4uPort);
+ }
+ }
+ break;
+ case 22:
+ {
+ int i;
+ unsigned int *pSrc;
+
+ pSrc = vmalloc(128);
+ memset(pSrc, 55, 128);
+ m4u_cache_sync(NULL, 0, 0, 0, 0, M4U_CACHE_FLUSH_ALL);
+
+ for (i = 0; i < 128 / 32; i += 32) {
+ M4UMSG("+0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ 8 * i, pSrc[i], pSrc[i + 1], pSrc[i + 2], pSrc[i + 3], pSrc[i + 4],
+ pSrc[i + 5], pSrc[i + 6], pSrc[i + 7]);
+ }
+ vfree(pSrc);
+ }
+ break;
+ case 23:
+ {
+ void *pgd_va;
+ void *pgd_pa;
+ unsigned int size;
+
+ m4u_get_pgd(NULL, 0, &pgd_va, &pgd_pa, &size);
+ M4UMSG("pgd_va:0x%p pgd_pa:0x%p, size: %d\n", pgd_va, pgd_pa, size);
+ }
+ break;
+ case 24:
+ {
+ unsigned int *pSrc;
+ unsigned int mva;
+ unsigned long pa;
+ m4u_client_t *client = m4u_create_client();
+
+ pSrc = vmalloc(128);
+ m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL, 128, 0, 0, &mva);
+
+ m4u_dump_pgtable(domain, NULL);
+
+ pa = m4u_mva_to_pa(NULL, 0, mva);
+ M4UMSG("(1) mva:0x%x pa:0x%lx\n", mva, pa);
+ m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
+ pa = m4u_mva_to_pa(NULL, 0, mva);
+ M4UMSG("(2) mva:0x%x pa:0x%lx\n", mva, pa);
+ m4u_destroy_client(client);
+ }
+ break;
+ case 25:
+ m4u_monitor_start(0);
+ break;
+ case 26:
+ m4u_monitor_stop(0);
+ break;
+ case 27:
+ /*
+ m4u_dump_reg_for_smi_hang_issue();
+ */
+ break;
+ case 28:
+ {
+ /*
+ unsigned char *pSrc;
+ unsigned char *pDst;
+ unsigned int mva_rd;
+ unsigned int mva_wr;
+ unsigned int allocated_size = 1024;
+ unsigned int i;
+ m4u_client_t *client = m4u_create_client();
+
+ m4u_monitor_start(0);
+
+ pSrc = vmalloc(allocated_size);
+ memset(pSrc, 0xFF, allocated_size);
+ M4UMSG("(0) vmalloc pSrc:0x%p\n", pSrc);
+ pDst = vmalloc(allocated_size);
+ memset(pDst, 0xFF, allocated_size);
+ M4UMSG("(0) vmalloc pDst:0x%p\n", pDst);
+ M4UMSG("(1) pDst check 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *pDst, *(pDst+1), *(pDst+126), *(pDst+127), *(pDst+128));
+
+
+ m4u_alloc_mva(client, M4U_PORT_DISP_FAKE, (unsigned long)pSrc, NULL, allocated_size, 0, 0, &mva_rd);
+ m4u_alloc_mva(client, M4U_PORT_DISP_FAKE, (unsigned long)pDst, NULL, allocated_size, 0, 0, &mva_wr);
+
+ m4u_dump_pgtable(domain, NULL);
+
+ m4u_display_fake_engine_test(mva_rd, mva_wr);
+
+ M4UMSG("(2) mva_wr:0x%x\n", mva_wr);
+
+ m4u_dealloc_mva(client, M4U_PORT_DISP_FAKE, mva_rd);
+ m4u_dealloc_mva(client, M4U_PORT_DISP_FAKE, mva_wr);
+
+ m4u_cache_sync(NULL, 0, 0, 0, 0, M4U_CACHE_FLUSH_ALL);
+
+ m4u_destroy_client(client);
+
+ M4UMSG("(3) pDst check 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *pDst, *(pDst+1), *(pDst+126), *(pDst+127), *(pDst+128));
+
+ for (i = 0; i < 128; i++) {
+ if (*(pDst+i) != 0) {
+ M4UMSG("(4) [Error] pDst check fail !!VA 0x%p: 0x%x\n",
+ pDst+i*sizeof(unsigned char), *(pDst+i));
+ break;
+ }
+ }
+ if (i == 128)
+ M4UMSG("(4) m4u_display_fake_engine_test R/W 128 bytes PASS!!\n ");
+
+ vfree(pSrc);
+ vfree(pDst);
+
+ m4u_monitor_stop(0);
+ */
+ break;
+ }
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ case 50:
+ m4u_sec_init();
+ break;
+ case 51:
+ {
+ M4U_PORT_STRUCT port;
+
+ memset(&port, 0, sizeof(M4U_PORT_STRUCT));
+
+ port.ePortID = M4U_PORT_HW_VDEC_PP_EXT;
+ port.Virtuality = 1;
+ M4UMSG("(0) config port: mmu: %d, sec: %d\n", port.Virtuality, port.Security);
+ m4u_config_port(&port);
+ /* port.ePortID = M4U_PORT_MDP_WROT1;
+ m4u_config_port(&port); */
+ /* port.ePortID = M4U_PORT_IMGO;
+ m4u_config_port(&port); */
+ port.ePortID = M4U_PORT_VENC_RCPU;
+ m4u_config_port(&port);
+ /* port.ePortID = M4U_PORT_MJC_MV_RD;
+ m4u_config_port(&port); */
+
+ port.ePortID = M4U_PORT_HW_VDEC_PP_EXT;
+ M4UMSG("(1) config port: mmu: %d, sec: %d\n", port.Virtuality, port.Security);
+ m4u_config_port_tee(&port);
+ port.Security = 1;
+ M4UMSG("(2) config port: mmu: %d, sec: %d\n", port.Virtuality, port.Security);
+ m4u_config_port_tee(&port);
+ }
+ break;
+#endif
+ default:
+ M4UMSG("m4u_debug_set error,val=%llu\n", val);
+ }
+
+ return 0;
+}
+
/* debugfs "debug" read handler.
 * The "debug" node is a write-only command interface (see m4u_debug_set),
 * so reads always report 0.
 */
static int m4u_debug_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}
+
+DEFINE_SIMPLE_ATTRIBUTE(m4u_debug_fops, m4u_debug_get, m4u_debug_set, "%llu\n");
+
+#if (M4U_DVT != 0)
+static void m4u_test_init(void)
+{
+ M4U_PORT_STRUCT rM4uPort;
+ int i;
+
+ rM4uPort.Virtuality = 0;
+ rM4uPort.Security = 0;
+ rM4uPort.Distance = 1;
+ rM4uPort.Direction = 0;
+ rM4uPort.domain = 3;
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++) {
+ rM4uPort.ePortID = i;
+ m4u_config_port(&rM4uPort);
+ }
+
+ m4u_invalid_tlb_all(0);
+ m4u_confirm_all_invalidated(0);
+
+ m4u_monitor_stop(0);
+}
+
+static void m4u_test_start(void)
+{
+ M4U_PORT_STRUCT rM4uPort;
+ int i;
+
+ m4u_monitor_start(0);
+
+ rM4uPort.Virtuality = 1;
+ rM4uPort.Security = 0;
+ rM4uPort.Distance = 1;
+ rM4uPort.Direction = 0;
+ rM4uPort.domain = 3;
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++) {
+ rM4uPort.ePortID = i;
+ m4u_config_port(&rM4uPort);
+ }
+
+ for (i = 0; i < 100; i++)
+ M4UMSG("test %d !!!\n", i);
+}
+
+static void m4u_test_end(int invalid_tlb)
+{
+ M4U_PORT_STRUCT rM4uPort;
+ int i;
+
+ m4u_monitor_stop(0);
+
+ rM4uPort.Virtuality = 0;
+ rM4uPort.Security = 0;
+ rM4uPort.Distance = 1;
+ rM4uPort.Direction = 0;
+ rM4uPort.domain = 3;
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++) {
+ rM4uPort.ePortID = i;
+ m4u_config_port(&rM4uPort);
+ }
+
+ if (1 == invalid_tlb) {
+ m4u_invalid_tlb_all(0);
+ m4u_confirm_all_invalidated(0);
+ }
+}
+#endif
+
+#if (M4U_DVT != 0)
+static int __vCatchTranslationFault(m4u_domain_t *domain, unsigned int layer,
+ unsigned int seed_mva)
+{
+ imu_pgd_t *pgd;
+ imu_pte_t *pte;
+ unsigned int backup;
+ unsigned int *backup_ptr;
+ int count;
+
+ int pt_type = m4u_get_pt_type(domain, seed_mva);
+
+ M4UMSG("__vCatchTranslationFault, layer = %d, seed_mva = 0x%x.\n", layer, seed_mva);
+
+ if (0 == seed_mva) {
+ M4UMSG("seed_mva = 0 !!!!!!!!!!!\n");
+ return 0;
+ }
+
+ pgd = imu_pgd_offset(domain, seed_mva);
+ if (layer == 0) {
+ int i = 0;
+
+ backup = imu_pgd_val(*pgd);
+ backup_ptr = (unsigned int *)pgd;
+ if (pt_type == MMU_PT_TYPE_SUPERSECTION) {
+ for (i = 0; i < 16; i++)
+ imu_pgd_val(*(pgd + i)) = 0x0;
+ } else {
+ imu_pgd_val(*pgd) = 0x0;
+ }
+ } else {
+ int i = 0;
+
+ pte = imu_pte_offset_map(pgd, seed_mva);
+ backup = imu_pte_val(*pte);
+ backup_ptr = (unsigned int *)pte;
+ if (pt_type == MMU_PT_TYPE_LARGE_PAGE) {
+ for (i = 0; i < 16; i++)
+ imu_pte_val(*(pte + i)) = 0x0;
+ } else {
+ imu_pte_val(*pte) = 0x0;
+ }
+ }
+
+ for (count = 0; count < 100; count++)
+ M4UMSG("test %d ......\n", count);
+
+ /* restore */
+ *backup_ptr = backup;
+
+ return 0;
+}
+
/* Provoke an "invalid physical address" fault: temporarily rewrite the
 * descriptor for @seed_mva so it points at PA 0 (or PA 0x10000000 when
 * @g4_mode is set), run some traffic, then restore the original entry.
 * Which level is patched is decided at compile time by the DVT page-table
 * type (pte for small/large pages, pgd otherwise).
 * NOTE(review): the literals 0x2 / 0x10000002 are presumably minimal valid
 * descriptors with the PA field forced to the fault address — confirm
 * against the short-descriptor format used by this pagetable code.
 */
static int __vCatchInvalidPhyFault(m4u_domain_t *domain, int g4_mode, unsigned int seed_mva)
{
	imu_pgd_t *pgd;
	imu_pte_t *pte;
	unsigned int backup;
	unsigned int fault_pa;
	int count;

	if (0 == seed_mva) {
		M4UMSG("seed_mva = 0 !!!!!!!!!!!\n");
		return 0;
	}

	pgd = imu_pgd_offset(domain, seed_mva);
#if (M4U_DVT == MMU_PT_TYPE_SMALL_PAGE || M4U_DVT == MMU_PT_TYPE_LARGE_PAGE)
	pte = imu_pte_offset_map(pgd, seed_mva);
	backup = imu_pte_val(*pte);
	if (!g4_mode) {
		imu_pte_val(*pte) = 0x2;
		fault_pa = 0;
	} else {
		imu_pte_val(*pte) = 0x10000002;
		fault_pa = 0x10000000;
	}
#else
	backup = imu_pgd_val(*pgd);
	if (!g4_mode) {
		imu_pgd_val(*pgd) = 0x2;
		fault_pa = 0;
	} else {
		imu_pgd_val(*pgd) = 0x10000002;
		fault_pa = 0x10000000;
	}
#endif
	M4UMSG("fault_pa (%d): 0x%x\n", g4_mode, fault_pa);

	/* generate some activity while the descriptor is bogus */
	for (count = 0; count < 100; count++)
		M4UMSG("test %d ......\n", count);


	/* restore */
#if (M4U_DVT == MMU_PT_TYPE_SMALL_PAGE || M4U_DVT == MMU_PT_TYPE_LARGE_PAGE)
	imu_pte_val(*pte) = backup;
#else
	imu_pgd_val(*pgd) = backup;
#endif
	return 0;
}
+
+#endif
+
+#if (M4U_DVT != 0)
/* debugfs "test" write handler (DVT builds only).
 * Runs a hardware MMU/TLB design-verification scenario selected by the
 * value written to the node.
 * @data: m4u_domain_t * of the target domain (bound at file creation)
 * @val:  scenario number (see the case labels below)
 * Always returns 0 so the debugfs write itself succeeds.
 */
static int m4u_test_set(void *data, u64 val)
{
	m4u_domain_t *domain = data;

	M4UMSG("m4u_test_set:val=%llu\n", val);

	switch (val) {
	case 1:
		M4UMSG("---------- 1. MMU translation with main TLB only. ---------- Start!\n");
		m4u_test_init();
		m4u_enable_prefetch(0, 0);
		m4u_test_start();
		m4u_test_end(1);
		m4u_enable_prefetch(0, 1);
		M4UMSG("---------- 1. MMU translation with main TLB only. ---------- End!\n");
		break;

	case 2:
		M4UMSG("---------- 2. MMU translation with both main TLB and pre-fetch TLB. ---------- Start!\n");
		m4u_test_init();
		m4u_enable_prefetch(0, 1);
		m4u_test_start();
		m4u_test_end(1);
		M4UMSG("---------- 2. MMU translation with both main TLB and pre-fetch TLB. ---------- End!\n");
		break;

	case 3:
		M4UMSG("---------- 3. Range invalidate TLBs static test. ---------- Start!\n");
		m4u_test_init();
		m4u_test_start();
		m4u_test_end(0);
		m4u_dump_valid_main_tlb(0, 0);
		m4u_invalid_tlb_by_range(domain, gM4U_seed_mva, gM4U_seed_mva + 0x1000000);
		m4u_confirm_range_invalidated(0, gM4U_seed_mva, gM4U_seed_mva + 0x1000000);
		m4u_dump_valid_main_tlb(0, 0);
		M4UMSG("---------- 3. Range invalidate TLBs static test. ---------- End!\n");
		break;

	case 4:
		{
			int i;

			M4UMSG("---------- 4. Range invalidate TLBs dynamic test. ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			for (i = 0; i < 100; i++)
				m4u_invalid_tlb_by_range(domain, gM4U_seed_mva,
							 gM4U_seed_mva + 0x1000000);
			m4u_test_end(1);
			M4UMSG("---------- 4. Range invalidate TLBs dynamic test. ---------- End!\n");
		}
		break;

	case 5:
		M4UMSG("---------- 5. Invalidate all TLBs static test. ---------- Start!\n");
		m4u_test_init();
		m4u_test_start();
		m4u_test_end(0);
		m4u_dump_valid_main_tlb(0, 0);
		m4u_invalid_tlb_all(0);
		m4u_confirm_all_invalidated(0);
		m4u_dump_valid_main_tlb(0, 0);
		M4UMSG("---------- 5. Invalidate all TLBs static test. ---------- End!\n");
		break;

	case 6:
		{
			int i;

			M4UMSG("---------- 6. Invalidate all TLBs dynamic test. ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			for (i = 0; i < 100; i++)
				m4u_invalid_tlb_all(0);
			m4u_test_end(1);
			m4u_dump_valid_main_tlb(0, 0);
			M4UMSG("---------- 6. Invalidate all TLBs dynamic test. ---------- End!\n");
		}
		break;

	case 8:
		M4UMSG("---------- 8. SW manual mode to program main TLB. ---------- Start!\n");
		m4u_test_init();
		m4u_dump_main_tlb(0, 0);
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
		m4u_manual_insert_entry(0, gM4U_seed_mva, 1, M4U_DVT, 0, 0, gM4U_seed_mva);
		m4u_dump_valid_main_tlb(0, 0);
#else
		m4u_manual_insert_entry(0, gM4U_seed_mva, 0, M4U_DVT, 0, 0, gM4U_seed_mva);
		m4u_dump_valid_main_tlb(0, 0);
#endif
		m4u_test_start();
		m4u_test_end(1);
		M4UMSG("---------- 8. SW manual mode to program main TLB. ---------- End!\n");
		break;

	case 9:
		M4UMSG("---------- 9. Main TLB lock mode. ---------- Start!\n");
		m4u_test_init();
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
		m4u_manual_insert_entry(0, gM4U_seed_mva, 1, M4U_DVT, 0, 1, gM4U_seed_mva);
#else
		m4u_manual_insert_entry(0, gM4U_seed_mva, 0, M4U_DVT, 0, 1, gM4U_seed_mva);
#endif
		m4u_dump_valid_main_tlb(0, 0);

		m4u_test_start();
		m4u_dump_valid_main_tlb(0, 0);
		m4u_test_end(1);
		m4u_dump_valid_main_tlb(0, 0);

		M4UMSG("---------- 9. Main TLB lock mode. ---------- End!\n");
		break;

	case 10:
		{
			/* NOTE(review): i and j look unused in this case — candidates
			 * for removal. */
			int i, j;
			int seq_id;

			M4UMSG("---------- 10. Sequential range feature. ---------- Start!\n");

			seq_id = m4u_insert_seq_range(0, gM4U_seed_mva, gM4U_seed_mva + 0x1000000);
			m4u_test_init();
			m4u_dump_valid_main_tlb(0, 0);
			m4u_test_start();
			m4u_test_end(0);
			m4u_dump_valid_main_tlb(0, 0);
			if (seq_id >= 0)
				m4u_invalid_seq_range_by_id(0, seq_id);
			M4UMSG("---------- 10. Sequential range feature. ---------- End!\n");
		}
		break;

	case 11:
		{
			int i;

			M4UMSG("---------- 11. Single entry test. ---------- Start!\n");
			m4u_test_init();
			m4u_enable_MTLB_allshare(0, 1);
			/* The unbraced for-loop body is whichever insert call
			 * survives preprocessing (exactly one #if arm matches
			 * the configured M4U_DVT). */
			for (i = 0; i < 31; i++)
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
				m4u_manual_insert_entry(0, gM4U_seed_mva + i * 4096, 1, M4U_DVT, 0,
							1, gM4U_seed_mva + i * 4096);
#endif
#if (M4U_DVT == MMU_PT_TYPE_SECTION)
				m4u_manual_insert_entry(0, gM4U_seed_mva + i * 4096, 0, M4U_DVT, 0, 1,
							gM4U_seed_mva + i * 4096);
#endif
#if (M4U_DVT == MMU_PT_TYPE_SUPERSECTION)
				m4u_manual_insert_entry(0, i * 4096, 1, MMU_PT_TYPE_SMALL_PAGE, 0, 1,
							i * 4096);
#endif

			m4u_dump_valid_main_tlb(0, 0);
			m4u_test_start();
			m4u_dump_valid_main_tlb(0, 0);
			m4u_test_end(1);
			m4u_enable_MTLB_allshare(0, 0);

			M4UMSG("---------- 11. Single entry test. ---------- End!\n");
		}
		break;


	case 13:
		{
			int count;

			M4UMSG("---------- 13. MMU performance counter. ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			for (count = 0; count < 100; count++)
				M4UMSG("test %d ......\n", count);
			m4u_test_end(1);

			M4UMSG("---------- 13. MMU performance counter. ---------- End!\n");
		}
		break;


	case 14:
		{
			int i;
			int count;

			M4UMSG("---------- 14. Entry number versus performance evaluation. ---------- Start!\n");

			m4u_test_init();
			m4u_enable_MTLB_allshare(0, 1);
			for (i = 0; i < 30; i++)
				m4u_manual_insert_entry(0, i * 4096, 1, MMU_PT_TYPE_SMALL_PAGE, 0,
							1, i * 4096);

			m4u_dump_valid_main_tlb(0, 0);
			m4u_test_start();
			for (count = 0; count < 100; count++)
				M4UMSG("test %d ......\n", count);
			m4u_test_end(1);

			m4u_enable_MTLB_allshare(0, 0);

			M4UMSG("---------- 14. Entry number versus performance evaluation. ---------- End!\n");
		}
		break;



	case 15:
		{
			M4UMSG("---------- 15. Translation fault. ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			__vCatchTranslationFault(domain, 0, gM4U_seed_mva);
			m4u_test_end(1);
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
			m4u_test_init();
			m4u_test_start();
			__vCatchTranslationFault(domain, 1, gM4U_seed_mva);
			m4u_test_end(1);
#endif
			M4UMSG("---------- 15. Translation fault. ---------- End!\n");
		}
		break;

	case 16:
		M4UMSG("---------- 16. TLB multi-hit fault. ---------- Start!\n");
		m4u_test_init();
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
		m4u_manual_insert_entry(0, gM4U_seed_mva, 1, M4U_DVT, 0, 0, gM4U_seed_mva);
#else
		m4u_manual_insert_entry(0, gM4U_seed_mva, 0, M4U_DVT, 0, 0, gM4U_seed_mva);
#endif
		M4UMSG("valid main tlb 1\n");
		m4u_dump_valid_main_tlb(0, 0);
		/* insert the same entry a second time to create the multi-hit */
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
		m4u_manual_insert_entry(0, gM4U_seed_mva, 1, M4U_DVT, 0, 0, gM4U_seed_mva);
#else
		m4u_manual_insert_entry(0, gM4U_seed_mva, 0, M4U_DVT, 0, 0, gM4U_seed_mva);
#endif

		M4UMSG("valid main tlb 2\n");
		m4u_dump_valid_main_tlb(0, 0);
		m4u_test_start();
		M4UMSG("valid main tlb 3\n");
		m4u_dump_main_tlb(0, 0);
		m4u_dump_valid_main_tlb(0, 0);
		m4u_test_end(1);
		M4UMSG("---------- 16. TLB multi-hit fault. ---------- End!\n");
		break;

	case 17:
		{
			int i;

			M4UMSG("---------- 17. Entry replacement fault. ---------- Start!\n");
			m4u_enable_MTLB_allshare(0, 1);
			m4u_test_init();
			/* as in case 11: exactly one insert call survives
			 * preprocessing and forms the loop body */
			for (i = 0; i < 32; i++)
#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE || M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
				m4u_manual_insert_entry(0, gM4U_seed_mva + i * 4096, 1, M4U_DVT, 0,
							1, gM4U_seed_mva + i * 4096);
#endif
#if (M4U_DVT == MMU_PT_TYPE_SECTION)
				m4u_manual_insert_entry(0, gM4U_seed_mva + i * 4096, 0, M4U_DVT, 0, 1,
							gM4U_seed_mva + i * 4096);
#endif
#if (M4U_DVT == MMU_PT_TYPE_SUPERSECTION)
				m4u_manual_insert_entry(0, i * 4096, 1, MMU_PT_TYPE_SMALL_PAGE, 0, 1,
							i * 4096);
#endif

			m4u_dump_valid_main_tlb(0, 0);
			m4u_test_start();
			m4u_test_end(1);
			m4u_enable_MTLB_allshare(0, 0);
			M4UMSG("---------- 17. Entry replacement fault. ---------- End!\n");
		}
		break;

	case 18:
		M4UMSG("---------- 18. Invalid physical address fault. ---------- Start!\n");
		m4u_test_init();
		m4u_test_start();
		__vCatchInvalidPhyFault(domain, 0, gM4U_seed_mva);
		m4u_test_end(1);
		m4u_test_init();
		m4u_test_start();
		__vCatchInvalidPhyFault(domain, 1, gM4U_seed_mva);
		m4u_test_end(1);

		M4UMSG("---------- 18. Invalid physical address fault. ---------- End!\n");
		break;

	case 20:
		{
			/* NOTE(review): i looks unused in this case — candidate
			 * for removal. */
			int i;
			void *protectva = (void *)gM4U_ProtectVA;

			M4UMSG("---------- 20. Translation fault Protection. ---------- Start!\n");
			memset(protectva, 0x55, 128);
			m4u_test_init();
			m4u_test_start();
			__vCatchTranslationFault(domain, 0, gM4U_seed_mva);
			m4u_test_end(1);

			M4UMSG("---------- 20. Translation fault Protection. ---------- End!\n");
		}
		break;

	case 21:
		M4UMSG("---------- 21. MMU interrupt hang function. ---------- Start!\n");
		m4u_enable_error_hang(0, 1);
		m4u_test_init();
		m4u_test_start();
		__vCatchTranslationFault(domain, 0, gM4U_seed_mva);
		m4u_test_end(1);
		m4u_enable_error_hang(0, 0);

		M4UMSG("---------- 21. MMU interrupt hang function. ---------- End!\n");
		break;

	case 22:
		{
			int i;

			M4UMSG("---------- 22. Physical MAU assert test(traffic after MMU). ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			for (i = 0; i < 4; i++)
				mau_start_monitor(0, 0, i, 0, 0, 0, 0, gM4U_seed_mva + i * 0x100000,
						  gM4U_seed_mva + (i + 1) * 0x100000 - 1,
						  0xffffffff, 0xffffffff);
			m4u_test_end(1);

			M4UMSG("---------- 22. MMU interrupt hang function. ---------- End!\n");
		}
		break;

	case 23:
		{
			int i;

			M4UMSG("---------- 23. Virtual MPU assert test(traffic before MMU). ---------- Start!\n");
			m4u_test_init();
			m4u_test_start();
			for (i = 0; i < 4; i++)
				mau_start_monitor(0, 0, i, 0, 1, 0, 0, gM4U_seed_mva + i * 0x100000,
						  gM4U_seed_mva + (i + 1) * 0x100000 - 1,
						  0xffffffff, 0xffffffff);
			m4u_test_end(1);

			M4UMSG("---------- 23. Virtual MPU assert test. ---------- End!\n");
		}
		break;

	case 29:
		M4UMSG("---------- 29. Legacy 4KB-only mode test. ---------- Start!\n");

		M4UMSG("---------- 29. Legacy 4KB-only mode test. ---------- End!\n");
		break;

	default:
		M4UMSG("m4u_test_set error,val=%llu\n", val);
	}

	return 0;
}
+
/* debugfs "test" read handler: refresh the global test seed MVA
 * (first valid mapping plus a 2MB offset) and report it to the reader.
 */
static int m4u_test_get(void *data, u64 *val)
{
	gM4U_seed_mva = get_first_valid_mva() + 0x200000;

	*val = gM4U_seed_mva;
	return 0;
}
+
+DEFINE_SIMPLE_ATTRIBUTE(m4u_test_fops, m4u_test_get, m4u_test_set, "%llu\n");
+#endif
+
+static int m4u_log_level_set(void *data, u64 val)
+{
+ gM4U_log_to_uart = (val & 0xf0) >> 4;
+ gM4U_log_level = val & 0xf;
+ M4UMSG("gM4U_log_level: %d, gM4U_log_to_uart:%d\n", gM4U_log_level, gM4U_log_to_uart);
+
+ return 0;
+}
+
+static int m4u_log_level_get(void *data, u64 *val)
+{
+ *val = gM4U_log_level | (gM4U_log_to_uart << 4);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(m4u_log_level_fops, m4u_log_level_get, m4u_log_level_set, "%llu\n");
+
+static int m4u_debug_freemva_set(void *data, u64 val)
+{
+ m4u_domain_t *domain = data;
+ m4u_buf_info_t *pMvaInfo;
+ unsigned int mva = (unsigned int)val;
+
+ M4UMSG("free mva: 0x%x\n", mva);
+ pMvaInfo = mva_get_priv(mva);
+ if (pMvaInfo) {
+ m4u_unmap(domain, mva, pMvaInfo->size);
+ m4u_do_mva_free(mva, pMvaInfo->size);
+ }
+ return 0;
+}
+
+static int m4u_debug_freemva_get(void *data, u64 *val)
+{
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(m4u_debug_freemva_fops, m4u_debug_freemva_get, m4u_debug_freemva_set, "%llu\n");
+
/* debugfs "port": dump the configuration/status of every M4U port. */
int m4u_debug_port_show(struct seq_file *s, void *unused)
{
	m4u_print_port_status(s, 0);
	return 0;
}

int m4u_debug_port_open(struct inode *inode, struct file *file)
{
	return single_open(file, m4u_debug_port_show, inode->i_private);
}

const struct file_operations m4u_debug_port_fops = {
	.open = m4u_debug_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
+
/* debugfs "mva": dump the MVA allocation graph.
 * NOTE(review): m4u_mvaGraph_dump() takes no seq_file, so this presumably
 * prints to the kernel log rather than the debugfs reader. */
int m4u_debug_mva_show(struct seq_file *s, void *unused)
{
	m4u_mvaGraph_dump();
	return 0;
}

int m4u_debug_mva_open(struct inode *inode, struct file *file)
{
	return single_open(file, m4u_debug_mva_show, inode->i_private);
}

const struct file_operations m4u_debug_mva_fops = {
	.open = m4u_debug_mva_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
+
/* debugfs "buffer": dump information about currently allocated buffers. */
int m4u_debug_buf_show(struct seq_file *s, void *unused)
{
	m4u_dump_buf_info(s);
	return 0;
}

int m4u_debug_buf_open(struct inode *inode, struct file *file)
{
	return single_open(file, m4u_debug_buf_show, inode->i_private);
}

const struct file_operations m4u_debug_buf_fops = {
	.open = m4u_debug_buf_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
+
/* debugfs "monitor": print the M4U performance counters. */
int m4u_debug_monitor_show(struct seq_file *s, void *unused)
{
	m4u_print_perf_counter(0, 0, "monitor");
	return 0;
}

int m4u_debug_monitor_open(struct inode *inode, struct file *file)
{
	return single_open(file, m4u_debug_monitor_show, inode->i_private);
}

const struct file_operations m4u_debug_monitor_fops = {
	.open = m4u_debug_monitor_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
+
/* debugfs "register": dump the M4U hardware registers. */
int m4u_debug_register_show(struct seq_file *s, void *unused)
{
	m4u_dump_reg(0, 0);
	return 0;
}

int m4u_debug_register_open(struct inode *inode, struct file *file)
{
	return single_open(file, m4u_debug_register_show, inode->i_private);
}

const struct file_operations m4u_debug_register_fops = {
	.open = m4u_debug_register_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
+
/* Create the m4u debugfs directory and its control/dump files, all bound
 * to domain 0. Failures are logged but not fatal (debugfs is optional),
 * so the function always returns 0.
 */
int m4u_debug_init(struct m4u_device *m4u_dev)
{
	struct dentry *debug_file;
	m4u_domain_t *domain = m4u_get_domain_by_id(0);

	m4u_dev->debug_root = debugfs_create_dir("m4u", NULL);

	if (IS_ERR_OR_NULL(m4u_dev->debug_root))
		M4UMSG("m4u: failed to create debug dir.\n");

	/* buffer dump */
	debug_file = debugfs_create_file("buffer", 0644, m4u_dev->debug_root, domain, &m4u_debug_buf_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 1.\n");

	/* write-only command interface (m4u_debug_set) */
	debug_file = debugfs_create_file("debug", 0644, m4u_dev->debug_root, domain, &m4u_debug_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 2.\n");

#if (M4U_DVT != 0)
	/* hardware design-verification scenarios (DVT builds only) */
	debug_file = debugfs_create_file("test", 0644, m4u_dev->debug_root, domain, &m4u_test_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 3.\n");
#endif

	debug_file = debugfs_create_file("port", 0644, m4u_dev->debug_root, domain, &m4u_debug_port_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 4.\n");

	debug_file = debugfs_create_file("log_level", 0644, m4u_dev->debug_root, domain, &m4u_log_level_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 5.\n");

	debug_file = debugfs_create_file("monitor", 0644, m4u_dev->debug_root, domain, &m4u_debug_monitor_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 6.\n");

	debug_file = debugfs_create_file("register", 0644, m4u_dev->debug_root, domain, &m4u_debug_register_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 7.\n");

	debug_file = debugfs_create_file("freemva", 0644, m4u_dev->debug_root, domain, &m4u_debug_freemva_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 8.\n");

	debug_file = debugfs_create_file("mva", 0644, m4u_dev->debug_root, domain, &m4u_debug_mva_fops);
	if (IS_ERR_OR_NULL(debug_file))
		M4UMSG("m4u: failed to create debug files 9.\n");


	return 0;
}
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_debug.h b/drivers/misc/mediatek/m4u/2.0/m4u_debug.h
new file mode 100644
index 000000000..da8f9850e
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_debug.h
@@ -0,0 +1,15 @@
#ifndef __M4U_DEBUG_H__
#define __M4U_DEBUG_H__

/* Protect VA used by the fault-protection test path (defined in m4u core). */
extern unsigned long gM4U_ProtectVA;

/* Optional display-path memory self-tests; declared weak so the m4u debug
 * code links even when the DISP driver does not provide them. */
extern __attribute__((weak)) int ddp_mem_test(void);
extern __attribute__((weak)) int __ddp_mem_test(unsigned int *pSrc, unsigned int pSrcPa,
                                                unsigned int *pDst, unsigned int pDstPa,
                                                int need_sync);

#ifdef M4U_TEE_SERVICE_ENABLE
/* TEE-backed secure init / secure port configuration. */
extern int m4u_sec_init(void);
extern int m4u_config_port_tee(M4U_PORT_STRUCT *pM4uPort);
#endif
#endif
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_mva.c b/drivers/misc/mediatek/m4u/2.0/m4u_mva.c
new file mode 100644
index 000000000..eea3a7ebb
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_mva.c
@@ -0,0 +1,392 @@
+#include <linux/spinlock.h>
+#include "m4u_priv.h"
+
/* ((va&0xfff)+size+0xfff)>>12 */
#define mva_pageOffset(mva) ((mva)&0xfff)

#define MVA_BLOCK_SIZE_ORDER 20	/* 1M */
#define MVA_MAX_BLOCK_NR 4095	/* 4GB */

#define MVA_BLOCK_SIZE (1<<MVA_BLOCK_SIZE_ORDER)	/* 0x100000 */
#define MVA_BLOCK_ALIGN_MASK (MVA_BLOCK_SIZE-1)	/* 0xfffff */
#define MVA_BLOCK_NR_MASK (MVA_MAX_BLOCK_NR)	/* 0xfff */
#define MVA_BUSY_MASK (1<<15)	/* 0x8000 */

#define MVA_IS_BUSY(index) ((mvaGraph[index]&MVA_BUSY_MASK) != 0)
#define MVA_SET_BUSY(index) (mvaGraph[index] |= MVA_BUSY_MASK)
/* NOTE(review): MVA_SET_FREE has no assignment — it only evaluates the
 * cleared value and does not modify mvaGraph[index] */
#define MVA_SET_FREE(index) (mvaGraph[index] & (~MVA_BUSY_MASK))
#define MVA_GET_NR(index) (mvaGraph[index] & MVA_BLOCK_NR_MASK)

#define MVAGRAPH_INDEX(mva) (mva>>MVA_BLOCK_SIZE_ORDER)

/* Region map over 4095 1MB blocks: mvaGraph[] stores the region's block
 * count at both its head and tail index (0 at interior indices); bit15
 * marks the region busy.  mvaInfoGraph[] mirrors the head/tail entries
 * with the owner's private pointer.  Both are guarded by gMvaGraph_lock. */
static short mvaGraph[MVA_MAX_BLOCK_NR + 1];
static void *mvaInfoGraph[MVA_MAX_BLOCK_NR + 1];
static DEFINE_SPINLOCK(gMvaGraph_lock);
+
+void m4u_mvaGraph_init(void *priv_reserve)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+ memset(mvaGraph, 0, sizeof(short) * (MVA_MAX_BLOCK_NR + 1));
+ memset(mvaInfoGraph, 0, sizeof(void *) * (MVA_MAX_BLOCK_NR + 1));
+ mvaGraph[0] = 1 | MVA_BUSY_MASK;
+ mvaInfoGraph[0] = priv_reserve;
+ mvaGraph[1] = MVA_MAX_BLOCK_NR;
+ mvaInfoGraph[1] = priv_reserve;
+ mvaGraph[MVA_MAX_BLOCK_NR] = MVA_MAX_BLOCK_NR;
+ mvaInfoGraph[MVA_MAX_BLOCK_NR] = priv_reserve;
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+}
+
+void m4u_mvaGraph_dump_raw(void)
+{
+ int i;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+ M4ULOG_HIGH("[M4U_K] dump raw data of mvaGraph:============>\n");
+ for (i = 0; i < MVA_MAX_BLOCK_NR + 1; i++)
+ M4ULOG_HIGH("0x%4x: 0x%08x\n", i, mvaGraph[i]);
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+}
+
/* Walk the region graph and print a human-readable summary: each region
 * (start address, size, block count, busy flag), busy/free block totals,
 * and a histogram of free regions bucketed by highest set bit of their
 * block count.  The per-region pass holds gMvaGraph_lock; the summary is
 * printed after the lock is dropped. */
void m4u_mvaGraph_dump(void)
{
	unsigned int addr = 0, size = 0;
	short index = 1, nr = 0;
	int i, max_bit, is_busy;
	short frag[12] = { 0 };	/* frag[b]: free regions whose top bit of nr is b */
	short nr_free = 0, nr_alloc = 0;
	unsigned long irq_flags;

	M4ULOG_HIGH("[M4U_K] mva allocation info dump:====================>\n");
	M4ULOG_HIGH("start size blocknum busy\n");

	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
	/* regions are contiguous: each head index advances by its block count */
	for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
		addr = index << MVA_BLOCK_SIZE_ORDER;
		nr = MVA_GET_NR(index);
		size = nr << MVA_BLOCK_SIZE_ORDER;
		if (MVA_IS_BUSY(index)) {
			is_busy = 1;
			nr_alloc += nr;
		} else {	/* mva region is free */
			is_busy = 0;
			nr_free += nr;

			/* bucket this free region by the highest set bit of nr */
			max_bit = 0;
			for (i = 0; i < 12; i++) {
				if (nr & (1 << i))
					max_bit = i;
			}
			frag[max_bit]++;
		}

		M4ULOG_HIGH("0x%08x 0x%08x %4d %d\n", addr, size, nr, is_busy);
	}

	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);

	M4ULOG_HIGH("\n");
	M4ULOG_HIGH("[M4U_K] mva alloc summary: (unit: blocks)========================>\n");
	M4ULOG_HIGH("free: %d , alloc: %d, total: %d\n", nr_free, nr_alloc, nr_free + nr_alloc);
	M4ULOG_HIGH("[M4U_K] free region fragments in 2^x blocks unit:===============\n");
	M4ULOG_HIGH(" 0 1 2 3 4 5 6 7 8 9 10 11\n");
	M4ULOG_HIGH("%4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d\n",
		    frag[0], frag[1], frag[2], frag[3], frag[4], frag[5], frag[6],
		    frag[7], frag[8], frag[9], frag[10], frag[11]);
	M4ULOG_HIGH("[M4U_K] mva alloc dump done=========================<\n");
}
+
+void *mva_get_priv_ext(unsigned int mva)
+{
+ void *priv = NULL;
+ int index;
+ unsigned long irq_flags;
+
+ index = MVAGRAPH_INDEX(mva);
+ if (index == 0 || index > MVA_MAX_BLOCK_NR) {
+ M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+
+ /* find prev head/tail of this region */
+ while (mvaGraph[index] == 0)
+ index--;
+
+ if (MVA_IS_BUSY(index))
+ priv = mvaInfoGraph[index];
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+ return priv;
+}
+
+int mva_foreach_priv(mva_buf_fn_t *fn, void *data)
+{
+ short index = 1, nr = 0;
+ unsigned int mva;
+ void *priv;
+ unsigned long irq_flags;
+ int ret;
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+
+ for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
+ mva = index << MVA_BLOCK_SIZE_ORDER;
+ nr = MVA_GET_NR(index);
+ if (MVA_IS_BUSY(index)) {
+ priv = mvaInfoGraph[index];
+ ret = fn(priv, mva, mva + nr * MVA_BLOCK_SIZE, data);
+ if (ret)
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+ return 0;
+}
+
+unsigned int get_first_valid_mva(void)
+{
+ short index = 1, nr = 0;
+ unsigned int mva;
+ void *priv;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+
+ for (index = 1; index < MVA_MAX_BLOCK_NR + 1; index += nr) {
+ mva = index << MVA_BLOCK_SIZE_ORDER;
+ nr = MVA_GET_NR(index);
+ if (MVA_IS_BUSY(index)) {
+ priv = mvaInfoGraph[index];
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+ return mva;
+}
+
+
+void *mva_get_priv(unsigned int mva)
+{
+ void *priv = NULL;
+ int index;
+ unsigned long irq_flags;
+
+ index = MVAGRAPH_INDEX(mva);
+ if (index == 0 || index > MVA_MAX_BLOCK_NR) {
+ M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+
+ if (MVA_IS_BUSY(index))
+ priv = mvaInfoGraph[index];
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+ return priv;
+}
+
/* First-fit allocation of an mva region big enough to map size bytes
 * starting at va's in-page offset.  priv is recorded as the owner at
 * the region's head and tail indices.
 * Returns the mva (block start + (va & 0xfff)), or 0 on failure. */
unsigned int m4u_do_mva_alloc(unsigned long va, unsigned int size, void *priv)
{
	short s, end;
	short new_start, new_end;
	short nr = 0;
	unsigned int mvaRegionStart;
	unsigned long startRequire, endRequire, sizeRequire;
	unsigned long irq_flags;

	if (size == 0)
		return 0;

	/* ----------------------------------------------------- */
	/* calculate mva block number */
	startRequire = va & (~M4U_PAGE_MASK);
	endRequire = (va + size - 1) | M4U_PAGE_MASK;
	sizeRequire = endRequire - startRequire + 1;
	nr = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
	/* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */

	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);

	/* ----------------------------------------------- */
	/* find first match free region */
	/* busy heads have bit15 set, so as signed shorts they compare < nr
	 * and are stepped over; free heads store their size directly */
	for (s = 1; (s < (MVA_MAX_BLOCK_NR + 1)) && (mvaGraph[s] < nr); s += (mvaGraph[s] & MVA_BLOCK_NR_MASK))
		;
	if (s > MVA_MAX_BLOCK_NR) {
		spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
		M4UMSG("mva_alloc error: no available MVA region for %d blocks!\n", nr);
		MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, size, s);

		return 0;
	}
	/* ----------------------------------------------- */
	/* alloc a mva region */
	end = s + mvaGraph[s] - 1;

	if (unlikely(nr == mvaGraph[s])) {
		/* exact fit: mark the whole region busy in place */
		MVA_SET_BUSY(s);
		MVA_SET_BUSY(end);
		mvaInfoGraph[s] = priv;
		mvaInfoGraph[end] = priv;
	} else {
		/* split: [s, new_end] becomes busy, [new_start, end] stays free */
		new_end = s + nr - 1;
		new_start = new_end + 1;
		/* note: new_start may equals to end */
		mvaGraph[new_start] = (mvaGraph[s] - nr);
		mvaGraph[new_end] = nr | MVA_BUSY_MASK;
		mvaGraph[s] = mvaGraph[new_end];
		mvaGraph[end] = mvaGraph[new_start];

		mvaInfoGraph[s] = priv;
		mvaInfoGraph[new_end] = priv;
	}

	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);

	mvaRegionStart = (unsigned int)s;

	return (mvaRegionStart << MVA_BLOCK_SIZE_ORDER) + mva_pageOffset(va);
}
+
/* Reserve the blocks covering [mva, mva+size) at a caller-chosen fixed
 * address.  Fails (returns 0) when the index is out of range or any
 * block in the range is already busy; on success returns mva unchanged
 * and records priv at the carved region's head and tail. */
unsigned int m4u_do_mva_alloc_fix(unsigned int mva, unsigned int size, void *priv)
{
	short nr = 0;
	unsigned int startRequire, endRequire, sizeRequire;
	unsigned long irq_flags;
	short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
	short endIdx;
	short region_start, region_end;

	if (size == 0)
		return 0;
	if (startIdx == 0 || startIdx > MVA_MAX_BLOCK_NR) {
		M4UMSG("mvaGraph index is 0. index=0x%x\n", startIdx);
		return 0;
	}

	/* ----------------------------------------------------- */
	/* calculate mva block number */
	startRequire = mva & (~MVA_BLOCK_ALIGN_MASK);
	endRequire = (mva + size - 1) | MVA_BLOCK_ALIGN_MASK;
	sizeRequire = endRequire - startRequire + 1;
	nr = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
	/* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */

	spin_lock_irqsave(&gMvaGraph_lock, irq_flags);

	region_start = startIdx;
	/* find prev head of this region */
	while (mvaGraph[region_start] == 0)
		region_start--;

	/* the enclosing region must be free and large enough to contain
	 * [startIdx, startIdx + nr - 1] */
	if (MVA_IS_BUSY(region_start) || (MVA_GET_NR(region_start) < nr + startIdx - region_start)) {
		M4UMSG("mva is inuse index=0x%x, mvaGraph=0x%x\n", region_start, mvaGraph[region_start]);
		mva = 0;
		goto out;
	}

	/* carveout startIdx~startIdx+nr-1 out of region_start */
	endIdx = startIdx + nr - 1;
	region_end = region_start + MVA_GET_NR(region_start) - 1;

	/* four cases: exact fit / free tail remains / free head remains /
	 * free head and tail both remain */
	if (startIdx == region_start && endIdx == region_end) {
		MVA_SET_BUSY(startIdx);
		MVA_SET_BUSY(endIdx);
	} else if (startIdx == region_start) {
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
		mvaGraph[endIdx + 1] = region_end - endIdx;
		mvaGraph[region_end] = mvaGraph[endIdx + 1];
	} else if (endIdx == region_end) {
		mvaGraph[region_start] = startIdx - region_start;
		mvaGraph[startIdx - 1] = mvaGraph[region_start];
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
	} else {
		mvaGraph[region_start] = startIdx - region_start;
		mvaGraph[startIdx - 1] = mvaGraph[region_start];
		mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
		mvaGraph[endIdx] = mvaGraph[startIdx];
		mvaGraph[endIdx + 1] = region_end - endIdx;
		mvaGraph[region_end] = mvaGraph[endIdx + 1];
	}

	mvaInfoGraph[startIdx] = priv;
	mvaInfoGraph[endIdx] = priv;

out:
	spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);

	return mva;
}
+
+#define RightWrong(x) ((x) ? "correct" : "error")
+int m4u_do_mva_free(unsigned int mva, unsigned int size)
+{
+ short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
+ short nr = mvaGraph[startIdx] & MVA_BLOCK_NR_MASK;
+ short endIdx = startIdx + nr - 1;
+ unsigned int startRequire, endRequire, sizeRequire;
+ short nrRequire;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
+ /* -------------------------------- */
+ /* check the input arguments */
+ /* right condition: startIdx is not NULL && region is busy && right module && right size */
+ startRequire = mva & (unsigned int)(~M4U_PAGE_MASK);
+ endRequire = (mva + size - 1) | (unsigned int)M4U_PAGE_MASK;
+ sizeRequire = endRequire - startRequire + 1;
+ nrRequire = (sizeRequire + MVA_BLOCK_ALIGN_MASK) >> MVA_BLOCK_SIZE_ORDER;
+ /* (sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); */
+ if (!(startIdx != 0 /* startIdx is not NULL */
+ && MVA_IS_BUSY(startIdx)
+ && (nr == nrRequire))) {
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+ M4UMSG("error to free mva========================>\n");
+ M4UMSG("BufSize=%d(unit:0x%xBytes) (expect %d) [%s]\n",
+ nrRequire, MVA_BLOCK_SIZE, nr, RightWrong(nrRequire == nr));
+ M4UMSG("mva=0x%x, (IsBusy?)=%d (expect %d) [%s]\n",
+ mva, MVA_IS_BUSY(startIdx), 1, RightWrong(MVA_IS_BUSY(startIdx)));
+ m4u_mvaGraph_dump();
+ /* m4u_mvaGraph_dump_raw(); */
+ return -1;
+ }
+
+ mvaInfoGraph[startIdx] = NULL;
+ mvaInfoGraph[endIdx] = NULL;
+
+ /* -------------------------------- */
+ /* merge with followed region */
+ if ((endIdx + 1 <= MVA_MAX_BLOCK_NR) && (!MVA_IS_BUSY(endIdx + 1))) {
+ nr += mvaGraph[endIdx + 1];
+ mvaGraph[endIdx] = 0;
+ mvaGraph[endIdx + 1] = 0;
+ }
+ /* -------------------------------- */
+ /* merge with previous region */
+ if ((startIdx - 1 > 0) && (!MVA_IS_BUSY(startIdx - 1))) {
+ int pre_nr = mvaGraph[startIdx - 1];
+
+ mvaGraph[startIdx] = 0;
+ mvaGraph[startIdx - 1] = 0;
+ startIdx -= pre_nr;
+ nr += pre_nr;
+ }
+ /* -------------------------------- */
+ /* set region flags */
+ mvaGraph[startIdx] = nr;
+ mvaGraph[startIdx + nr - 1] = nr;
+
+ spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
+
+ return 0;
+}
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_mva.h b/drivers/misc/mediatek/m4u/2.0/m4u_mva.h
index f6963d576..f6963d576 100644
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_mva.h
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_mva.h
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.c b/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.c
new file mode 100644
index 000000000..82a45c82a
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.c
@@ -0,0 +1,1008 @@
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "m4u_priv.h"
+
/* Decoded translation for a single mva: the descriptor(s) that map it,
 * the resulting physical address, the mapping granule, and validity. */
typedef struct {
	imu_pgd_t *pgd;		/* first-level descriptor covering mva */
	imu_pte_t *pte;		/* second-level entry; NULL for (super)section */
	unsigned int mva;	/* the queried modified virtual address */
	unsigned long pa;	/* translated physical address (0 when invalid) */
	unsigned int size;	/* granule / step size (4K, 64K, 1M or 16M) */
	int valid;		/* non-zero when the descriptor maps mva */
} m4u_pte_info_t;
+
/* Write a first-level descriptor through the register-write helper so
 * the update is performed as one explicit 32-bit store. */
static inline void m4u_set_pgd_val(imu_pgd_t *pgd, unsigned int val)
{
	COM_WriteReg32((unsigned long)&(imu_pgd_val(*pgd)), val);
}
+
/* Page-table lock helpers.  Despite the read/write naming, all four
 * take the same mutex exclusively — there is no reader/writer
 * distinction; the names only document the caller's intent. */
static inline void read_lock_domain(m4u_domain_t *domain)
{
	mutex_lock(&domain->pgtable_mutex);
}

static inline void read_unlock_domain(m4u_domain_t *domain)
{
	mutex_unlock(&domain->pgtable_mutex);
}

static inline void write_lock_domain(m4u_domain_t *domain)
{
	mutex_lock(&domain->pgtable_mutex);
}

static inline void write_unlock_domain(m4u_domain_t *domain)
{
	mutex_unlock(&domain->pgtable_mutex);
}
+
+/* should not hold pg_lock when call this func. */
+inline int m4u_get_pt_type(m4u_domain_t *domain, unsigned int mva)
+{
+ imu_pgd_t *pgd;
+ imu_pte_t *pte;
+ int ret;
+
+ read_lock_domain(domain);
+
+ pgd = imu_pgd_offset(domain, mva);
+
+ if (F_PGD_TYPE_IS_PAGE(*pgd)) {
+ pte = imu_pte_offset_map(pgd, mva);
+ if (F_PTE_TYPE_GET(imu_pte_val(*pte)) == F_PTE_TYPE_LARGE) {
+ imu_pte_unmap(pte);
+ ret = MMU_PT_TYPE_LARGE_PAGE;
+ } else if (F_PTE_TYPE_GET(imu_pte_val(*pte)) == F_PTE_TYPE_SMALL) {
+ imu_pte_unmap(pte);
+ ret = MMU_PT_TYPE_SMALL_PAGE;
+ } else {
+ imu_pte_unmap(pte);
+ ret = -1;
+ }
+ } else if (F_PGD_TYPE_IS_SECTION(*pgd)) {
+ ret = MMU_PT_TYPE_SECTION;
+ } else if (F_PGD_TYPE_IS_SUPERSECTION(*pgd)) {
+ ret = MMU_PT_TYPE_SUPERSECTION;
+ } else {
+ ret = -1;
+ }
+ read_unlock_domain(domain);
+ return ret;
+}
+
+static inline unsigned int m4u_get_pt_type_size(int type)
+{
+ if (type == MMU_PT_TYPE_SMALL_PAGE)
+ return MMU_SMALL_PAGE_SIZE;
+ else if (type == MMU_PT_TYPE_LARGE_PAGE)
+ return MMU_LARGE_PAGE_SIZE;
+ else if (type == MMU_PT_TYPE_SECTION)
+ return MMU_SECTION_SIZE;
+ else if (type == MMU_PT_TYPE_SUPERSECTION)
+ return MMU_SUPERSECTION_SIZE;
+ else
+ return -1;
+}
+
+/***********************************************************/
+/** print pte info to log or sequential file
+ if data is NULL, info is out put to kernel log by pr log
+ if pte is valid, we will print like va->pgd->pte->pa
+ if pte is invalid, we print as many info as we can.
+* @return NULL
+* @remark
+* @see
+* @author K Zhang @date 2013/11/18
+************************************************************/
/* Print one decoded translation: valid entries as mva->pgd[->pte]->pa
 * with the granule name, invalid ones with as much of the chain as
 * exists.  data is the seq_file, or NULL to print to the kernel log.
 * Always returns NULL so it can be passed to m4u_for_each_pte(). */
void *__m4u_print_pte(m4u_pte_info_t *info, void *data)
{
	if (info->valid) {
		if (info->size == SZ_4K) {
			M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pte(0x%x)-->pa(0x%lx) small\n",
					     info->mva, imu_pgd_val(*info->pgd), imu_pte_val(*info->pte), info->pa);
		} else if (info->size == SZ_64K) {
			M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pte(0x%x)-->pa(0x%lx) large\n",
					     info->mva, imu_pgd_val(*info->pgd), imu_pte_val(*info->pte), info->pa);
		} else if (info->size == SZ_1M) {
			M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pa(0x%lx) section\n",
					     info->mva, imu_pgd_val(*info->pgd), info->pa);
		} else if (info->size == SZ_16M) {
			M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pa(0x%lx) super\n",
					     info->mva, imu_pgd_val(*info->pgd), info->pa);
		}
	} else {
		/* invalid: pte is only printed when a second-level table exists */
		M4U_PRINT_LOG_OR_SEQ(data, "va(0x%x)", info->mva);
		M4U_PRINT_LOG_OR_SEQ(data, "-->pgd(0x%x)", imu_pgd_val(*info->pgd));
		if (info->pte)
			M4U_PRINT_LOG_OR_SEQ(data, "-->pte(0x%x)", imu_pte_val(*info->pte));
		M4U_PRINT_LOG_OR_SEQ(data, " invalid\n");
	}

	return NULL;
}
+
+/* domain->pgtable_mutex should be held */
/* Decode the descriptor chain for mva into *pte_info.
 * Handles all four granules (small/large page, section, supersection);
 * pa combines the descriptor's base bits with mva's offset bits.
 * For an invalid entry, valid is 0 and size is set to the step the
 * caller should advance by (small page inside a page table, otherwise
 * section).  Caller must hold domain->pgtable_mutex.  Always returns 0. */
int m4u_get_pte_info(m4u_domain_t *domain, unsigned int mva, m4u_pte_info_t *pte_info)
{
	imu_pgd_t *pgd;
	imu_pte_t *pte = NULL;
	unsigned int pa = 0;
	unsigned int size;
	int valid = 1;

	pgd = imu_pgd_offset(domain, mva);

	if (F_PGD_TYPE_IS_PAGE(*pgd)) {
		pte = imu_pte_offset_map(pgd, mva);
		if (F_PTE_TYPE_GET(imu_pte_val(*pte)) == F_PTE_TYPE_LARGE) {
			pa = imu_pte_val(*pte) & F_PTE_PA_LARGE_MSK;
			pa |= mva & (~F_PTE_PA_LARGE_MSK);
			size = MMU_LARGE_PAGE_SIZE;
		} else if (F_PTE_TYPE_GET(imu_pte_val(*pte)) == F_PTE_TYPE_SMALL) {
			pa = imu_pte_val(*pte) & F_PTE_PA_SMALL_MSK;
			pa |= mva & (~F_PTE_PA_SMALL_MSK);
			size = MMU_SMALL_PAGE_SIZE;
		} else {
			/* unrecognized pte type: report invalid, step by 4K */
			valid = 0;
			size = MMU_SMALL_PAGE_SIZE;
		}
	} else {
		pte = NULL;
		if (F_PGD_TYPE_IS_SECTION(*pgd)) {
			pa = imu_pgd_val(*pgd) & F_PGD_PA_SECTION_MSK;
			pa |= mva & (~F_PGD_PA_SECTION_MSK);
			size = MMU_SECTION_SIZE;
		} else if (F_PGD_TYPE_IS_SUPERSECTION(*pgd)) {
			pa = imu_pgd_val(*pgd) & F_PGD_PA_SUPERSECTION_MSK;
			pa |= mva & (~F_PGD_PA_SUPERSECTION_MSK);
			size = MMU_SUPERSECTION_SIZE;
		} else {
			/* empty/unknown pgd: report invalid, step by 1M */
			valid = 0;
			size = MMU_SECTION_SIZE;
		}
	}

	pte_info->pgd = pgd;
	pte_info->pte = pte;
	pte_info->mva = mva;
	pte_info->pa = pa;
	pte_info->size = size;
	pte_info->valid = valid;
	return 0;
}
+
+typedef void *(m4u_pte_fn_t) (m4u_pte_info_t *pte_info, void *data);
+
+/***********************************************************/
+/** iterate all pte, and call fn for each pte.
+* @param domain
+* @param fn -- to be called for each pte
+* @param data -- private data for fn
+*
+* @return NULL of success, non-NULL if interrupted by fn.
+* @remark
+ 1. fn will only be called when pte is valid.
+   2. if fn returns non-NULL, the iteration will return immediately.
+* @see
+* @author K Zhang @date 2013/11/18
+************************************************************/
+void *m4u_for_each_pte(m4u_domain_t *domain, m4u_pte_fn_t *fn, void *data)
+{
+ unsigned int mva = 0;
+ void *ret;
+ m4u_pte_info_t pte_info;
+
+ read_lock_domain(domain);
+ while (1) {
+ m4u_get_pte_info(domain, mva, &pte_info);
+
+ if (pte_info.valid) {
+ ret = fn(&pte_info, data);
+ if (ret) {
+ read_unlock_domain(domain);
+ return ret;
+ }
+ }
+
+ if (mva + pte_info.size < mva) /* over flow */
+ break;
+ mva += pte_info.size;
+ }
+
+ read_unlock_domain(domain);
+ return NULL;
+}
+
+/* dump pte info for mva, no matter it's valid or not */
+/* this function doesn't lock pgtable lock. */
+void m4u_dump_pte_nolock(m4u_domain_t *domain, unsigned int mva)
+{
+ m4u_pte_info_t pte_info;
+
+ m4u_get_pte_info(domain, mva, &pte_info);
+
+ __m4u_print_pte(&pte_info, NULL);
+}
+
/* NOTE(review): despite the name, this returns non-zero when mva HAS a
 * valid translation with pa != 0 (i.e. "pte is valid") — callers should
 * confirm the intended polarity.  Also reads the tables without taking
 * the domain lock. */
int m4u_pte_invalid(m4u_domain_t *domain, unsigned int mva)
{
	m4u_pte_info_t info;

	m4u_get_pte_info(domain, mva, &info);
	return (info.pa != 0) && info.valid;
}
+
/* Locked variant of m4u_dump_pte_nolock(): dump one mva's translation
 * while holding the domain's page-table mutex (may sleep). */
void m4u_dump_pte(m4u_domain_t *domain, unsigned int mva)
{
	read_lock_domain(domain);
	m4u_dump_pte_nolock(domain, mva);
	read_unlock_domain(domain);
}
+
+unsigned long m4u_get_pte(m4u_domain_t *domain, unsigned int mva)
+{
+ m4u_pte_info_t pte_info;
+
+ read_lock_domain(domain);
+ m4u_get_pte_info(domain, mva, &pte_info);
+ read_unlock_domain(domain);
+
+ return pte_info.pa;
+}
+
+/***********************************************************/
+/** dump pagetable to sequential file or kernel log.
+* @param domain -- domain to dump
+* @param seq -- seq file. if NULL, we will dump to kernel log
+*
+* @remark this func will lock pgtable_lock, it may sleep.
+* @author K Zhang @date 2013/11/18
+************************************************************/
void m4u_dump_pgtable(m4u_domain_t *domain, struct seq_file *seq)
{
	M4U_PRINT_LOG_OR_SEQ(seq, "m4u dump pgtable start ==============>\n");
	/* __m4u_print_pte always returns NULL, so the walk never aborts */
	m4u_for_each_pte(domain, __m4u_print_pte, seq);
	M4U_PRINT_LOG_OR_SEQ(seq, "m4u dump pgtable done ==============>\n");
}
+
+/* M4U_PROT_CACHE indicates M4U_PROT_SHARE, which route transaction to CCI*/
+static inline unsigned int m4u_prot_fixup(unsigned int prot)
+{
+ /* don't support read/write protect */
+/*
+ if(unlikely(!(prot & (M4U_PROT_READ|M4U_PROT_WRITE))))
+ prot |= M4U_PROT_READ|M4U_PROT_WRITE;
+ if(unlikely((prot&M4U_PROT_WRITE) && !(prot&M4U_PROT_READ)))
+ prot |= M4U_PROT_WRITE;
+*/
+ if (prot & M4U_PROT_CACHE)
+ prot |= M4U_PROT_SHARE;
+
+ return prot;
+}
+
+/***********************************************************/
+/** convert m4u_prot to hardware pgd/pte attribute
+* @param prot -- m4u_prot flags
+*
+* @return pgd or pte attribute
+* @remark
+* @see
+* @author K Zhang @date 2013/11/18
+************************************************************/
+static inline unsigned int __m4u_get_pgd_attr_16M(unsigned int prot)
+{
+ unsigned int pgprot;
+
+ pgprot = F_PGD_TYPE_SUPERSECTION;
+ pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_SECTION(1);
+ pgprot |= (prot & M4U_PROT_SHARE) ? F_PGD_S_BIT : 0;
+ pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT | F_PGD_B_BIT) : 0;
+ if (gM4U_4G_DRAM_Mode)
+ pgprot |= F_PGD_BIT32_BIT;
+ return pgprot;
+}
+
+static inline unsigned int __m4u_get_pgd_attr_1M(unsigned int prot)
+{
+ unsigned int pgprot;
+
+ pgprot = F_PGD_TYPE_SECTION;
+ pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_SECTION(1);
+ pgprot |= (prot & M4U_PROT_SHARE) ? F_PGD_S_BIT : 0;
+ pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT | F_PGD_B_BIT) : 0;
+ if (gM4U_4G_DRAM_Mode)
+ pgprot |= F_PGD_BIT32_BIT;
+ return pgprot;
+}
+
+static inline unsigned int __m4u_get_pgd_attr_page(unsigned int prot)
+{
+ unsigned int pgprot;
+
+ pgprot = F_PGD_TYPE_PAGE;
+ pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_PAGE(1);
+ return pgprot;
+}
+
/* Build large-page (64K) pte attribute bits from m4u prot flags.
 * NOTE(review): uses F_PGD_C_BIT/F_PGD_B_BIT for the cacheable bits of
 * a *pte* — looks copy/pasted from the pgd helpers; harmless only if
 * the PGD and PTE C/B bit positions coincide — TODO confirm. */
static inline unsigned int __m4u_get_pte_attr_64K(unsigned int prot)
{
	unsigned int pgprot;

	pgprot = F_PTE_TYPE_LARGE;
	pgprot |= (prot & M4U_PROT_SHARE) ? F_PTE_S_BIT : 0;
	pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT | F_PGD_B_BIT) : 0;
	if (gM4U_4G_DRAM_Mode)
		pgprot |= F_PTE_BIT32_BIT;
	return pgprot;
}
+
/* Build small-page (4K) pte attribute bits from m4u prot flags.
 * NOTE(review): same F_PGD_C_BIT/F_PGD_B_BIT-in-a-pte question as the
 * 64K helper above — TODO confirm the bit positions match. */
static inline unsigned int __m4u_get_pte_attr_4K(unsigned int prot)
{
	unsigned int pgprot;

	pgprot = F_PTE_TYPE_SMALL;
	pgprot |= (prot & M4U_PROT_SHARE) ? F_PTE_S_BIT : 0;
	pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT | F_PGD_B_BIT) : 0;
	if (gM4U_4G_DRAM_Mode)
		pgprot |= F_PTE_BIT32_BIT;
	return pgprot;
}
+
+/***********************************************************/
+/** cache flush for modified pte.
+ notes: because pte is allocated using slab, cache sync is needed.
+*
+* @author K Zhang @date 2013/11/18
+************************************************************/
/* Flush CPU cache lines covering the pte entries that map
 * [mva, mva+size) so the IOMMU observes the updated tables.  Only
 * page-table-backed pgds need syncing; (super)section entries live in
 * the pgd itself.  64-bit cursors are used so a range ending at exactly
 * 4GB does not wrap.  Always returns 0. */
int m4u_clean_pte(m4u_domain_t *domain, unsigned int mva, unsigned int size)
{
	imu_pgd_t *pgd;
	unsigned long long tmp_mva = (unsigned long long)mva;
	unsigned long long end_plus_1 = tmp_mva + (unsigned long long)size;

	while (tmp_mva < end_plus_1) {
		pgd = imu_pgd_offset(domain, tmp_mva);

		if (F_PGD_TYPE_IS_PAGE(*pgd)) {
			imu_pte_t *pte, *pte_end;
			unsigned long long next_mva, sync_entry_nr;

			pte = imu_pte_offset_map(pgd, tmp_mva);
			if (!pte) {
				/* invalid pte: goto next pgd entry */
				tmp_mva = m4u_calc_next_mva(tmp_mva, end_plus_1, MMU_SECTION_SIZE);
				continue;
			}

			/* flush only the entries of this pgd that fall in range */
			next_mva = m4u_calc_next_mva(tmp_mva, end_plus_1, MMU_SECTION_SIZE);
			sync_entry_nr = (next_mva - tmp_mva) >> 12; /*(next_mva - tmp_mva) / MMU_SMALL_PAGE_SIZE*/
			pte_end = pte + sync_entry_nr;
			/* do cache sync for [pte, pte_end) */
			dmac_flush_range((void *)pte, (void *)pte_end);
			/* M4UMSG("dmac_flush_range: 0x%p ~ 0x%p\n", pte, pte_end); */

			imu_pte_unmap(pte);
			tmp_mva = next_mva;

		} else if (F_PGD_TYPE_IS_SUPERSECTION(*pgd)) {
			/* for superseciton: don't need to sync. */
			tmp_mva = m4u_calc_next_mva(tmp_mva, end_plus_1, MMU_SUPERSECTION_SIZE);
		} else {
			/* for section/invalid: don't need to sync */
			tmp_mva = m4u_calc_next_mva(tmp_mva, end_plus_1, MMU_SECTION_SIZE);
		}
	}

	return 0;
}
+
+struct kmem_cache *gM4u_pte_kmem = NULL;
+int m4u_pte_allocator_init(void)
+{
+ gM4u_pte_kmem = kmem_cache_create("m4u_pte", IMU_BYTES_PER_PTE, IMU_BYTES_PER_PTE, 0, NULL);
+ M4UINFO("%s: gM4u_pte_kmem = 0x%p, IMU_BYTES_PER_PTE = %d\n", __func__, gM4u_pte_kmem,
+ (unsigned int)IMU_BYTES_PER_PTE);
+
+ if (IS_ERR_OR_NULL(gM4u_pte_kmem)) {
+ M4UMSG("error in %s: ret = %p\n", __func__, gM4u_pte_kmem);
+ return -1;
+ }
+
+ return 0;
+}
+
+/***********************************************************/
+/** allocate a new pte
+* @param domain
+* @param pgd -- pgd to allocate for
+* @param pgprot
+*
+* @return 0 -- pte is allocated
+ 1 -- pte is not allocated, because it's allocated by others
+ <0 -- error
+* @remark
+* @see
+* @author K Zhang @date 2013/11/18
+************************************************************/
/* Allocate a second-level page table and install it into pgd.
 * Returns 0 when installed, 1 when another thread installed one first
 * (ours is freed), <0 on allocation/alignment failure.
 * NOTE(review): the "lock and check again" comment below has no lock in
 * this function — presumably the caller serializes via the domain's
 * pgtable mutex; confirm before allowing concurrent callers. */
int m4u_alloc_pte(m4u_domain_t *domain, imu_pgd_t *pgd, unsigned int pgprot)
{
	void *pte_new_va;
	phys_addr_t pte_new;

	/* pte_new_va = (unsigned int)kzalloc(IMU_BYTES_PER_PTE, GFP_KERNEL); */
	/* pte_new_va = (unsigned int)get_zeroed_page(GFP_KERNEL); */
	pte_new_va = kmem_cache_zalloc(gM4u_pte_kmem, GFP_KERNEL);
	if (unlikely(!pte_new_va)) {
		m4u_aee_print("%s: fail, nomemory\n", __func__);
		return -ENOMEM;
	}
	pte_new = __pa(pte_new_va);

	/* check pte alignment -- must 1K align */
	if (unlikely(pte_new & (IMU_BYTES_PER_PTE - 1))) {
		m4u_aee_print("%s: fail, not algin pa=0x%p, va=0x%p\n", __func__,
			      (void *)(uintptr_t)pte_new, pte_new_va);
		/* kfree(pte_new_va); */
		kmem_cache_free(gM4u_pte_kmem, (void *)pte_new_va);
		return -ENOMEM;
	}
	/* lock and check again */
	/* because someone else may have allocated for this pgd first */
	if (likely(!imu_pgd_val(*pgd))) {
		m4u_set_pgd_val(pgd, (unsigned int)(pte_new) | pgprot);
		M4ULOG_LOW("%s: pgd: 0x%lx, pte_va:0x%lx, pte_pa: 0x%lx, value: 0x%x\n",
			   __func__, (unsigned long)pgd, (unsigned long)pte_new_va,
			   (unsigned long)pte_new, (unsigned int)(pte_new) | pgprot);

		return 0;

	} else {
		/* allocated by other thread */
		/* kfree(__va(pte_new)); */
		M4ULOG_LOW("m4u pte allocated by others: pgd=0x%p\n", pgd);
		kmem_cache_free(gM4u_pte_kmem, (void *)pte_new_va);
		return 1;
	}
}
+
+int m4u_free_pte(m4u_domain_t *domain, imu_pgd_t *pgd)
+{
+ imu_pte_t *pte_old;
+
+ pte_old = imu_pte_map(pgd);
+ m4u_set_pgd_val(pgd, 0);
+
+ /* kfree(pte_old); */
+ /* free_page(pte_old); */
+ kmem_cache_free(gM4u_pte_kmem, pte_old);
+
+ return 0;
+}
+
+/***********************************************************/
+/** m4u_map_XX functions.
+ map mva<->pa
+notes: these function doesn't clean pte and invalid tlb
+ for performance concern.
+ callers should clean pte + invalid tlb after mapping.
+
+* @author K Zhang @date 2013/11/19
+************************************************************/
/* Map one 16MB supersection mva->pa by writing the same descriptor into
 * 16 consecutive pgd entries (ARM short-descriptor replication); rolls
 * back every entry written if one is already in use.  Caller must clean
 * ptes / invalidate TLB afterwards (see the m4u_map_XX note above).
 * NOTE(review): the >4GB branch masks pa with F_PTE_PA_SMALL_MSK inside
 * a pgd descriptor — looks copy/pasted from the 4K path; TODO confirm.
 * Returns 0 on success, -EINVAL on misaligned mva/pa, -1 on conflict. */
int m4u_map_16M(m4u_domain_t *m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
{
	int i;
	imu_pgd_t *pgd;
	unsigned int pgprot;
	unsigned int padscpt;

	if ((mva & (~F_PGD_PA_SUPERSECTION_MSK)) != ((unsigned int)pa & (~F_PGD_PA_SUPERSECTION_MSK))) {
		m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "supersection");
		return -EINVAL;
	}

	mva &= F_PGD_PA_SUPERSECTION_MSK;
	if (pa > 0xffffffffL)
		padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK | F_PGD_BIT32_BIT);
	else
		padscpt = (unsigned int)pa & F_PGD_PA_SUPERSECTION_MSK;

	pgprot = __m4u_get_pgd_attr_16M(prot);
	pgd = imu_pgd_offset(m4u_domain, mva);

	M4ULOG_LOW("%s: mva: 0x%x, pgd: 0x%lx (0x%lx + 0x%x), pa: 0x%lx, value: 0x%x\n",
		   __func__, mva, (unsigned long)pgd, (unsigned long)((m4u_domain)->pgd),
		   imu_pgd_index(mva), pa, padscpt | pgprot);

	for (i = 0; i < 16; i++) {
		if (unlikely(imu_pgd_val(*pgd))) {
			m4u_aee_print("%s: mva=0x%x, pgd=0x%x, i=%d\n", __func__, mva, imu_pgd_val(*pgd), i);
			goto err_out;
		}
		m4u_set_pgd_val(pgd, padscpt | pgprot);
		pgd++;
	}

	return 0;

err_out:
	/* undo the i entries written before the conflicting one */
	for (pgd--; i > 0; i--) {
		m4u_set_pgd_val(pgd, 0);
		pgd--;
	}
	return -1;
}
+
/* Map one 1MB section mva->pa in a single pgd entry.  Caller must clean
 * ptes / invalidate TLB afterwards (see the m4u_map_XX note above).
 * NOTE(review): as in m4u_map_16M, the >4GB branch masks pa with
 * F_PTE_PA_SMALL_MSK inside a pgd descriptor — TODO confirm intent.
 * Returns 0 on success, -EINVAL on misaligned mva/pa, -1 when the pgd
 * entry is already in use. */
int m4u_map_1M(m4u_domain_t *m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
{
	imu_pgd_t *pgd;
	unsigned int pgprot;
	unsigned int padscpt;

	if ((mva & (~F_PGD_PA_SECTION_MSK)) != ((unsigned int)pa & (~F_PGD_PA_SECTION_MSK))) {
		m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "section");
		return -EINVAL;
	}

	mva &= F_PGD_PA_SECTION_MSK;
	if (pa > 0xffffffffL)
		padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK | F_PGD_BIT32_BIT);
	else
		padscpt = (unsigned int)pa & F_PGD_PA_SECTION_MSK;

	pgprot = __m4u_get_pgd_attr_1M(prot);
	pgd = imu_pgd_offset(m4u_domain, mva);

	if (unlikely(imu_pgd_val(*pgd))) {
		m4u_aee_print("%s: mva=0x%x, pgd=0x%x\n", __func__, mva, imu_pgd_val(*pgd));
		return -1;
	}

	m4u_set_pgd_val(pgd, padscpt | pgprot);

	M4ULOG_LOW("%s: mva: 0x%x, pgd: 0x%lx (0x%lx + 0x%x), pa: 0x%lx, value: 0x%x\n",
		   __func__, mva, (unsigned long)pgd, (unsigned long)((m4u_domain)->pgd),
		   imu_pgd_index(mva), pa, padscpt | pgprot);

	return 0;
}
+
/* Map one 64KB large page mva->pa.  A large page occupies 16
 * consecutive pte entries holding the same descriptor (short-descriptor
 * replication); on a conflicting entry every entry written so far is
 * rolled back.  The second-level table is allocated on demand; pte_new
 * records whether this call created it so the error path can free it.
 * Caller must clean ptes / invalidate TLB afterwards.
 * Returns 0 on success, -EINVAL on misaligned mva/pa, -1 on conflict. */
int m4u_map_64K(m4u_domain_t *m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
{
	int ret, i;
	imu_pgd_t *pgd;
	imu_pte_t *pte;
	unsigned int pte_new, pgprot;
	unsigned int padscpt;

	if ((mva & (~F_PTE_PA_LARGE_MSK)) != ((unsigned int)pa & (~F_PTE_PA_LARGE_MSK))) {
		m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "large page");
		return -EINVAL;
	}

	mva &= F_PTE_PA_LARGE_MSK;
	if (pa > 0xffffffffL)
		padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK | F_PTE_BIT32_BIT);
	else
		padscpt = (unsigned int)pa & F_PTE_PA_LARGE_MSK;

	pgprot = __m4u_get_pgd_attr_page(prot);
	pgd = imu_pgd_offset(m4u_domain, mva);
	if (!imu_pgd_val(*pgd)) {
		ret = m4u_alloc_pte(m4u_domain, pgd, pgprot);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			pte_new = 0;	/* another thread installed the table */
		else
			pte_new = 1;	/* we created the table */
	} else {
		/* an existing table must carry the same pgd attributes */
		if (unlikely((imu_pgd_val(*pgd) & (~F_PGD_PA_PAGETABLE_MSK)) != pgprot)) {
			m4u_aee_print("%s: mva=0x%x, pgd=0x%x, pgprot=0x%x\n",
				      __func__, mva, imu_pgd_val(*pgd), pgprot);
			return -1;
		}
		pte_new = 0;
	}

	pgprot = __m4u_get_pte_attr_64K(prot);
	pte = imu_pte_offset_map(pgd, mva);

	M4ULOG_LOW("%s: mva: 0x%x, pte: 0x%p (0x%lx + 0x%x), pa: 0x%lx, value: 0x%x\n",
		   __func__, mva, &imu_pte_val(*pte), (unsigned long)imu_pte_map(pgd),
		   imu_pte_index(mva), pa, padscpt | pgprot);

	for (i = 0; i < 16; i++) {
		if (unlikely(imu_pte_val(pte[i]))) {
			m4u_aee_print("%s: pte=0x%x, i=%d\n", __func__, imu_pte_val(pte[i]), i);
			goto err_out;
		}
		imu_pte_val(pte[i]) = padscpt | pgprot;
	}
	imu_pte_unmap(pte);

	return 0;

err_out:
	/* roll back the entries written before the conflict */
	for (i--; i >= 0; i--)
		imu_pte_val(pte[i]) = 0;
	imu_pte_unmap(pte);

	if (pte_new)
		m4u_free_pte(m4u_domain, pgd);

	return -1;
}
+
/* Map one 4KB small page mva->pa in a single pte entry.  The
 * second-level table is allocated on demand; pte_new records whether
 * this call created it so the error path can free it again.
 * Caller must clean ptes / invalidate TLB afterwards.
 * Returns 0 on success, -EINVAL on misaligned mva/pa, -1 on conflict. */
int m4u_map_4K(m4u_domain_t *m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
{
	int ret, pte_new;
	imu_pgd_t *pgd;
	imu_pte_t *pte;
	unsigned int pgprot;
	unsigned int padscpt;

	if ((mva & (~F_PTE_PA_SMALL_MSK)) != ((unsigned int)pa & (~F_PTE_PA_SMALL_MSK))) {
		m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "small page");
		return -EINVAL;
	}

	mva &= F_PTE_PA_SMALL_MSK;
	if (pa > 0xffffffffL)
		padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK | F_PTE_BIT32_BIT);
	else
		padscpt = (unsigned int)pa & F_PTE_PA_SMALL_MSK;

	pgprot = __m4u_get_pgd_attr_page(prot);
	pgd = imu_pgd_offset(m4u_domain, mva);
	if (!imu_pgd_val(*pgd)) {
		ret = m4u_alloc_pte(m4u_domain, pgd, pgprot);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			pte_new = 0;	/* another thread installed the table */
		else
			pte_new = 1;	/* we created the table */
	} else {
		/* an existing table must carry the same pgd attributes */
		if (unlikely((imu_pgd_val(*pgd) & (~F_PGD_PA_PAGETABLE_MSK)) != pgprot)) {
			m4u_aee_print("%s: mva=0x%x, pgd=0x%x, pgprot=0x%x\n",
				      __func__, mva, imu_pgd_val(*pgd), pgprot);
			return -1;
		}
		pte_new = 0;
	}

	pgprot = __m4u_get_pte_attr_4K(prot);
	pte = imu_pte_offset_map(pgd, mva);

	if (unlikely(imu_pte_val(*pte))) {
		m4u_aee_print("%s: pte=0x%x\n", __func__, imu_pte_val(*pte));
		goto err_out;
	}

	imu_pte_val(*pte) = padscpt | pgprot;

	M4ULOG_LOW("%s: mva: 0x%x, pte: 0x%p (0x%lx + 0x%x), pa: 0x%lx, value: 0x%x\n",
		   __func__, mva, &imu_pte_val(*pte), (unsigned long)imu_pte_map(pgd),
		   imu_pte_index(mva), pa, padscpt | imu_pte_val(*pte));

	imu_pte_unmap(pte);

	return 0;

err_out:
	imu_pte_unmap(pte);
	if (pte_new)
		m4u_free_pte(m4u_domain, pgd);

	return -1;
}
+
+/* notes: both iova & paddr should be aligned. */
+static inline int m4u_map_phys_align(m4u_domain_t *m4u_domain, unsigned int iova,
+ unsigned long paddr, unsigned int size, unsigned int prot)
+{
+ int ret;
+
+ if (size == SZ_16M)
+ ret = m4u_map_16M(m4u_domain, iova, paddr, prot);
+ else if (size == SZ_1M)
+ ret = m4u_map_1M(m4u_domain, iova, paddr, prot);
+ else if (size == SZ_64K)
+ ret = m4u_map_64K(m4u_domain, iova, paddr, prot);
+ else if (size == SZ_4K)
+ ret = m4u_map_4K(m4u_domain, iova, paddr, prot);
+ else {
+ m4u_aee_print("%s: fail size=0x%x\n", __func__, size);
+ return -1;
+ }
+
+ return ret;
+}
+
+
+/***********************************************************/
+/** map a physical continuous memory to iova (mva).
+* @param m4u_domain domain
+* @param iova -- iova (mva)
+* @param paddr -- physical address
+* @param size -- size
+* @param prot -- m4u_prot
+*
+* @return 0 on success, others on fail
+* @remark
+* @see refer to kernel/drivers/iommu/iommu.c iommu_map()
+* @author K Zhang @date 2013/11/19
+************************************************************/
+int m4u_map_phys_range(m4u_domain_t *m4u_domain, unsigned int iova,
+ unsigned long paddr, unsigned int size, unsigned int prot)
+{
+ unsigned int min_pagesz;
+ int ret = 0;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(m4u_domain->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | (unsigned int)paddr | size, min_pagesz)) {
+ M4UMSG("unaligned: iova 0x%x pa 0x%lx size 0x%x min_pagesz 0x%x\n",
+ iova, paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ while (size) {
+ unsigned long pgsize, addr_merge = (unsigned long)iova | paddr;
+ unsigned int pgsize_idx;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* need to consider alignment requirements ? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by both iova and paddr */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= m4u_domain->pgsize_bitmap;
+
+ /* make sure we're still sane */
+ BUG_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ M4ULOG_LOW("mapping: iova 0x%x pa 0x%lx pgsize %lu\n", iova, paddr, pgsize);
+
+#if (M4U_DVT == MMU_PT_TYPE_SMALL_PAGE)
+ if (pgsize > SZ_4K)
+ pgsize = SZ_4K;
+#endif
+#if (M4U_DVT == MMU_PT_TYPE_LARGE_PAGE)
+ if (pgsize > SZ_64K)
+ pgsize = SZ_64K;
+#endif
+#if (M4U_DVT == MMU_PT_TYPE_SECTION)
+ if (pgsize > SZ_1M)
+ pgsize = SZ_1M;
+#endif
+#if (M4U_DVT == MMU_PT_TYPE_SUPERSECTION)
+ if (pgsize > SZ_16M)
+ pgsize = SZ_16M;
+#endif
+
+ ret = m4u_map_phys_align(m4u_domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ m4u_unmap(m4u_domain, iova, size);
+ return ret;
+}
+
+int m4u_map_sgtable(m4u_domain_t *m4u_domain, unsigned int mva,
+ struct sg_table *sg_table, unsigned int size, unsigned int prot)
+{
+ int i, ret;
+ struct scatterlist *sg;
+ unsigned int map_mva = mva, map_end = mva + size;
+
+ prot = m4u_prot_fixup(prot);
+
+ write_lock_domain(m4u_domain);
+
+ for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
+ dma_addr_t pa;
+ unsigned int len;
+
+ pa = get_sg_phys(sg);
+ len = sg_dma_len(sg);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ if (0 == sg_dma_address(sg))
+ len = sg->length;
+#endif
+
+ M4ULOG_LOW("%s: for_each_sg i: %d, len: %d, mva: 0x%x\n", __func__, i, len, map_mva);
+
+ if (map_mva + len > map_end) {
+ M4UMSG("%s: map_mva(0x%x)+len(0x%x)>end(0x%x)\n", __func__, map_mva, len, map_end);
+ break;
+ }
+ if (len == SZ_4K) { /* for most cases */
+ ret = m4u_map_4K(m4u_domain, map_mva, pa, prot);
+ } else {
+ ret = m4u_map_phys_range(m4u_domain, map_mva, pa, len, prot);
+ }
+
+ if (ret) {
+ M4UMSG("%s: ret: %d, i: %d, sg->dma: 0x%lx, sg->phy: 0x%lx, sg->offset: 0x%x\n",
+ __func__, ret, i, (unsigned long)sg_dma_address(sg),
+ (unsigned long)sg_phys(sg), sg->offset);
+ goto err_out;
+ } else {
+ map_mva += len;
+ }
+ }
+
+ if (map_mva < map_end) {
+ M4UMSG("%s: map_mva(0x%x) < map_end(0x%x)\n", __func__, map_mva, map_end);
+ goto err_out;
+ }
+
+ m4u_clean_pte(m4u_domain, mva, size);
+
+ m4u_invalid_tlb_by_range(m4u_domain, mva, mva + size - 1);
+
+ write_unlock_domain(m4u_domain);
+
+ return 0;
+
+err_out:
+ write_unlock_domain(m4u_domain);
+
+ m4u_unmap(m4u_domain, mva, size);
+ return -EINVAL;
+}
+
+
+
+int m4u_check_free_pte(m4u_domain_t *domain, imu_pgd_t *pgd)
+{
+ imu_pte_t *pte;
+ int i;
+
+ pte = imu_pte_map(pgd);
+ for (i = 0; i < IMU_PTRS_PER_PTE; i++) {
+ if (imu_pte_val(*pte) != 0)
+ break;
+ }
+ if (i == IMU_PTRS_PER_PTE) {
+ m4u_free_pte(domain, pgd);
+ m4u_set_pgd_val(pgd, 0);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+int m4u_unmap(m4u_domain_t *domain, unsigned int mva, unsigned int size)
+{
+ imu_pgd_t *pgd;
+ int i, ret;
+ unsigned int start = mva;
+ unsigned long end_plus_1 = (unsigned long)mva + size;
+
+ write_lock_domain(domain);
+ do {
+ pgd = imu_pgd_offset(domain, mva);
+
+ if (F_PGD_TYPE_IS_PAGE(*pgd)) {
+ imu_pte_t *pte;
+ unsigned int pte_offset;
+ unsigned int num_to_clean;
+
+ pte_offset = imu_pte_index(mva);
+ num_to_clean =
+ min((unsigned int)((end_plus_1 - mva) / PAGE_SIZE),
+ (unsigned int)(IMU_PTRS_PER_PTE - pte_offset));
+
+ pte = imu_pte_offset_map(pgd, mva);
+
+ memset(pte, 0, num_to_clean << 2);
+
+ ret = m4u_check_free_pte(domain, pgd);
+ if (ret == 1) { /* pte is not freed, need to flush pte */
+ m4u_clean_pte(domain, mva, num_to_clean << PAGE_SHIFT);
+ }
+
+ mva += num_to_clean << PAGE_SHIFT;
+ } else if (F_PGD_TYPE_IS_SECTION(*pgd)) {
+ m4u_set_pgd_val(pgd, 0);
+ mva += MMU_SECTION_SIZE;
+ } else if (F_PGD_TYPE_IS_SUPERSECTION(*pgd)) {
+ imu_pgd_t *start = imu_supersection_start(pgd);
+
+ if (unlikely(start != pgd))
+ m4u_aee_print("%s: suppersec not align, mva=0x%x, pgd=0x%x\n",
+ __func__, mva, imu_pgd_val(*pgd));
+
+ for (i = 0; i < 16; i++)
+ imu_pgd_val(start[i]) = 0;
+
+ mva = (mva + MMU_SUPERSECTION_SIZE) & (~(MMU_SUPERSECTION_SIZE - 1)); /* must align */
+ } else {
+ mva += MMU_SECTION_SIZE;
+ }
+ } while (mva < end_plus_1 && mva); /*if the last mva is 0xfff00000, the new mva may be 0 */
+
+ m4u_invalid_tlb_by_range(domain, start, end_plus_1 - 1);
+
+ write_unlock_domain(domain);
+ return 0;
+}
+
+int m4u_debug_pgtable_show(struct seq_file *s, void *unused)
+{
+ m4u_dump_pgtable(s->private, s);
+ return 0;
+}
+
+int m4u_debug_pgtable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, m4u_debug_pgtable_show, inode->i_private);
+}
+
+const struct file_operations m4u_debug_pgtable_fops = {
+ .open = m4u_debug_pgtable_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+int m4u_pgtable_init(struct m4u_device *m4u_dev, m4u_domain_t *m4u_domain)
+{
+ /* ======= alloc pagetable======================= */
+ m4u_domain->pgd =
+ dma_alloc_coherent(m4u_dev->pDev[0], M4U_PGD_SIZE, &(m4u_domain->pgd_pa), GFP_KERNEL);
+
+ if (!(m4u_domain->pgd)) {
+ M4UMSG("dma_alloc_coherent error! dma memory not available.\n");
+ return -1;
+ }
+ if ((unsigned int)(m4u_domain->pgd_pa) & (M4U_PGD_SIZE - 1)) {
+ M4UMSG("dma_alloc_coherent memory not align. 0x%lx\n",
+ (unsigned long)(m4u_domain->pgd_pa));
+ return -1;
+ }
+
+ M4UINFO("dma_alloc_coherent success! pagetable_va=0x%lx, pagetable_pa=0x%lx.\n",
+ (unsigned long)(m4u_domain->pgd), (unsigned long)(m4u_domain->pgd_pa));
+
+ memset((void *)m4u_domain->pgd, 0, M4U_PGD_SIZE);
+ /* ======= alloc pagetable done======================= */
+
+ if (0 != m4u_pte_allocator_init())
+ return -1;
+
+ debugfs_create_file("pgtable", 0644, m4u_dev->debug_root, m4u_domain, &m4u_debug_pgtable_fops);
+
+ return 0;
+}
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.h b/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.h
new file mode 100644
index 000000000..fce982466
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_pgtable.h
@@ -0,0 +1,144 @@
+#ifndef __M4U_PGTABLE_H__
+#define __M4U_PGTABLE_H__
+
+#include "m4u_reg.h"
+
+/* ================================================================= */
+/* 2 level pagetable: pgd -> pte */
+
+#define F_PTE_TYPE_MSK F_MSK(1, 0)
+#define F_PTE_TYPE_SET(val) F_VAL(val, 1, 0)
+#define F_PTE_TYPE_GET(regval) F_MSK_SHIFT(regval, 1, 0)
+#define F_PTE_TYPE_LARGE (0x1)
+#define F_PTE_TYPE_SMALL (0x2)
+#define F_PTE_B_BIT F_BIT_SET(2)
+#define F_PTE_C_BIT F_BIT_SET(3)
+#define F_PTE_AP_MSK F_MSK(5, 4)
+#define F_PTE_AP_SET(val) F_VAL(val, 5, 4)
+#define F_PTE_AP_GET(regval) F_MSK_SHIFT(regval, 5, 4)
+#define F_PTE_TEX_MSK F_MSK(8, 6)
+#define F_PTE_TEX_SET(val) F_VAL(val, 8, 6)
+#define F_PTE_TEX_GET(regval) F_MSK_SHIFT(regval, 8, 6)
+#define F_PTE_BIT32_BIT F_BIT_SET(9)
+#define F_PTE_S_BIT F_BIT_SET(10)
+#define F_PTE_NG_BIT F_BIT_SET(11)
+#define F_PTE_PA_LARGE_MSK F_MSK(31, 16)
+#define F_PTE_PA_LARGE_SET(val) F_VAL(val, 31, 16)
+#define F_PTE_PA_LARGE_GET(regval) F_MSK_SHIFT(regval, 31, 16)
+#define F_PTE_PA_SMALL_MSK F_MSK(31, 12)
+#define F_PTE_PA_SMALL_SET(val) F_VAL(val, 31, 12)
+#define F_PTE_PA_SMALL_GET(regval) F_MSK_SHIFT(regval, 31, 12)
+#define F_PTE_TYPE_IS_LARGE_PAGE(pte) ((imu_pte_val(pte)&0x3) == F_PTE_TYPE_LARGE)
+#define F_PTE_TYPE_IS_SMALL_PAGE(pte) ((imu_pte_val(pte)&0x3) == F_PTE_TYPE_SMALL)
+
+
+#define F_PGD_TYPE_PAGE (0x1)
+#define F_PGD_TYPE_PAGE_MSK (0x3)
+#define F_PGD_TYPE_SECTION (0x2)
+#define F_PGD_TYPE_SUPERSECTION (0x2|(1<<18))
+#define F_PGD_TYPE_SECTION_MSK (0x3|(1<<18))
+#define F_PGD_TYPE_IS_PAGE(pgd) ((imu_pgd_val(pgd)&3) == 1)
+#define F_PGD_TYPE_IS_SECTION(pgd) \
+ (F_PGD_TYPE_IS_PAGE(pgd) ? 0 : ((imu_pgd_val(pgd)&F_PGD_TYPE_SECTION_MSK) == F_PGD_TYPE_SECTION))
+#define F_PGD_TYPE_IS_SUPERSECTION(pgd) \
+ (F_PGD_TYPE_IS_PAGE(pgd) ? 0 : ((imu_pgd_val(pgd)&F_PGD_TYPE_SECTION_MSK) == F_PGD_TYPE_SUPERSECTION))
+
+#define F_PGD_B_BIT F_BIT_SET(2)
+#define F_PGD_C_BIT F_BIT_SET(3)
+#define F_PGD_AP_MSK F_MSK(11, 10)
+#define F_PGD_AP_SET(val) F_VAL(val, 11, 10)
+#define F_PGD_AP_GET(regval) F_MSK_SHIFT(regval, 11, 10)
+#define F_PGD_TEX_MSK F_MSK(14, 12)
+#define F_PGD_TEX_SET(val) F_VAL(val, 14, 12)
+#define F_PGD_TEX_GET(regval) F_MSK_SHIFT(regval, 14, 12)
+#define F_PGD_BIT32_BIT F_BIT_SET(9)
+#define F_PGD_S_BIT F_BIT_SET(16)
+#define F_PGD_NG_BIT F_BIT_SET(17)
+#define F_PGD_NS_BIT_PAGE(ns) F_BIT_VAL(ns, 3)
+#define F_PGD_NS_BIT_SECTION(ns) F_BIT_VAL(ns, 19)
+#define F_PGD_NS_BIT_SUPERSECTION(ns) F_BIT_VAL(ns, 19)
+
+#define F_PGD_PA_PAGETABLE_MSK F_MSK(31, 10)
+#define F_PGD_PA_PAGETABLE_SET(val) F_VAL(val, 31, 10)
+#define F_PGD_PA_SECTION_MSK F_MSK(31, 20)
+#define F_PGD_PA_SECTION_SET(val) F_VAL(val, 31, 20)
+#define F_PGD_PA_SUPERSECTION_MSK F_MSK(31, 24)
+#define F_PGD_PA_SUPERSECTION_SET(val) F_VAL(val, 31, 24)
+
+/* pagetable walk */
+#define IMU_PGDIR_SHIFT 20
+#define IMU_PAGE_SHIFT 12
+#define IMU_PTRS_PER_PGD 4096
+#define IMU_PTRS_PER_PTE 256
+#define IMU_BYTES_PER_PTE (IMU_PTRS_PER_PTE*sizeof(imu_pteval_t))
+
+#define MMU_PT_TYPE_SUPERSECTION (1<<4)
+#define MMU_PT_TYPE_SECTION (1<<3)
+#define MMU_PT_TYPE_LARGE_PAGE (1<<2)
+#define MMU_PT_TYPE_SMALL_PAGE (1<<1)
+
+#define MMU_SMALL_PAGE_SIZE (SZ_4K)
+#define MMU_LARGE_PAGE_SIZE (SZ_64K)
+#define MMU_SECTION_SIZE (SZ_1M)
+#define MMU_SUPERSECTION_SIZE (SZ_16M)
+
+typedef unsigned int imu_pteval_t;
+typedef struct {imu_pteval_t imu_pte; } imu_pte_t;
+typedef struct {imu_pteval_t imu_pgd; } imu_pgd_t;
+
+#define imu_pte_val(x) ((x).imu_pte)
+#define imu_pgd_val(x) ((x).imu_pgd)
+
+#define __imu_pte(x) ((imu_pte_t){(x)})
+#define __imu_pgd(x) ((imu_pgd_t){(x)})
+
+#define imu_pte_none(pte) (!imu_pte_val(pte))
+#define imu_pte_type(pte) (imu_pte_val(pte)&0x3)
+
+#define imu_pgd_index(addr) ((addr) >> IMU_PGDIR_SHIFT)
+#define imu_pgd_offset(domain, addr) ((domain)->pgd + imu_pgd_index(addr))
+
+#define imu_pte_index(addr) (((addr)>>IMU_PAGE_SHIFT)&(IMU_PTRS_PER_PTE - 1))
+#define imu_pte_offset_map(pgd, addr) (imu_pte_map(pgd) + imu_pte_index(addr))
+
+static inline imu_pte_t *imu_pte_map(imu_pgd_t *pgd)
+{
+ return (imu_pte_t *) __va(imu_pgd_val(*pgd) & F_PGD_PA_PAGETABLE_MSK);
+}
+
+static inline int imu_pte_unmap(imu_pte_t *pte)
+{
+ return 0;
+}
+
+static inline unsigned int imu_pgd_entry_pa(imu_pgd_t pgd)
+{
+ if (F_PGD_TYPE_IS_PAGE(pgd))
+ return imu_pgd_val(pgd) & F_PGD_PA_PAGETABLE_MSK;
+ else if (F_PGD_TYPE_IS_SECTION(pgd))
+ return imu_pgd_val(pgd) & F_PGD_PA_SECTION_MSK;
+ else if (F_PGD_TYPE_IS_SUPERSECTION(pgd))
+ return imu_pgd_val(pgd) & F_PGD_PA_SUPERSECTION_MSK;
+ else
+ return 0;
+}
+
+static inline imu_pgd_t *imu_supersection_start(imu_pgd_t *pgd)
+{
+ return (imu_pgd_t *) (round_down((unsigned long)pgd, (16 * 4)));
+}
+static inline imu_pte_t *imu_largepage_start(imu_pte_t *pte)
+{
+ return (imu_pte_t *) (round_down((unsigned long)pte, (16 * 4)));
+}
+
+static inline unsigned long long m4u_calc_next_mva(unsigned long long addr, unsigned long long end, unsigned int size)
+{
+ /* addr + size may equal 0x100000000*/
+ unsigned long long __boundary = (addr + (unsigned long long)size) & (~((unsigned long long)size-1));
+ unsigned long long min = min_t(unsigned long long, __boundary, end);
+
+ return min;
+}
+
+#endif
diff --git a/drivers/misc/mediatek/m4u/2.0/m4u_v2.h b/drivers/misc/mediatek/m4u/2.0/m4u_v2.h
new file mode 100644
index 000000000..6063d8c84
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/2.0/m4u_v2.h
@@ -0,0 +1,161 @@
+#ifndef __M4U_V2_H__
+#define __M4U_V2_H__
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include "m4u_port.h"
+#include <linux/scatterlist.h>
+
+typedef int M4U_PORT_ID;
+
+#define M4U_PROT_READ (1<<0) /* buffer can be read by engine */
+#define M4U_PROT_WRITE (1<<1) /* buffer can be written by engine */
+#define M4U_PROT_CACHE (1<<2) /* buffer access will go to CCI to do cache snoop */
+#define M4U_PROT_SHARE (1<<3) /* buffer access will go to CCI, but won't do cache snoop
+ (just for engines that want to use CCI bandwidth) */
+#define M4U_PROT_SEC (1<<4) /* buffer can only be accessed by secure engine. */
+
+/* public flags */
+#define M4U_FLAGS_SEQ_ACCESS (1<<0) /* engine accesses this buffer in a sequential way. */
+#define M4U_FLAGS_FIX_MVA (1<<1) /* fix allocation, we will use mva user specified. */
+#define M4U_FLAGS_SEC_SHAREABLE (1<<2) /* the mva will share in SWd */
+
+/* m4u internal flags (DO NOT use them for other purposes) */
+#define M4U_FLAGS_MVA_IN_FREE (1<<8) /* this mva is being deallocated. */
+
+
+typedef enum {
+ RT_RANGE_HIGH_PRIORITY = 0,
+ SEQ_RANGE_LOW_PRIORITY = 1
+} M4U_RANGE_PRIORITY_ENUM;
+
+
+/* port related: virtuality, security, distance */
+typedef struct _M4U_PORT {
+ M4U_PORT_ID ePortID; /* hardware port ID, defined in M4U_PORT_ID */
+ unsigned int Virtuality;
+ unsigned int Security;
+ unsigned int domain; /* domain : 0 1 2 3 */
+ unsigned int Distance;
+ unsigned int Direction; /* 0:- 1:+ */
+} M4U_PORT_STRUCT;
+
+struct m4u_port_array {
+ #define M4U_PORT_ATTR_EN (1<<0)
+ #define M4U_PORT_ATTR_VIRTUAL (1<<1)
+ #define M4U_PORT_ATTR_SEC (1<<2)
+ unsigned char ports[M4U_PORT_NR];
+};
+
+
+typedef enum {
+ M4U_CACHE_CLEAN_BY_RANGE,
+ M4U_CACHE_INVALID_BY_RANGE,
+ M4U_CACHE_FLUSH_BY_RANGE,
+
+ M4U_CACHE_CLEAN_ALL,
+ M4U_CACHE_INVALID_ALL,
+ M4U_CACHE_FLUSH_ALL,
+} M4U_CACHE_SYNC_ENUM;
+
+typedef enum {
+ M4U_DMA_MAP_AREA,
+ M4U_DMA_UNMAP_AREA,
+} M4U_DMA_TYPE;
+
+typedef enum {
+ M4U_DMA_FROM_DEVICE,
+ M4U_DMA_TO_DEVICE,
+ M4U_DMA_BIDIRECTIONAL,
+} M4U_DMA_DIR;
+
+typedef struct {
+ /* mutex to protect mvaList */
+ /* should get this mutex whenever add/delete/iterate mvaList */
+ struct mutex dataMutex;
+ pid_t open_pid;
+ pid_t open_tgid;
+ struct list_head mvaList;
+} m4u_client_t;
+
+int m4u_dump_info(int m4u_index);
+int m4u_power_on(int m4u_index);
+int m4u_power_off(int m4u_index);
+
+int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
+ unsigned long va, struct sg_table *sg_table,
+ unsigned int size, unsigned int prot, unsigned int flags,
+ unsigned int *pMva);
+
+int m4u_dealloc_mva(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva);
+
+int m4u_alloc_mva_sg(int eModuleID,
+ struct sg_table *sg_table,
+ const unsigned int BufSize,
+ int security,
+ int cache_coherent,
+ unsigned int *pRetMVABuf);
+
+int m4u_dealloc_mva_sg(int eModuleID,
+ struct sg_table *sg_table,
+ const unsigned int BufSize,
+ const unsigned int MVA);
+
+int m4u_config_port(M4U_PORT_STRUCT *pM4uPort);
+int m4u_config_port_array(struct m4u_port_array *port_array);
+int m4u_monitor_start(int m4u_id);
+int m4u_monitor_stop(int m4u_id);
+
+int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
+ unsigned long va, unsigned int size, unsigned int mva,
+ M4U_CACHE_SYNC_ENUM sync_type);
+
+int m4u_mva_map_kernel(unsigned int mva, unsigned int size,
+ unsigned long *map_va, unsigned int *map_size);
+int m4u_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned long va);
+m4u_client_t *m4u_create_client(void);
+int m4u_destroy_client(m4u_client_t *client);
+
+int m4u_dump_reg_for_smi_hang_issue(void);
+int m4u_display_fake_engine_test(unsigned long ulFakeReadAddr, unsigned long ulFakeWriteAddr);
+
+void m4u_larb_backup(int larb_idx);
+void m4u_larb_restore(int larb_idx);
+
+typedef enum m4u_callback_ret {
+ M4U_CALLBACK_HANDLED,
+ M4U_CALLBACK_NOT_HANDLED,
+} m4u_callback_ret_t;
+
+typedef m4u_callback_ret_t (m4u_reclaim_mva_callback_t)(int alloc_port, unsigned int mva,
+ unsigned int size, void *data);
+int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void *data);
+int m4u_unregister_reclaim_callback(int port);
+
+typedef m4u_callback_ret_t (m4u_fault_callback_t)(int port, unsigned int mva, void *data);
+int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void *data);
+int m4u_unregister_fault_callback(int port);
+
+#ifdef CONFIG_PM
+extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
+extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity);
+#endif
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+extern int gM4U_L2_enable;
+#endif
+
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+
+#ifdef M4U_PROFILE
+extern void MMProfileEnable(int enable);
+extern void MMProfileStart(int start);
+extern MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
+#endif
+
+#ifndef M4U_FPGAPORTING
+extern void smp_inner_dcache_flush_all(void);
+#endif
+/* m4u driver internal use --------------------------------------------------- */
+/* */
+
+#endif
diff --git a/drivers/misc/mediatek/m4u/Kconfig b/drivers/misc/mediatek/m4u/Kconfig
new file mode 100644
index 000000000..cd28f2e22
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/Kconfig
@@ -0,0 +1,6 @@
+config MTK_M4U
+ tristate "MediaTek M4U driver"
+ default y
+ ---help---
+	  M4U driver support for multimedia hardware that uses non-physically-contiguous memory.
+
diff --git a/drivers/misc/mediatek/m4u/Makefile b/drivers/misc/mediatek/m4u/Makefile
index 804347f3e..4e9a33cd3 100644
--- a/drivers/misc/mediatek/m4u/Makefile
+++ b/drivers/misc/mediatek/m4u/Makefile
@@ -1,3 +1,8 @@
-
obj-y += $(subst ",,$(CONFIG_MTK_PLATFORM))/
+obj-$(CONFIG_ARCH_MT6580) += 2.0/
+obj-$(CONFIG_ARCH_MT6735) += 2.0/
+obj-$(CONFIG_ARCH_MT6735M) += 2.0/
+obj-$(CONFIG_ARCH_MT6753) += 2.0/
+obj-$(CONFIG_ARCH_MT6755) += 2.0/
+obj-$(CONFIG_ARCH_MT6797) += 2.0/ \ No newline at end of file
diff --git a/drivers/misc/mediatek/m4u/mt6735/Makefile b/drivers/misc/mediatek/m4u/mt6735/Makefile
index 8c9b15446..d2626b08f 100755..100644
--- a/drivers/misc/mediatek/m4u/mt6735/Makefile
+++ b/drivers/misc/mediatek/m4u/mt6735/Makefile
@@ -1,11 +1,12 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
-ccflags-y += -O0 -gdwarf-2
ccflags-y += -I$(srctree)/drivers/staging/android/ion
-ccflags-y += -I$(srctree)/drivers/misc/mediatek/gud/$(MTK_PLATFORM)/gud/MobiCoreKernelApi/public
-ccflags-y += -I$(srctree)/drivers/misc/mediatek/gud/$(MTK_PLATFORM)/gud/MobiCoreKernelApi/include
+include $(srctree)/drivers/misc/mediatek/gud/Makefile.include
ccflags-y += -I$(srctree)/drivers/misc/mediatek/mach/$(MTK_PLATFORM)/include/trustzone/m4u
ccflags-y += -I$(srctree)/drivers/misc/mediatek/mach/$(MTK_PLATFORM)/include/mach
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/irq/$(MTK_PLATFORM)/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat/$(MTK_PLATFORM)/include
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/mmp/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/2.0
ifeq ($(CONFIG_ARCH_MT6735),y)
ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6735/
@@ -20,5 +21,4 @@ ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6753/
obj-y += mt6753/
endif
-obj-y += m4u.o m4u_mva.o m4u_pgtable.o m4u_hw.o m4u_debug.o
-
+obj-$(CONFIG_MTK_M4U) += m4u_hw.o
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u.c b/drivers/misc/mediatek/m4u/mt6735/m4u.c
deleted file mode 100644
index 4fba0c9da..000000000
--- a/drivers/misc/mediatek/m4u/mt6735/m4u.c
+++ /dev/null
@@ -1,2677 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/miscdevice.h>
-#include <asm/io.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/earlysuspend.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/xlog.h>
-//#include <asm/mach/map.h>
-#include <mach/sync_write.h>
-//#include <mach/mt_irq.h>
-#include <mach/mt_clkmgr.h>
-#include <mach/irqs.h>
-#include <asm/cacheflush.h>
-//#include <asm/system.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/printk.h>
-#include <linux/dma-direction.h>
-#include <asm/page.h>
-#include <linux/proc_fs.h>
-#include <mach/m4u.h>
-
-#include "m4u_priv.h"
-#include "m4u_hw.h"
-
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-
-
-#ifdef M4U_TEE_SERVICE_ENABLE
-
-#include "mobicore_driver_api.h"
-#include "tz_m4u.h"
-#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
-#include <linux/sectrace.h>
-#endif
-int m4u_tee_en = 0;
-
-#endif
-
-#if IS_ENABLED(CONFIG_COMPAT)
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#endif
-
-#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP)
-#include <linux/sh_svp.h>
-#endif
-
-static m4u_buf_info_t gMvaNode_unkown =
-{
- .va = 0,
- .mva = 0,
- .size = 0,
- .port = M4U_PORT_UNKNOWN,
-};
-
-
-
-
-//-------------------------------------Global variables------------------------------------------------//
-
-//static DEFINE_MUTEX(gM4uMutex);
-MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
-
-#define M4U_DEV_NAME "m4u"
-struct m4u_device *gM4uDev;
-
-#ifndef M4U_FPGAPORTING
-extern void smp_inner_dcache_flush_all(void);
-#endif
-
-static int m4u_buf_show(void* priv, unsigned int mva_start, unsigned int mva_end, void* data)
-{
- m4u_buf_info_t *pMvaInfo = priv;
-
- M4U_PRINT_LOG_OR_SEQ(data, "0x%-8x, 0x%-8x, 0x%lx, 0x%-8x, 0x%x, %s, 0x%x, 0x%x, 0x%x\n",
- pMvaInfo->mva, pMvaInfo->mva+pMvaInfo->size-1,
- pMvaInfo->va, pMvaInfo->size, pMvaInfo->prot,
- m4u_get_port_name(pMvaInfo->port),
- pMvaInfo->flags,
- mva_start, mva_end);
-
- return 0;
-}
-
-
-int m4u_dump_buf_info(struct seq_file * seq)
-{
-
- M4U_PRINT_LOG_OR_SEQ(seq, "\ndump mva allocated info ========>\n");
- M4U_PRINT_LOG_OR_SEQ(seq, "mva_start mva_end va size prot module flags debug1 debug2\n");
-
- mva_for_each_priv((void *)m4u_buf_show, seq);
-
- M4U_PRINT_LOG_OR_SEQ(seq, " dump mva allocated info done ========>\n");
- return 0;
-}
-
-#ifdef M4U_PROFILE
-
-extern void MMProfileEnable(int enable);
-extern void MMProfileStart(int start);
-
-static void m4u_profile_init(void)
-{
-
- MMP_Event M4U_Event;
- MMProfileEnable(1);
- M4U_Event = MMProfileRegisterEvent(MMP_RootEvent, "M4U");
- // register events
- M4U_MMP_Events[M4U_MMP_ALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "Alloc MVA");
- M4U_MMP_Events[M4U_MMP_DEALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "DeAlloc MVA");
- M4U_MMP_Events[M4U_MMP_CONFIG_PORT] = MMProfileRegisterEvent(M4U_Event, "Config Port");
- M4U_MMP_Events[M4U_MMP_M4U_ERROR] = MMProfileRegisterEvent(M4U_Event, "M4U ERROR");
- M4U_MMP_Events[M4U_MMP_CACHE_SYNC] = MMProfileRegisterEvent(M4U_Event, "M4U_CACHE_SYNC");
- M4U_MMP_Events[M4U_MMP_TOGGLE_CG] = MMProfileRegisterEvent(M4U_Event, "M4U_Toggle_CG");
-
- //enable events by default
- MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], 1);
- MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], 1);
- MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], 1);
- MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_M4U_ERROR], 1);
- MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], 1);
- //MMProfileEnableEvent(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], 0);
- MMProfileStart(1);
-}
-#endif
-
-
-//get ref count on all pages in sgtable
-int m4u_get_sgtable_pages(struct sg_table *table)
-{
- int i;
- struct scatterlist *sg;
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- struct page *page = sg_page(sg);
- M4ULOG_LOW("m4u_get_sgtable_pages, i=%d, page=0x%p, page_count = %d.\n", i, page, atomic_read(&page->_count));
- if(page && pfn_valid(page_to_pfn(page)))
- get_page(page);
- }
- return 0;
-}
-//put ref count on all pages in sgtable
-int m4u_put_sgtable_pages(struct sg_table *table)
-{
- int i;
- struct scatterlist *sg;
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- struct page *page = sg_page(sg);
- if(page && pfn_valid(page_to_pfn(page)))
- put_page(page);
- }
- return 0;
-}
-
-
-
-static m4u_buf_info_t* m4u_alloc_buf_info(void)
-{
- m4u_buf_info_t *pList = NULL;
- pList = (m4u_buf_info_t*)kzalloc(sizeof(m4u_buf_info_t), GFP_KERNEL);
- if(pList==NULL)
- {
- M4UMSG("m4u_client_add_buf(), pList=0x%p\n", pList);
- return NULL;
- }
-
- INIT_LIST_HEAD(&(pList->link));
- return pList;
-}
-
-static int m4u_free_buf_info(m4u_buf_info_t *pList)
-{
- kfree(pList);
- return 0;
-}
-
-
-static int m4u_client_add_buf(m4u_client_t *client, m4u_buf_info_t *pList)
-{
- mutex_lock(&(client->dataMutex));
- list_add(&(pList->link), &(client->mvaList));
- mutex_unlock(&(client->dataMutex));
-
- return 0;
-}
-
-/*
-static int m4u_client_del_buf(m4u_client_t *client, m4u_buf_info_t *pList)
-{
- mutex_lock(&(client->dataMutex));
- list_del(&(pList->link));
- mutex_unlock(&(client->dataMutex));
-
- return 0;
-}
-*/
-
-/***********************************************************/
-/** find or delete a buffer from client list
-* @param client -- client to be searched
-* @param mva -- mva to be searched
-* @param del -- should we del this buffer from client?
-*
-* @return buffer_info if found, NULL on fail
-* @remark
-* @see
-* @to-do we need to add multi domain support here.
-* @author K Zhang @date 2013/11/14
-************************************************************/
-static m4u_buf_info_t* m4u_client_find_buf(m4u_client_t *client, unsigned int mva, int del)
-{
- struct list_head *pListHead;
- m4u_buf_info_t *pList = NULL;
- m4u_buf_info_t* ret=NULL;
-
- if(client==NULL)
- {
- M4UERR("m4u_delete_from_garbage_list(), client is NULL! \n");
- m4u_dump_buf_info(NULL);
- return NULL;
- }
-
- mutex_lock(&(client->dataMutex));
- list_for_each(pListHead, &(client->mvaList))
- {
- pList = container_of(pListHead, m4u_buf_info_t, link);
- if((pList->mva == mva))
- break;
- }
- if(pListHead == &(client->mvaList))
- {
- ret=NULL;
- }
- else
- {
- if(del)
- list_del(pListHead);
- ret = pList;
- }
-
-
- mutex_unlock(&(client->dataMutex));
-
- return ret;
-}
-
-/*
-//dump buf info in client
-static void m4u_client_dump_buf(m4u_client_t *client, const char *pMsg)
-{
- m4u_buf_info_t *pList;
- struct list_head *pListHead;
-
- M4UMSG("print mva list [%s] ================================>\n", pMsg);
- mutex_lock(&(client->dataMutex));
- list_for_each(pListHead, &(client->mvaList))
- {
- pList = container_of(pListHead, m4u_buf_info_t, link);
- M4UMSG("port=%s, va=0x%x, size=0x%x, mva=0x%x, prot=%d\n",
- m4u_get_port_name(pList->port), pList->va, pList->size, pList->mva, pList->prot);
- }
- mutex_unlock(&(client->dataMutex));
-
- M4UMSG("print mva list done ==========================>\n");
-}
-*/
-
-m4u_client_t * m4u_create_client(void)
-{
- m4u_client_t * client;
-
- client = kmalloc(sizeof(m4u_client_t) , GFP_ATOMIC);
- if(!client)
- {
- return NULL;
- }
-
- mutex_init(&(client->dataMutex));
- mutex_lock(&(client->dataMutex));
- client->open_pid = current->pid;
- client->open_tgid = current->tgid;
- INIT_LIST_HEAD(&(client->mvaList));
- mutex_unlock(&(client->dataMutex));
-
- return client;
-}
-
-int m4u_destroy_client(m4u_client_t *client)
-{
- m4u_buf_info_t *pMvaInfo;
- unsigned int mva, size;
- M4U_PORT_ID port;
-
- while(1)
- {
- mutex_lock(&(client->dataMutex));
- if(list_empty(&client->mvaList))
- {
- mutex_unlock(&(client->dataMutex));
- break;
- }
- pMvaInfo = container_of(client->mvaList.next, m4u_buf_info_t, link);
- M4UMSG("warnning: clean garbage at m4u close: module=%s,va=0x%lx,mva=0x%x,size=%d\n",
- m4u_get_port_name(pMvaInfo->port),pMvaInfo->va,pMvaInfo->mva,pMvaInfo->size);
-
- port = pMvaInfo->port;
- mva = pMvaInfo->mva;
- size = pMvaInfo->size;
-
- mutex_unlock(&(client->dataMutex));
-
- m4u_reclaim_notify(port, mva, size);
-
- //m4u_dealloc_mva will lock client->dataMutex again
- m4u_dealloc_mva(client, port, mva);
- }
-
- kfree(client);
-
- return 0;
-}
-
-
-static int m4u_dump_mmaps(unsigned long addr)
-{
- struct vm_area_struct *vma;
-
- M4ULOG_MID("addr=0x%lx, name=%s, pid=0x%x,", addr, current->comm, current->pid);
-
- vma = find_vma(current->mm, addr);
-
- if(vma && (addr >= vma->vm_start))
- {
- M4ULOG_MID("find vma: 0x%16lx-0x%16lx, flags=0x%lx\n",
- (vma->vm_start), (vma->vm_end), vma->vm_flags);
- return 0;
- }
- else
- {
- M4UMSG("cannot find vma for addr 0x%lx\n", addr);
- return -1;
- }
-}
-
-//to-do: need modification to support 4G DRAM
-static phys_addr_t m4u_user_v2p(unsigned long va)
-{
- unsigned long pageOffset = (va & (PAGE_SIZE - 1));
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- phys_addr_t pa;
-
- if(NULL==current)
- {
- M4UMSG("warning: m4u_user_v2p, current is NULL! \n");
- return 0;
- }
- if(NULL==current->mm)
- {
- M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
- return 0;
- }
-
- pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
- if(pgd_none(*pgd)||pgd_bad(*pgd))
- {
- M4UMSG("m4u_user_v2p(), va=0x%lx, pgd invalid! \n", va);
- return 0;
- }
-
- pud = pud_offset(pgd, va);
- if(pud_none(*pud)||pud_bad(*pud))
- {
- M4UMSG("m4u_user_v2p(), va=0x%lx, pud invalid! \n", va);
- return 0;
- }
-
- pmd = pmd_offset(pud, va);
- if(pmd_none(*pmd)||pmd_bad(*pmd))
- {
- M4UMSG("m4u_user_v2p(), va=0x%lx, pmd invalid! \n", va);
- return 0;
- }
-
- pte = pte_offset_map(pmd, va);
- if(pte_present(*pte))
- {
-#if 0 //cloud, workaround
- if((long long)pte_val(pte[PTE_HWTABLE_PTRS]) == (long long)0)
- {
- M4UMSG("user_v2p, va=0x%x, *ppte=%08llx", va,
- (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
- pte_unmap(pte);
- return 0;
- }
-#endif
- //pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset;
- pa=(pte_val(*pte) & (PHYS_MASK) & (~((phys_addr_t)0xfff))) | pageOffset;
- pte_unmap(pte);
- return pa;
- }
-
- pte_unmap(pte);
-
- M4UMSG("m4u_user_v2p(), va=0x%lx, pte invalid! \n", va);
- return 0;
-}
-
-
-static int m4u_fill_sgtable_user(unsigned long va, int page_num, struct scatterlist **pSg, int has_page)
-{
- int i;
- unsigned long va_align;
- phys_addr_t pa;
- struct scatterlist *sg = *pSg;
- struct page *page;
-
- va_align = round_down(va, PAGE_SIZE);
-
- for(i=0; i<page_num; i++)
- {
- pa = m4u_user_v2p(va_align+i*PAGE_SIZE);
- if(!pa || !sg)
- {
- M4UMSG("%s: fail va=0x%lx,page_num=0x%x,fail_va=0x%lx,pa=0x%pa,sg=0x%p,i=%d\n",
- __FUNCTION__, va, page_num, va_align+i*PAGE_SIZE, &pa, sg, i);
-
- m4u_dump_mmaps(va);
- m4u_dump_mmaps(va_align+i*PAGE_SIZE);
- return -1;
- }
-
-#if defined(CONFIG_CMA) && defined(CONFIG_MTK_SVP)
- else if (has_page) {
- unsigned long opfn, pfn;
- int ret, tries = 0;
-
- opfn = page_to_pfn(phys_to_page(pa));
-smretry:
- pfn = page_to_pfn(phys_to_page(pa));
-
- if (svp_is_in_range(pfn)) { //pr_alert("%s %d: pfn: %lu tries: %d\n", __func__, __LINE__, pfn, tries); dump_page(phys_to_page(pa));
- if (tries < 2) {
- // get new one
- ret = svp_migrate_range(pfn);
-
- if (!ret) {
- pa = m4u_user_v2p(va_align + i *PAGE_SIZE);
- tries++;
- goto smretry;
- }
- }
-
- if (tries >= 2 || ret) { pr_alert("%s %d: opfn: %lu pfn: %lu tries: %d\n", __func__, __LINE__, opfn, pfn, tries);
- m4u_dump_mmaps(va);
- m4u_dump_mmaps(va_align + i * PAGE_SIZE);
- return -1;
- }
- }
-
-#if 0
- if (opfn != pfn) {
- pr_alert("%s %d: opfn: %lu pfn: %lu tries: %d\n", __func__, __LINE__, opfn, pfn, tries);
- //dump_page(pfn_to_page(opfn));
- dump_page(phys_to_page(pa));
- }
-#endif
- }
-#endif
-
- if(has_page)
- {
- page = phys_to_page(pa);
- M4ULOG_LOW("%s: va=0x%lx, pa=0x%pa, sg=0x%p, page=0x%p\n",
- __FUNCTION__, va_align+i*PAGE_SIZE, &pa, sg, page);
- sg_set_page(sg, page, PAGE_SIZE, 0);
- }
- else
- {
- sg_dma_address(sg) = pa;
- sg_dma_len(sg) = PAGE_SIZE;
- }
- sg = sg_next(sg);
- }
- *pSg = sg;
- return 0;
-}
-
-static int m4u_create_sgtable_user(unsigned long va_align, struct sg_table *table)
-{
- int ret=0;
- struct vm_area_struct *vma;
- struct scatterlist *sg = table->sgl;
- unsigned int left_page_num = table->nents;
- unsigned long va = va_align;
-
- down_read(&current->mm->mmap_sem);
-
- while(left_page_num)
- {
- unsigned int vma_page_num;
-
- vma = find_vma(current->mm, va);
- if(vma == NULL || vma->vm_start > va)
- {
- M4UMSG("cannot find vma: va=0x%lx, vma=0x%p\n", va, vma);
- m4u_dump_mmaps(va);
- ret = -1;
- goto out;
- }
- else
- {
- M4ULOG_MID("%s va: 0x%lx, vma->vm_start=0x%lx, vma->vm_end=0x%lx\n", __FUNCTION__, va, vma->vm_start, vma->vm_end);
- }
-
- vma_page_num = (vma->vm_end - va)/PAGE_SIZE;
- vma_page_num = min(vma_page_num, left_page_num);
-
- if((vma->vm_flags) & VM_PFNMAP)
- {
- //ion va or ioremap vma has this flag
- //VM_PFNMAP: Page-ranges managed without "struct page", just pure PFN
- ret = m4u_fill_sgtable_user(va, vma_page_num, &sg, 0);
- M4ULOG_MID("alloc_mva VM_PFNMAP va=0x%lx, page_num=0x%x\n", va, vma_page_num);
- }
- else if((vma->vm_flags) & VM_LOCKED)
- {
- ret = m4u_fill_sgtable_user(va, vma_page_num, &sg, 1);
- }
- else
- {
- M4UMSG("%s vma->flags is error: 0x%lx\n", __FUNCTION__, vma->vm_flags);
- m4u_dump_mmaps(va);
- ret = -1;
- }
- if(ret)
- {
- goto out;
- }
-
- left_page_num -= vma_page_num;
- va += vma_page_num * PAGE_SIZE;
- }
-
-out:
- up_read(&current->mm->mmap_sem);
- return ret;
-}
-
-
-//make a sgtable for virtual buffer
-struct sg_table* m4u_create_sgtable(unsigned long va, unsigned int size)
-{
- struct sg_table *table;
- int ret,i, page_num;
- unsigned long va_align;
- phys_addr_t pa;
- struct scatterlist *sg;
- struct page *page;
-
- page_num = M4U_GET_PAGE_NUM(va, size);
- va_align = round_down(va, PAGE_SIZE);
-
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if(!table)
- {
- M4UMSG("%s table kmalloc fail: va=0x%lx, size=0x%x, page_num=%d\n", __FUNCTION__, va, size, page_num);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = sg_alloc_table(table, page_num, GFP_KERNEL);
- if(ret)
- {
- kfree(table);
- M4UMSG("%s alloc_sgtable fail: va=0x%lx, size=0x%x, page_num=%d\n", __FUNCTION__, va, size, page_num);
- return ERR_PTR(-ENOMEM);
- }
-
- M4ULOG_MID("%s va=0x%lx, PAGE_OFFSET=0x%lx, VMALLOC_START=0x%lx, VMALLOC_END=0x%lx\n", __FUNCTION__, va, PAGE_OFFSET, VMALLOC_START, VMALLOC_END);
-
- if(va<PAGE_OFFSET) // from user space
- {
- if(va>=VMALLOC_START && va<=VMALLOC_END) // vmalloc
- {
- M4ULOG_MID(" from user space vmalloc, va = 0x%lx", va);
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- page = vmalloc_to_page((void *)(va_align+i*PAGE_SIZE));
- if(!page)
- {
- M4UMSG("vmalloc_to_page fail, va=0x%lx\n", va_align+i*PAGE_SIZE);
- goto err;
- }
- sg_set_page(sg, page, PAGE_SIZE, 0);
- }
- }
- else
- {
- ret = m4u_create_sgtable_user(va_align, table);
- if(ret)
- {
- M4UMSG("%s error va=0x%lx, size=%d\n", __FUNCTION__, va, size);
- goto err;
- }
- }
- }
- else // from kernel space
- {
- if(va>=VMALLOC_START && va<=VMALLOC_END) // vmalloc
- {
- M4ULOG_MID(" from kernel space vmalloc, va = 0x%lx", va);
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- page = vmalloc_to_page((void *)(va_align+i*PAGE_SIZE));
- if(!page)
- {
- M4UMSG("vmalloc_to_page fail, va=0x%lx\n", va_align+i*PAGE_SIZE);
- goto err;
- }
- sg_set_page(sg, page, PAGE_SIZE, 0);
- }
- }
- else // kmalloc to-do: use one entry sgtable.
- {
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- pa = virt_to_phys((void*)(va_align + i*PAGE_SIZE));
- page = phys_to_page(pa);
- sg_set_page(sg, page, PAGE_SIZE, 0);
- }
- }
- }
-
- return table;
-
-err:
- sg_free_table(table);
- kfree(table);
- return ERR_PTR(-EFAULT);
-
-}
-
-int m4u_destroy_sgtable(struct sg_table *table)
-{
- if(!IS_ERR_OR_NULL(table))
- {
- sg_free_table(table);
- kfree(table);
- }
- return 0;
-}
-
-//#define __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
-
-int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
- unsigned long va, struct sg_table *sg_table,
- unsigned int size, unsigned int prot, unsigned int flags,
- unsigned int *pMva)
-{
- int ret;
- m4u_buf_info_t *pMvaInfo;
- unsigned int mva, mva_align, size_align;
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagStart, va, size);
-
-
- if(va && sg_table)
- {
- M4UMSG("%s, va or sg_table are both valid: va=0x%lx, sg=0x%p\n", __FUNCTION__,
- va, sg_table);
- }
- if(va)
- {
- sg_table = m4u_create_sgtable(va, size);
- if(IS_ERR_OR_NULL(sg_table))
- {
- M4UMSG("m4u_alloc_mva fail, cannot create sg: larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x\n",
- m4u_port_2_larb_id(port), m4u_get_port_name(port), va, sg_table, size, prot, flags);
- ret = -EFAULT;
- goto err;
- }
- }
-
- //here we get correct sg_table for this buffer
-
- pMvaInfo=m4u_alloc_buf_info();
- if(!pMvaInfo)
- {
- ret = -ENOMEM;
- goto err;
- }
-
- pMvaInfo->va = va;
- pMvaInfo->port = port;
- pMvaInfo->size = size;
- pMvaInfo->prot = prot;
- pMvaInfo->flags = flags;
- pMvaInfo->sg_table = sg_table;
-
- if(flags & M4U_FLAGS_FIX_MVA)
- {
- mva = m4u_do_mva_alloc_fix(*pMva, size, pMvaInfo);
- }
- else
- {
- mva = m4u_do_mva_alloc(va, size, pMvaInfo);
- }
-
- if(mva == 0)
- {
- m4u_aee_print("alloc mva fail: larb=%d,module=%s,size=%d\n",
- m4u_port_2_larb_id(port), m4u_get_port_name(port), size);
- m4u_dump_buf_info(NULL);
- ret = -EINVAL;
- goto err1;
- }
- else
- {
- M4ULOG_MID("%s,mva = 0x%x\n", __FUNCTION__, mva);
- }
-
- m4u_get_sgtable_pages(sg_table);
-
- mva_align = round_down(mva, PAGE_SIZE);
- size_align = PAGE_ALIGN(mva + size - mva_align);
-
- ret = m4u_map_sgtable(m4u_get_domain_by_port(port), mva_align,
- sg_table, size_align, pMvaInfo->prot);
- if(ret < 0)
- {
- M4UMSG("error to map sgtable\n");
- goto err2;
- }
-
- pMvaInfo->mva = mva;
- pMvaInfo->mva_align = mva_align;
- pMvaInfo->size_align = size_align;
- *pMva = mva;
-
- if(flags & M4U_FLAGS_SEQ_ACCESS)
- {
- pMvaInfo->seq_id = m4u_insert_seq_range(port, mva, mva+size-1);
- }
-
- m4u_client_add_buf(client, pMvaInfo);
-
- M4ULOG_MID("m4u_alloc_mva: pMvaInfo=0x%p, larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x,mva=0x%x\n",
- pMvaInfo, m4u_port_2_larb_id(port), m4u_get_port_name(port), va, sg_table, size, prot, flags, mva);
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, mva);
-
-#ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
- //map this mva to kernel va just for debug
- {
- unsigned long kernel_va;
- unsigned int kernel_size;
- int ret;
- ret = m4u_mva_map_kernel(mva, size, &kernel_va, &kernel_size);
- if(ret)
- {
- M4UMSG("error to map kernel va: mva=0x%x, size=%d\n", mva, size);
- }
- else
- {
- pMvaInfo->mapped_kernel_va_for_debug = kernel_va;
- M4ULOG_MID("[kernel_va_debug] map va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n", mva, kernel_va, size);
- }
- }
-#endif
-
-
-
- return 0;
-
-err2:
- m4u_do_mva_free(mva, size);
-
-err1:
- m4u_free_buf_info(pMvaInfo);
-
-err:
- if(va)
- m4u_destroy_sgtable(sg_table);
-
- *pMva = 0;
-
- M4UMSG("error: larb=%d,module=%s,va=0x%lx,sg=0x%p,size=%d,prot=0x%x,flags=0x%x, mva=0x%x\n",
- m4u_port_2_larb_id(port), m4u_get_port_name(port), va, sg_table, size, prot, flags, mva);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_ALLOC_MVA], MMProfileFlagEnd, port, 0);
- return ret;
-}
-
-
-//interface for ion
-static m4u_client_t *ion_m4u_client = NULL;
-
-int m4u_alloc_mva_sg(int eModuleID,
- struct sg_table *sg_table,
- const unsigned int BufSize,
- int security,
- int cache_coherent,
- unsigned int *pRetMVABuf)
-{
- int prot;
- if(!ion_m4u_client)
- {
- ion_m4u_client = m4u_create_client();
- if (IS_ERR_OR_NULL(ion_m4u_client))
- {
- ion_m4u_client = NULL;
- return -1;
- }
- }
-
- prot = M4U_PROT_READ | M4U_PROT_WRITE \
- | (cache_coherent ? (M4U_PROT_SHARE | M4U_PROT_CACHE) : 0)\
- | (security ? M4U_PROT_SEC : 0) ;
-
- return m4u_alloc_mva(ion_m4u_client, eModuleID, 0, sg_table, BufSize, prot, 0, pRetMVABuf);
-}
-
-
-#ifdef M4U_TEE_SERVICE_ENABLE
-static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size);
-
-int m4u_register_mva_share(int eModuleID, unsigned int mva)
-{
- m4u_buf_info_t *pMvaInfo;
-
- pMvaInfo = mva_get_priv(mva);
- if(!pMvaInfo)
- {
- M4UMSG("%s cannot find mva: module=%s, mva=0x%x\n", __FUNCTION__, m4u_get_port_name(eModuleID), mva);
- return -1;
- }
- pMvaInfo->flags |= M4U_FLAGS_SEC_SHAREABLE;
-
- return 0;
-}
-#endif
-
-
-int m4u_dealloc_mva_sg(int eModuleID,
- struct sg_table* sg_table,
- const unsigned int BufSize,
- const unsigned int MVA)
-{
- if(!ion_m4u_client)
- {
- m4u_aee_print("ion_m4u_client==NULL !! oops oops~~~~\n");
- return -1;
- }
-
- return m4u_dealloc_mva(ion_m4u_client, eModuleID, MVA);
-}
-
-//should not hold client->dataMutex here.
-int m4u_dealloc_mva(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva)
-{
- m4u_buf_info_t *pMvaInfo;
- int ret, is_err=0;
- unsigned int size;
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, port, mva);
-
-
- pMvaInfo = m4u_client_find_buf(client, mva, 1);
- if(unlikely(!pMvaInfo))
- {
- M4UMSG("error: m4u_dealloc_mva no mva found in client! module=%s, mva=0x%x\n", m4u_get_port_name(port), mva);
- m4u_dump_buf_info(NULL);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagStart, 0x5a5a5a5a, mva);
- return -EINVAL;
- }
-
- pMvaInfo->flags |= M4U_FLAGS_MVA_IN_FREE;
-
- M4ULOG_MID("m4u_dealloc_mva: larb=%d,module=%s,mva=0x%x, size=%d\n",
- m4u_port_2_larb_id(port), m4u_get_port_name(port), mva, pMvaInfo->size);
-
-#ifdef M4U_TEE_SERVICE_ENABLE
- if(pMvaInfo->flags & M4U_FLAGS_SEC_SHAREABLE)
- {
- m4u_unmap_nonsec_buffer(mva, pMvaInfo->size);
- }
-#endif
-
- ret = m4u_unmap(m4u_get_domain_by_port(port), pMvaInfo->mva_align, pMvaInfo->size_align);
- if(ret)
- {
- is_err=1;
- M4UMSG("m4u_unmap fail\n");
- }
-
- m4u_put_sgtable_pages(pMvaInfo->sg_table);
-
- ret = m4u_do_mva_free(mva, pMvaInfo->size);
- if(ret)
- {
- is_err=1;
- M4UMSG("do_mva_free fail\n");
- }
-
- if(pMvaInfo->va) //buffer is allocated by va
- {
- m4u_destroy_sgtable(pMvaInfo->sg_table);
- }
-
- if(pMvaInfo->flags & M4U_FLAGS_SEQ_ACCESS)
- {
- if(pMvaInfo->seq_id > 0)
- m4u_invalid_seq_range_by_id(port, pMvaInfo->seq_id);
- }
-
- if(is_err)
- {
- m4u_aee_print("%s fail: port=%s, mva=0x%x, size=0x%x, sg=0x%p\n", __FUNCTION__,
- m4u_get_port_name(port), mva, pMvaInfo->size, pMvaInfo->sg_table);
- ret = -EINVAL;
- }
- else
- ret = 0;
-
- size = pMvaInfo->size;
-
-#ifdef __M4U_MAP_MVA_TO_KERNEL_FOR_DEBUG__
- //unmap kernel va for debug
- {
- if(pMvaInfo->mapped_kernel_va_for_debug)
- {
- M4ULOG_MID("[kernel_va_debug] unmap va: mva=0x%x, kernel_va=0x%lx, size=0x%x\n",
- pMvaInfo->mva, pMvaInfo->mapped_kernel_va_for_debug, pMvaInfo->size);
- m4u_mva_unmap_kernel(pMvaInfo->mva, pMvaInfo->size, pMvaInfo->mapped_kernel_va_for_debug);
- }
- }
-#endif
-
- m4u_free_buf_info(pMvaInfo);
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_DEALLOC_MVA], MMProfileFlagEnd, size, mva);
-
- return ret;
-
-}
-
-
-
-
-int m4u_dma_cache_flush_all(void)
-{
- smp_inner_dcache_flush_all();
- outer_flush_all();
- return 0;
-}
-
-
-static struct vm_struct *cache_map_vm_struct = NULL;
-static int m4u_cache_sync_init(void)
-{
- cache_map_vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
- if (!cache_map_vm_struct)
- return -ENOMEM;
-
- return 0;
-}
-
-static void* m4u_cache_map_page_va(struct page* page)
-{
- int ret;
- struct page** ppPage = &page;
-
- ret = map_vm_area(cache_map_vm_struct, PAGE_KERNEL, &ppPage);
- if(ret)
- {
- M4UMSG("error to map page\n");
- return NULL;
- }
- return cache_map_vm_struct->addr;
-}
-
-static void m4u_cache_unmap_page_va(unsigned int va)
-{
- unmap_kernel_range((unsigned long)cache_map_vm_struct->addr, PAGE_SIZE);
-}
-
-
-static int __m4u_cache_sync_kernel(const void *start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
-{
- if((sync_type==M4U_CACHE_CLEAN_BY_RANGE))
- {
- dmac_map_area((void*)start, size, DMA_TO_DEVICE);
- }
- else if ((sync_type == M4U_CACHE_INVALID_BY_RANGE))
- {
- dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE);
- }
- else if ((sync_type == M4U_CACHE_FLUSH_BY_RANGE))
- {
- dmac_flush_range((void*)start, (void*)(start+size));
- }
-
- return 0;
-}
-
-extern void show_pte(struct mm_struct *mm, unsigned long addr);
-
-static struct page* m4u_cache_get_page(unsigned long va)
-{
- unsigned long start;
- phys_addr_t pa;
- struct page *page;
-
- start = va & (~M4U_PAGE_MASK);
- pa = m4u_user_v2p(start);
- if((pa==0))
- {
- M4UMSG("error m4u_get_phys user_v2p return 0 on va=0x%lu\n", start);
- //dump_page(page);
- m4u_dump_mmaps(start);
- show_pte(current->mm, va);
- return NULL;
- }
- page = phys_to_page(pa);
-
- return page;
-}
-
-
-
-//lock to protect cache_map_vm_struct
-static DEFINE_MUTEX(gM4u_cache_sync_user_lock);
-
-static int __m4u_cache_sync_user(unsigned long start, size_t size, M4U_CACHE_SYNC_ENUM sync_type)
-{
- unsigned long map_size, map_start, map_end;
- unsigned long end = start+size;
- struct page* page;
- unsigned long map_va, map_va_align;
- int ret = 0;
-
- mutex_lock(&gM4u_cache_sync_user_lock);
-
- if(!cache_map_vm_struct)
- {
- M4UMSG(" error: cache_map_vm_struct is NULL, retry\n");
- m4u_cache_sync_init();
- }
- if(!cache_map_vm_struct)
- {
- M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n");
- ret = -1;
- goto out;
- }
-
- map_start = start;
- while(map_start < end)
- {
- map_end = min(((map_start&(~M4U_PAGE_MASK))+PAGE_SIZE), end);
- map_size = map_end - map_start;
-
- page = m4u_cache_get_page(map_start);
- if(!page)
- {
- ret = -1;
- goto out;
- }
-
- map_va = (unsigned long)m4u_cache_map_page_va(page);
- if(!map_va)
- {
- ret = -1;
- goto out;
- }
-
- map_va_align = map_va | (map_start&(PAGE_SIZE-1));
-
- __m4u_cache_sync_kernel((void*)map_va_align, map_size, sync_type);
-
- m4u_cache_unmap_page_va(map_va);
- map_start = map_end;
- }
-
-
-out:
- mutex_unlock(&gM4u_cache_sync_user_lock);
-
- return ret;
-
-}
-
-
-int m4u_cache_sync_by_range(unsigned long va, unsigned int size,
- M4U_CACHE_SYNC_ENUM sync_type, struct sg_table *table)
-{
- int ret = 0;
- if(va<PAGE_OFFSET) // from user space
- {
- ret = __m4u_cache_sync_user(va, size, sync_type);
- }
- else
- {
- ret = __m4u_cache_sync_kernel((void*)va, size, sync_type);
- }
-
-#ifdef CONFIG_OUTER_CACHE
- {
- struct scatterlist *sg;
- int i;
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- unsigned int len = sg_dma_len(sg);
- phys_addr_t phys_addr = get_sg_phys(sg);
-
- if (sync_type == M4U_CACHE_CLEAN_BY_RANGE)
- outer_clean_range(phys_addr, phys_addr+len);
- else if (sync_type == M4U_CACHE_INVALID_BY_RANGE)
- outer_inv_range(phys_addr, phys_addr+len);
- else if (sync_type == M4U_CACHE_FLUSH_BY_RANGE)
- outer_flush_range(phys_addr, phys_addr+len);
- }
- }
-#endif
-
- return ret;
-}
-
-
-
-/**
- notes: only mva allocated by m4u_alloc_mva can use this function.
- if buffer is allocated by ion, please use ion_cache_sync
-**/
-int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
- unsigned long va, unsigned int size, unsigned int mva,
- M4U_CACHE_SYNC_ENUM sync_type)
-{
- int ret = 0;
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagStart, va, mva);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagPulse, size, ((sync_type)<<24) | port);
-
- M4ULOG_MID("cache_sync port=%s, va=0x%lx, size=0x%x, mva=0x%x, type=%d\n", m4u_get_port_name(port), va, size, mva, sync_type);
-
- if (sync_type < M4U_CACHE_CLEAN_ALL)
- {
- m4u_buf_info_t *pMvaInfo = NULL;
-
- if(client)
- pMvaInfo = m4u_client_find_buf(client, mva, 0);
-
- //some user may sync mva from other client (eg. ovl may not know who allocated this buffer, but he need to sync cache).
- //we make a workaround here by query mva from mva manager
- if(!pMvaInfo)
- pMvaInfo = mva_get_priv(mva);
-
- if(!pMvaInfo)
- {
- M4UMSG("cache sync fail, cannot find buf: mva=0x%x, client=0x%p.\n", mva, client);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, 0, 0);
- return -1;
- }
-
- if((pMvaInfo->size != size) || (pMvaInfo->va != va))
- {
- M4UMSG("cache_sync fail: expect mva=0x%x,size=0x%x,va=0x%lx, but mva=0x%x,size=0x%x,va=0x%lx\n",
- pMvaInfo->mva, pMvaInfo->size, pMvaInfo->va, mva, size, va);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, pMvaInfo->va, pMvaInfo->mva);
- return -1;
- }
-
- if((va|size) & (L1_CACHE_BYTES-1)) //va size should be cache line align
- {
- M4UMSG("warning: cache_sync not align: va=0x%lx,size=0x%x,align=0x%x\n",
- va, size, L1_CACHE_BYTES);
- }
-
- ret = m4u_cache_sync_by_range(va, size, sync_type, pMvaInfo->sg_table);
-
- }
- else
- {
- // All cache operation
- if (sync_type == M4U_CACHE_CLEAN_ALL)
- {
- smp_inner_dcache_flush_all();
- outer_clean_all();
- }
- else if (sync_type == M4U_CACHE_INVALID_ALL)
- {
- M4UMSG("no one can use invalid all!\n");
- return -1;
- }
- else if (sync_type == M4U_CACHE_FLUSH_ALL)
- {
- smp_inner_dcache_flush_all();
- outer_flush_all();
- }
- }
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CACHE_SYNC], MMProfileFlagEnd, size, mva);
- return ret;
-}
-
-
-int m4u_dump_info(int m4u_index)
-{
- return 0;
-}
-
-void m4u_get_pgd(m4u_client_t* client, M4U_PORT_ID port, void** pgd_va, void** pgd_pa, unsigned int* size)
-{
- m4u_domain_t * pDomain;
- pDomain = m4u_get_domain_by_port(port);
- *pgd_va = pDomain->pgd;
- *pgd_pa = (void*)pDomain->pgd_pa;
- *size = M4U_PGD_SIZE;
-}
-
-unsigned long m4u_mva_to_pa(m4u_client_t* client, M4U_PORT_ID port, unsigned int mva)
-{
- unsigned long pa;
- m4u_domain_t* pDomain;
-
- pDomain = m4u_get_domain_by_port(port);
-
- pa = m4u_get_pte(pDomain, mva);
-
- return pa;
-}
-
-int m4u_query_mva_info(unsigned int mva, unsigned int size, unsigned int *real_mva, unsigned int *real_size)
-{
- m4u_buf_info_t *pMvaInfo;
-
- if((!real_mva)||(!real_size))
- return -1;
-
- pMvaInfo = mva_get_priv(mva);
- if(!pMvaInfo)
- {
- M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __FUNCTION__, mva, size);
- *real_mva = 0;
- *real_size = 0;
-
- return -2;
- }
- *real_mva = pMvaInfo->mva;
- *real_size = pMvaInfo->size;
-
- return 0;
-}
-EXPORT_SYMBOL(m4u_query_mva_info);
-
-/***********************************************************/
-/** map mva buffer to kernel va buffer
-* this funtion should ONLY used for DEBUG
-************************************************************/
-int m4u_mva_map_kernel(unsigned int mva, unsigned int size, unsigned long *map_va, unsigned int *map_size)
-{
- m4u_buf_info_t *pMvaInfo;
- struct sg_table *table;
- struct scatterlist *sg;
- int i, j, k, ret=0;
- struct page **pages;
- unsigned int page_num;
- void* kernel_va;
- unsigned int kernel_size;
-
- pMvaInfo = mva_get_priv(mva);
-
- if(!pMvaInfo || pMvaInfo->size<size)
- {
- M4UMSG("%s cannot find mva: mva=0x%x, size=0x%x\n", __FUNCTION__, mva, size);
- if(pMvaInfo)
- M4UMSG("pMvaInfo: mva=0x%x, size=0x%x\n", pMvaInfo->mva, pMvaInfo->size);
- return -1;
- }
-
- table = pMvaInfo->sg_table;
-
- page_num = M4U_GET_PAGE_NUM(mva, size);
- pages = vmalloc(sizeof(struct page *) * page_num);
- if (pages == NULL)
- {
- M4UMSG("mva_map_kernel:error to vmalloc for %d.\n", (int)sizeof(struct page *) * page_num);
- }
-
- k=0;
- for_each_sg(table->sgl, sg, table->nents, i)
- {
- struct page* page_start;
- int pages_in_this_sg = PAGE_ALIGN(sg_dma_len(sg))/PAGE_SIZE;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- if(0 == sg_dma_address(sg))
- {
- pages_in_this_sg = PAGE_ALIGN(sg->length)/PAGE_SIZE;
- }
-#endif
- page_start = sg_page(sg);
- for(j=0; j<pages_in_this_sg; j++)
- {
- pages[k++] = page_start++;
- if(k>=page_num)
- {
- goto get_pages_done;
- }
- }
- }
-
-get_pages_done:
-
- if (k < page_num)
- {
- //this should not happen, because we have checked the size before.
- M4UMSG("mva_map_kernel:only get %d pages: mva=0x%x, size=0x%x, pg_num=%d\n", k, mva, size, page_num);
- ret = -1;
- goto error_out;
- }
-
- kernel_va = 0;
- kernel_size = 0;
- kernel_va = vmap(pages, page_num, VM_MAP, PAGE_KERNEL);
- if (kernel_va == 0 || (unsigned long)kernel_va & M4U_PAGE_MASK)
- {
- M4UMSG("mva_map_kernel:vmap fail: page_num=%d, kernel_va=0x%p\n", page_num, kernel_va);
- ret = -1;
- goto error_out;
- }
-
- kernel_va += ((unsigned long)mva & (M4U_PAGE_MASK));
-
- *map_va = (unsigned long)kernel_va;
- *map_size = size;
-
- error_out:
- vfree(pages);
- M4ULOG_LOW("mva_map_kernel:mva=0x%x,size=0x%x,map_va=0x%lx,map_size=0x%x\n",
- mva, size, *map_va, *map_size);
-
- return ret;
-}
-
-EXPORT_SYMBOL(m4u_mva_map_kernel);
-
-int m4u_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned long map_va)
-{
- M4ULOG_LOW("mva_unmap_kernel:mva=0x%x,size=0x%x,va=0x%lx\n", mva, size, map_va);
- vunmap((void *) (map_va & (~M4U_PAGE_MASK)));
- return 0;
-}
-
-EXPORT_SYMBOL(m4u_mva_unmap_kernel);
-
-
-static int MTK_M4U_open(struct inode *inode, struct file *file)
-{
- m4u_client_t *client;
-
- client = m4u_create_client();
- if (IS_ERR_OR_NULL(client))
- {
- M4UMSG("createclientfail\n");
- return -ENOMEM;
- }
-
- file->private_data = client;
-
- return 0;
-}
-
-static int MTK_M4U_release(struct inode *inode, struct file *file)
-{
- m4u_client_t *client = file->private_data;
- m4u_destroy_client(client);
- return 0;
-}
-
-static int MTK_M4U_flush(struct file *filp, fl_owner_t a_id)
-{
- return 0;
-}
-
-#ifdef M4U_TEE_SERVICE_ENABLE
-
-#define TPLAY_DEV_NAME "tz_m4u"
-
-#define M4U_DRV_UUID {{0x90,0x73,0xF0,0x3A,0x96,0x18,0x38,0x3B,0xB1,0x85,0x6E,0xB3,0xF9,0x90,0xBA,0xBD}}
-static const struct mc_uuid_t m4u_drv_uuid = M4U_DRV_UUID;
-static struct mc_session_handle m4u_dci_session;
-static m4u_msg_t *m4u_dci_msg = NULL;
-static DEFINE_MUTEX(m4u_dci_mutex);
-
-#define M4U_TL_UUID {{0x98,0xfb,0x95,0xbc,0xb4,0xbf,0x42,0xd2,0x64,0x73,0xea,0xe4,0x86,0x90,0xd7,0xea}}
-static const struct mc_uuid_t m4u_tl_uuid = M4U_TL_UUID;
-static struct mc_session_handle m4u_tci_session;
-static m4u_msg_t *m4u_tci_msg = NULL;
-static DEFINE_MUTEX(m4u_tci_mutex);
-
-static int m4u_open_trustlet( uint32_t deviceId)
-{
-
- enum mc_result mcRet;
-
- /* Initialize session handle data */
- memset(&m4u_tci_session, 0, sizeof(m4u_tci_session));
-
- mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_tci_msg, 0);
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: mc_malloc_wsm tci fail: %d\n", mcRet);
- return -1;
- }
-
- /* Open session the trustlet */
- m4u_tci_session.device_id = deviceId;
- mcRet = mc_open_session(&m4u_tci_session,
- &m4u_tl_uuid,
- (uint8_t *) m4u_tci_msg,
- (uint32_t) sizeof(m4u_msg_t));
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
- return -1;
- }
-
- M4UMSG("tz_m4u: open TCI session success\n");
-
- return 0;
-}
-
-int m4u_close_trustlet( uint32_t deviceId)
-{
- enum mc_result mcRet;
-
- mcRet = mc_free_wsm(deviceId, (uint8_t *)m4u_tci_msg);
- if(mcRet)
- {
- M4UMSG("tz_m4u: free tci struct fail: %d\n", mcRet);
- return -1;
- }
-
- /* Close session*/
- mcRet = mc_close_session(&m4u_tci_session);
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: mc_close_session returned: %d\n", mcRet);
- return -1;
- }
-
- return 0;
-}
-
-static int m4u_exec_cmd(struct mc_session_handle* m4u_session, m4u_msg_t* m4u_msg)
-{
- enum mc_result ret;
-
- if(NULL == m4u_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- return -1;
- }
-
- M4UMSG("Notify %x\n", m4u_msg->cmd);
- ret = mc_notify(m4u_session);
- if (MC_DRV_OK != ret)
- {
- m4u_aee_print("tz_m4u Notify failed: %d\n", ret);
- goto exit;
- }
-
-
- ret = mc_wait_notification(m4u_session, MC_INFINITE_TIMEOUT);
- if (MC_DRV_OK != ret)
- {
- m4u_aee_print("Wait for response notification failed: 0x%x\n", ret);
- goto exit;
- }
-
- M4UMSG("get_resp %x\n", m4u_msg->cmd);
-exit:
- return ret;
-}
-
-
-extern int gM4U_L2_enable;
-
-static int __m4u_sec_init(void)
-{
- int ret;
- void* pgd_va;
- unsigned long pt_pa_nonsec;
- unsigned size;
-
- mutex_lock(&m4u_tci_mutex);
- if(NULL == m4u_tci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_get_pgd(NULL, 0, &pgd_va, &pt_pa_nonsec, &size);
-
-
- m4u_tci_msg->cmd = CMD_M4UTL_INIT;
- m4u_tci_msg->init_param.nonsec_pt_pa= pt_pa_nonsec;
- m4u_tci_msg->init_param.l2_en = gM4U_L2_enable;
- m4u_tci_msg->init_param.sec_pt_pa = 0; //m4u_alloc_sec_pt_for_debug();
- M4UMSG("%s call m4u_exec_cmd CMD_M4UTL_INIT\n", __FUNCTION__);
- ret = m4u_exec_cmd(&m4u_tci_session, m4u_tci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
-
- ret = m4u_tci_msg->rsp;
-out:
- mutex_unlock(&m4u_tci_mutex);
- return ret;
-}
-
-// -------------------------------------------------------------
-#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
-static int dr_map(unsigned long pa, size_t size)
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
- if(!m4u_dci_msg)
- {
- M4UMSG("error: m4u_dci_msg==null\n");
- ret = -1;
- goto out;
- }
-
- memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
-
- m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_MAP;
- m4u_dci_msg->systrace_param.pa = pa;
- m4u_dci_msg->systrace_param.size = size;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-static int dr_unmap(unsigned long pa, size_t size)
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
- if(!m4u_dci_msg)
- {
- M4UMSG("error: m4u_dci_msg==null\n");
- ret = -1;
- goto out;
- }
-
- memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
-
- m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_UNMAP;
- m4u_dci_msg->systrace_param.pa = pa;
- m4u_dci_msg->systrace_param.size = size;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-static int dr_transact(void)
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
- if(!m4u_dci_msg)
- {
- M4UMSG("error: m4u_dci_msg==null\n");
- ret = -1;
- goto out;
- }
-
- memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
-
- m4u_dci_msg->cmd = CMD_M4U_SYSTRACE_TRANSACT;
- m4u_dci_msg->systrace_param.pa = 0;
- m4u_dci_msg->systrace_param.size = 0;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-#endif
-// -------------------------------------------------------------
-
-int m4u_sec_init(void)
-{
-
- uint32_t deviceId = MC_DEVICE_ID_DEFAULT;
- enum mc_result mcRet;
-
- if(m4u_tee_en)
- {
- M4UMSG("warning: m4u secure has been inited, %d\n", m4u_tee_en);
- return 0;
- }
- else
- {
- M4UMSG("call m4u_sec_init in nornal m4u driver\n");
- }
-
- /* Initialize session handle data */
- memset(&m4u_dci_session, 0, sizeof(m4u_dci_session));
-
- /* Open MobiCore device */
- mcRet = mc_open_device(deviceId);
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: error mc_open_device returned: %d\n", mcRet);
- if(mcRet != MC_DRV_ERR_INVALID_OPERATION)
- return -1;
- }
-
- /* Allocating WSM for DCI */
- mcRet = mc_malloc_wsm(deviceId, 0, sizeof(m4u_msg_t), (uint8_t **) &m4u_dci_msg, 0);
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: mc_malloc_wsm returned: %d\n", mcRet);
- return -1;
- }
-
- /* Open session the trustlet */
- m4u_dci_session.device_id = deviceId;
- mcRet = mc_open_session(&m4u_dci_session,
- &m4u_drv_uuid,
- (uint8_t *) m4u_dci_msg,
- (uint32_t) sizeof(m4u_msg_t));
- if (MC_DRV_OK != mcRet)
- {
- M4UMSG("tz_m4u: mc_open_session returned: %d\n", mcRet);
- return -1;
- }
-
- M4UMSG("tz_m4u: open DCI session returned: %d\n", mcRet);
-
-
- {
- volatile int i, j;
- for(i=0; i<10000000; i++)
- j++;
- }
-
- m4u_open_trustlet(deviceId);
- __m4u_sec_init();
-#ifdef __M4U_SECURE_SYSTRACE_ENABLE__
- {
- union callback_func callback;
- callback.dr.map = dr_map;
- callback.dr.unmap = dr_unmap;
- callback.dr.transact = dr_transact;
- init_sectrace("M4U", if_dci, usage_dr, 64, &callback);
- }
-#endif
- m4u_close_trustlet(deviceId);
-
- m4u_tee_en = 1;
-
- return 0;
-}
-
-int m4u_config_port_tee(M4U_PORT_STRUCT* pM4uPort) //native
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
- if(!m4u_dci_msg)
- {
- M4UMSG("error: m4u_dci_msg==null\n");
- ret = -1;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_CFG_PORT;
- m4u_dci_msg->port_param.port = pM4uPort->ePortID;
- m4u_dci_msg->port_param.virt = pM4uPort->Virtuality;
- m4u_dci_msg->port_param.direction = pM4uPort->Direction;
- m4u_dci_msg->port_param.distance = pM4uPort->Distance;
- m4u_dci_msg->port_param.sec = 0;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-
-}
-
-int m4u_config_port_array_tee(unsigned char* port_array) //native
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
- if(!m4u_dci_msg)
- {
- M4UMSG("error: m4u_dci_msg==null\n");
- ret = -1;
- goto out;
- }
-
- memset(m4u_dci_msg, 0, sizeof(m4u_msg_t));
- memcpy(m4u_dci_msg->port_array_param.m4u_port_array, port_array, sizeof(m4u_dci_msg->port_array_param.m4u_port_array));
-
- m4u_dci_msg->cmd = CMD_M4U_CFG_PORT_ARRAY;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-
-}
-
-static int m4u_unmap_nonsec_buffer(unsigned int mva, unsigned int size)
-{
- int ret;
-
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_UNMAP_NONSEC_BUFFER;
- m4u_dci_msg->buf_param.mva = mva;
- m4u_dci_msg->buf_param.size = size;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-int m4u_larb_backup_sec(unsigned int larb_idx)
-{
- int ret;
-
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_LARB_BACKUP;
- m4u_dci_msg->larb_param.larb_idx = larb_idx;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-int m4u_larb_restore_sec(unsigned int larb_idx)
-{
- int ret;
-
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_LARB_RESTORE;
- m4u_dci_msg->larb_param.larb_idx = larb_idx;
-
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-static int m4u_reg_backup_sec(void)
-{
- int ret;
-
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_REG_BACKUP;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-static int m4u_reg_restore_sec(void)
-{
- int ret;
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_REG_RESTORE;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
-
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-
-
-static void m4u_early_suspend(struct early_suspend *h)
-{
- M4UMSG("m4u_early_suspend +, %d\n", m4u_tee_en);
-
- if(m4u_tee_en)
- m4u_reg_backup_sec();
- M4UMSG("m4u_early_suspend -\n");
-}
-
-static void m4u_late_resume(struct early_suspend *h)
-{
- M4UMSG("m4u_late_resume +, %d\n", m4u_tee_en);
-
- if(m4u_tee_en)
- m4u_reg_restore_sec();
-
- M4UMSG("m4u_late_resume -\n");
-}
-
-static struct early_suspend mtk_m4u_early_suspend_driver = {
- .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 251,
- .suspend = m4u_early_suspend,
- .resume = m4u_late_resume,
-};
-
-#if 1
-int m4u_map_nonsec_buf(int port, unsigned int mva, unsigned int size)
-{
- int ret;
-
- mutex_lock(&m4u_dci_mutex);
-
- if(NULL == m4u_dci_msg)
- {
- M4UMSG("%s TCI/DCI error\n", __FUNCTION__);
- ret = MC_DRV_ERR_NO_FREE_MEMORY;
- goto out;
- }
-
- m4u_dci_msg->cmd = CMD_M4U_MAP_NONSEC_BUFFER;
- m4u_dci_msg->buf_param.mva = mva;
- m4u_dci_msg->buf_param.size = size;
-
- ret = m4u_exec_cmd(&m4u_dci_session, m4u_dci_msg);
- if(ret)
- {
- M4UMSG("m4u exec command fail\n");
- ret = -1;
- goto out;
- }
- ret = m4u_dci_msg->rsp;
-
-out:
- mutex_unlock(&m4u_dci_mutex);
- return ret;
-}
-#endif
-
-
-#endif
-
-#ifdef M4U_TEE_SERVICE_ENABLE
-static DEFINE_MUTEX(gM4u_sec_init);
-#endif
-
-static long MTK_M4U_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int ret = 0;
- M4U_MOUDLE_STRUCT m4u_module;
- M4U_PORT_STRUCT m4u_port;
- M4U_PORT_ID PortID;
- M4U_PORT_ID ModuleID;
- M4U_CACHE_STRUCT m4u_cache_data;
- m4u_client_t *client = filp->private_data;
-
- switch (cmd)
- {
- case MTK_M4U_T_POWER_ON:
- ret = copy_from_user(&ModuleID, (void *) arg, sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_POWER_ON,copy_from_user failed,%d\n", ret);
- return -EFAULT;
- }
- ret = m4u_power_on(ModuleID);
- break;
-
- case MTK_M4U_T_POWER_OFF:
- ret = copy_from_user(&ModuleID, (void *) arg, sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_POWER_OFF,copy_from_user failed,%d\n", ret);
- return -EFAULT;
- }
- ret = m4u_power_off(ModuleID);
- break;
-
- case MTK_M4U_T_ALLOC_MVA:
- ret = copy_from_user(&m4u_module, (void *) arg, sizeof(M4U_MOUDLE_STRUCT));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-
- ret = m4u_alloc_mva(client, m4u_module.port, m4u_module.BufAddr,
- NULL, m4u_module.BufSize, m4u_module.prot, m4u_module.flags, &(m4u_module.MVAStart));
-
- if (ret)
- return ret;
-
- ret = copy_to_user(&(((M4U_MOUDLE_STRUCT *) arg)->MVAStart), &(m4u_module.MVAStart), sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_ALLOC_MVA,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
- break;
-
- case MTK_M4U_T_DEALLOC_MVA:
- {
- ret = copy_from_user(&m4u_module, (void *) arg, sizeof(M4U_MOUDLE_STRUCT));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_DEALLOC_MVA,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-
- ret = m4u_dealloc_mva(client, m4u_module.port, m4u_module.MVAStart);
- if (ret)
- return ret;
- }
- break;
-
- case MTK_M4U_T_DUMP_INFO:
- ret = copy_from_user(&ModuleID, (void *) arg, sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_Invalid_TLB_Range,copy_from_user failed,%d\n", ret);
- return -EFAULT;
- }
-
- break;
-
- case MTK_M4U_T_CACHE_SYNC:
- ret = copy_from_user(&m4u_cache_data, (void *) arg, sizeof(M4U_CACHE_STRUCT));
- if (ret)
- {
- M4UMSG("m4u_cache_sync,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-
- ret = m4u_cache_sync(client, m4u_cache_data.port, m4u_cache_data.va,
- m4u_cache_data.size, m4u_cache_data.mva, m4u_cache_data.eCacheSync);
- break;
-
- case MTK_M4U_T_CONFIG_PORT:
- ret = copy_from_user(&m4u_port, (void *) arg, sizeof(M4U_PORT_STRUCT));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_CONFIG_PORT,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-#ifdef M4U_TEE_SERVICE_ENABLE
- mutex_lock(&gM4u_sec_init);
-#endif
- ret = m4u_config_port(&m4u_port);
-#ifdef M4U_TEE_SERVICE_ENABLE
- mutex_unlock(&gM4u_sec_init);
-#endif
- break;
-
-
- case MTK_M4U_T_MONITOR_START:
- ret = copy_from_user(&PortID, (void *) arg, sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_MONITOR_START,copy_from_user failed,%d\n", ret);
- return -EFAULT;
- }
- ret = m4u_monitor_start(m4u_port_2_m4u_id(PortID));
-
- break;
-
- case MTK_M4U_T_MONITOR_STOP:
- ret = copy_from_user(&PortID, (void *) arg, sizeof(unsigned int));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_MONITOR_STOP,copy_from_user failed,%d\n", ret);
- return -EFAULT;
- }
- ret = m4u_monitor_stop(m4u_port_2_m4u_id(PortID));
- break;
-
- case MTK_M4U_T_CACHE_FLUSH_ALL:
- m4u_dma_cache_flush_all();
- break;
-
- case MTK_M4U_T_CONFIG_PORT_ARRAY:
- {
- struct m4u_port_array port_array;
- ret = copy_from_user(&port_array, (void *) arg, sizeof(struct m4u_port_array));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_CONFIG_PORT,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-#ifdef M4U_TEE_SERVICE_ENABLE
- mutex_lock(&gM4u_sec_init);
-#endif
- ret = m4u_config_port_array(&port_array);
-#ifdef M4U_TEE_SERVICE_ENABLE
- mutex_unlock(&gM4u_sec_init);
-#endif
- }
- break;
- case MTK_M4U_T_CONFIG_MAU:
- {
- M4U_MAU_STRUCT rMAU;
- ret = copy_from_user(&rMAU, (void *) arg, sizeof(M4U_MAU_STRUCT));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_CONFIG_MAU,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-
- ret = config_mau(rMAU);
- }
- break;
- case MTK_M4U_T_CONFIG_TF:
- {
- M4U_TF_STRUCT rM4UTF;
- ret = copy_from_user(&rM4UTF, (void *) arg, sizeof(M4U_TF_STRUCT));
- if (ret)
- {
- M4UMSG("MTK_M4U_T_CONFIG_TF,copy_from_user failed:%d\n", ret);
- return -EFAULT;
- }
-
- ret = m4u_enable_tf(rM4UTF.port, rM4UTF.fgEnable);
- }
- break;
-#ifdef M4U_TEE_SERVICE_ENABLE
- case MTK_M4U_T_SEC_INIT:
- {
- M4UMSG("MTK M4U ioctl : MTK_M4U_T_SEC_INIT command!! 0x%x\n", cmd);
- mutex_lock(&gM4u_sec_init);
- ret = m4u_sec_init();
- mutex_unlock(&gM4u_sec_init);
- }
- break;
-#endif
- default:
- M4UMSG("MTK M4U ioctl:No such command!!\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-#if IS_ENABLED(CONFIG_COMPAT)
-
-typedef struct
-{
- compat_uint_t port;
- compat_ulong_t BufAddr;
- compat_uint_t BufSize;
- compat_uint_t prot;
- compat_uint_t MVAStart;
- compat_uint_t MVAEnd;
- compat_uint_t flags;
-}COMPAT_M4U_MOUDLE_STRUCT;
-
-typedef struct
-{
- compat_uint_t port;
- compat_uint_t eCacheSync;
- compat_ulong_t va;
- compat_uint_t size;
- compat_uint_t mva;
-}COMPAT_M4U_CACHE_STRUCT;
-
-
-#define COMPAT_MTK_M4U_T_ALLOC_MVA _IOWR(MTK_M4U_MAGICNO,4, int)
-#define COMPAT_MTK_M4U_T_DEALLOC_MVA _IOW(MTK_M4U_MAGICNO, 5, int)
-#define COMPAT_MTK_M4U_T_CACHE_SYNC _IOW(MTK_M4U_MAGICNO, 10, int)
-
-
-
-static int compat_get_m4u_module_struct(
- COMPAT_M4U_MOUDLE_STRUCT __user *data32,
- M4U_MOUDLE_STRUCT __user *data)
-{
- compat_uint_t u;
- compat_ulong_t l;
- int err;
-
- err = get_user(u, &(data32->port));
- err |= put_user(u, &(data->port));
- err |= get_user(l, &(data32->BufAddr));
- err |= put_user(l, &(data->BufAddr));
- err |= get_user(u, &(data32->BufSize));
- err |= put_user(u, &(data->BufSize));
- err |= get_user(u, &(data32->prot));
- err |= put_user(u, &(data->prot));
- err |= get_user(u, &(data32->MVAStart));
- err |= put_user(u, &(data->MVAStart));
- err |= get_user(u, &(data32->MVAEnd));
- err |= put_user(u, &(data->MVAEnd));
- err |= get_user(u, &(data32->flags));
- err |= put_user(u, &(data->flags));
-
- return err;
-}
-
-static int compat_put_m4u_module_struct(
- COMPAT_M4U_MOUDLE_STRUCT __user *data32,
- M4U_MOUDLE_STRUCT __user *data)
-{
- compat_uint_t u;
- compat_ulong_t l;
- int err;
-
-
- err = get_user(u, &(data->port));
- err |= put_user(u, &(data32->port));
- err |= get_user(l, &(data->BufAddr));
- err |= put_user(l, &(data32->BufAddr));
- err |= get_user(u, &(data->BufSize));
- err |= put_user(u, &(data32->BufSize));
- err |= get_user(u, &(data->prot));
- err |= put_user(u, &(data32->prot));
- err |= get_user(u, &(data->MVAStart));
- err |= put_user(u, &(data32->MVAStart));
- err |= get_user(u, &(data->MVAEnd));
- err |= put_user(u, &(data32->MVAEnd));
- err |= get_user(u, &(data->flags));
- err |= put_user(u, &(data32->flags));
-
- return err;
-}
-
-static int compat_get_m4u_cache_struct(
- COMPAT_M4U_CACHE_STRUCT __user *data32,
- M4U_CACHE_STRUCT __user *data)
-{
- compat_uint_t u;
- compat_ulong_t l;
- int err;
-
- err = get_user(u, &(data32->port));
- err |= put_user(u, &(data->port));
- err |= get_user(u, &(data32->eCacheSync));
- err |= put_user(u, &(data->eCacheSync));
- err |= get_user(l, &(data32->va));
- err |= put_user(l, &(data->va));
- err |= get_user(u, &(data32->size));
- err |= put_user(u, &(data->size));
- err |= get_user(u, &(data32->mva));
- err |= put_user(u, &(data->mva));
-
- return err;
-}
-
-
-long MTK_M4U_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op || !filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd) {
- case COMPAT_MTK_M4U_T_ALLOC_MVA:
- {
- COMPAT_M4U_MOUDLE_STRUCT __user *data32;
- M4U_MOUDLE_STRUCT __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_m4u_module_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_ALLOC_MVA,
- (unsigned long)data);
-
- err = compat_put_m4u_module_struct(data32, data);
-
- if (err)
- return err;
-
- return ret;
- }
- case COMPAT_MTK_M4U_T_DEALLOC_MVA:
- {
- COMPAT_M4U_MOUDLE_STRUCT __user *data32;
- M4U_MOUDLE_STRUCT __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(M4U_MOUDLE_STRUCT));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_m4u_module_struct(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_DEALLOC_MVA,
- (unsigned long)data);
- }
- case COMPAT_MTK_M4U_T_CACHE_SYNC:
- {
- COMPAT_M4U_CACHE_STRUCT __user *data32;
- M4U_CACHE_STRUCT __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(M4U_CACHE_STRUCT));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_m4u_cache_struct(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, MTK_M4U_T_CACHE_SYNC,
- (unsigned long)data);
- }
- case MTK_M4U_T_POWER_ON:
- case MTK_M4U_T_POWER_OFF:
- case MTK_M4U_T_DUMP_INFO:
- case MTK_M4U_T_CONFIG_PORT:
- case MTK_M4U_T_MONITOR_START:
- case MTK_M4U_T_MONITOR_STOP:
- case MTK_M4U_T_CACHE_FLUSH_ALL:
- case MTK_M4U_T_CONFIG_PORT_ARRAY:
- case MTK_M4U_T_SEC_INIT:
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-
-}
-
-#else
-
-#define MTK_M4U_COMPAT_ioctl NULL
-
-#endif
-
-
-static const struct file_operations m4u_fops = {
- .owner = THIS_MODULE,
- .open = MTK_M4U_open,
- .release = MTK_M4U_release,
- .flush = MTK_M4U_flush,
- .unlocked_ioctl = MTK_M4U_ioctl,
- .compat_ioctl = MTK_M4U_COMPAT_ioctl,
- //.mmap = NULL;
-};
-
-#if !defined(CONFIG_MTK_LEGACY)
-const char* smi_clk_name[] = {
- "smi_common", "m4u_disp0_smi_larb0", "m4u_vdec0_vdec", "m4u_vdec1_larb", "m4u_img_image_larb2_smi", "m4u_venc_venc", "m4u_venc_larb"
-};
-#endif
-
-static int m4u_probe(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
-
- M4UINFO("m4u_probe 0\n");
-
- if (pdev->dev.of_node) {
- int err;
- err = of_property_read_u32(node, "cell-index", &pdev->id);
- if (err){
- printk("[DTS] get m4u platform_device id fail!!\n");
- }
- }
- M4UINFO("m4u_probe 1, pdev id = %d name = %s\n", pdev->id, pdev->name);
-
- gM4uDev->pDev[pdev->id] = &pdev->dev;
- gM4uDev->m4u_base[pdev->id] = (unsigned long)of_iomap(node, 0);
- gM4uDev->irq_num[pdev->id] = irq_of_parse_and_map(node, 0);
-
-#if !defined(CONFIG_MTK_LEGACY)
- gM4uDev->infra_m4u = devm_clk_get(&pdev->dev, "infra_m4u");
- if (IS_ERR(gM4uDev->infra_m4u)) {
- printk("cannot get infra m4u clock\n");
- return PTR_ERR(gM4uDev->infra_m4u);
- }
-
- int i;
- for (i = SMI_COMMON_CLK; i < SMI_CLK_NUM; i++) {
- gM4uDev->smi_clk[i] = devm_clk_get(&pdev->dev, smi_clk_name[i]);
- if (IS_ERR(gM4uDev->smi_clk[i])) {
- printk("cannot get %s clock\n", smi_clk_name[i]);
- return PTR_ERR(gM4uDev->smi_clk[i]);
- }
- }
-#endif
-
- M4UINFO("m4u_probe 2, of_iomap: 0x%lx, irq_num: %d, pDev: %p\n", gM4uDev->m4u_base[pdev->id], gM4uDev->irq_num[pdev->id], gM4uDev->pDev[pdev->id]);
-
- if(0 == pdev->id)
- {
- m4u_domain_init(gM4uDev, &gMvaNode_unkown);
-
-#ifdef M4U_TEE_SERVICE_ENABLE
- {
- m4u_buf_info_t *pMvaInfo;
- unsigned int mva;
-
- pMvaInfo=m4u_alloc_buf_info();
- if(!pMvaInfo)
- {
- pMvaInfo->port = M4U_PORT_UNKNOWN;
- pMvaInfo->size = M4U_NONSEC_MVA_START - 0x100000;
- }
-
- mva = m4u_do_mva_alloc(0, M4U_NONSEC_MVA_START - 0x100000, pMvaInfo);
- M4UINFO("reserve sec mva: 0x%x\n", mva);
- }
-#endif
-
- }
-
- m4u_hw_init(gM4uDev, pdev->id);
-
- M4UINFO("m4u_probe 3 finish...\n");
- return 0;
-
-
-}
-
-
-static int m4u_remove(struct platform_device *pdev)
-{
-
- m4u_hw_deinit(gM4uDev, pdev->id);
-
-#ifndef __M4U_USE_PROC_NODE
- misc_deregister(&(gM4uDev->dev));
-#else
- if(gM4uDev->m4u_dev_proc_entry)
- proc_remove(gM4uDev->m4u_dev_proc_entry);
-#endif
-
- return 0;
-}
-
-
-
-static int m4u_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- m4u_reg_backup();
- M4UINFO("M4U backup in suspend\n");
-
- return 0;
-}
-
-static int m4u_resume(struct platform_device *pdev)
-{
- m4u_reg_restore();
- M4UINFO("M4U restore in resume\n");
- return 0;
-}
-
-
-/*---------------------------------------------------------------------------*/
-#ifdef CONFIG_PM
-/*---------------------------------------------------------------------------*/
-static int m4u_pm_suspend(struct device *device)
-{
- struct platform_device *pdev = to_platform_device(device);
- BUG_ON(pdev == NULL);
-
- return m4u_suspend(pdev, PMSG_SUSPEND);
-}
-
-static int m4u_pm_resume(struct device *device)
-{
- struct platform_device *pdev = to_platform_device(device);
- BUG_ON(pdev == NULL);
-
- return m4u_resume(pdev);
-}
-
-extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
-extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity);
-static int m4u_pm_restore_noirq(struct device *device)
-{
- int i;
-
- for(i = 0; i<TOTAL_M4U_NUM; i++)
- {
- mt_irq_set_sens(gM4uDev->irq_num[i], MT_LEVEL_SENSITIVE);
- mt_irq_set_polarity(gM4uDev->irq_num[i], MT_POLARITY_LOW);
- }
-
- return 0;
-}
-
-/*---------------------------------------------------------------------------*/
-#else /*CONFIG_PM */
-/*---------------------------------------------------------------------------*/
-#define m4u_pm_suspend NULL
-#define m4u_pm_resume NULL
-#define m4u_pm_restore_noirq NULL
-/*---------------------------------------------------------------------------*/
-#endif /*CONFIG_PM */
-/*---------------------------------------------------------------------------*/
-static const struct of_device_id iommu_of_ids[] = {
- { .compatible = "mediatek,M4U", },
- {}
-};
-
-struct dev_pm_ops m4u_pm_ops = {
- .suspend = m4u_pm_suspend,
- .resume = m4u_pm_resume,
- .freeze = m4u_pm_suspend,
- .thaw = m4u_pm_resume,
- .poweroff = m4u_pm_suspend,
- .restore = m4u_pm_resume,
- .restore_noirq = m4u_pm_restore_noirq,
-};
-
-static struct platform_driver m4uDrv = {
- .probe = m4u_probe,
- .remove = m4u_remove,
- .suspend = m4u_suspend,
- .resume = m4u_resume,
- .driver = {
- .name = "m4u",
- .of_match_table = iommu_of_ids,
-#ifdef CONFIG_PM
- .pm = &m4u_pm_ops,
-#endif
- .owner = THIS_MODULE,
- }
-};
-
-#if 0
-static u64 m4u_dmamask = ~(u32) 0;
-
-static struct platform_device mtk_m4u_dev = {
- .name = M4U_DEV_NAME,
- .id = 0,
- .dev = {
- .dma_mask = &m4u_dmamask,
- .coherent_dma_mask = 0xffffffffUL}
-};
-#endif
-
-#define __M4U_USE_PROC_NODE
-
-static int __init MTK_M4U_Init(void)
-{
- int ret = 0;
-
- gM4uDev = kzalloc(sizeof(struct m4u_device), GFP_KERNEL);
-
- M4UINFO("MTK_M4U_Init kzalloc: %p\n", gM4uDev);
-
- if (!gM4uDev)
- {
- M4UMSG("kmalloc for m4u_device fail\n");
- return -ENOMEM;
- }
-
-#ifndef __M4U_USE_PROC_NODE
- gM4uDev->dev.minor = MISC_DYNAMIC_MINOR;
- gM4uDev->dev.name = M4U_DEV_NAME;
- gM4uDev->dev.fops = &m4u_fops;
- gM4uDev->dev.parent = NULL;
-
- ret = misc_register(&(gM4uDev->dev));
- M4UINFO("misc_register, minor: %d\n", gM4uDev->dev.minor);
- if (ret)
- {
- M4UMSG("failed to register misc device.\n");
- return ret;
- }
-#else
- gM4uDev->m4u_dev_proc_entry = proc_create("m4u", 0, NULL, &m4u_fops);
- if(!(gM4uDev->m4u_dev_proc_entry))
- {
- M4UMSG("m4u:failed to register m4u in proc/m4u_device.\n");
- return ret;
- }
-
-#endif
-
- m4u_debug_init(gM4uDev);
-
- M4UINFO("M4U platform_driver_register start\n");
-
- if (platform_driver_register(&m4uDrv))
- {
- M4UMSG("failed to register M4U driver");
- return -ENODEV;
- }
- M4UINFO("M4U platform_driver_register finsish\n");
-
-#if 0
-
- retval = platform_device_register(&mtk_m4u_dev);
- if (retval != 0)
- {
- return retval;
- }
- printk("register M4U device:%d\n", retval);
-#endif
-
-#ifdef M4U_PROFILE
- m4u_profile_init();
-#endif
-
-#ifdef M4U_TEE_SERVICE_ENABLE
- register_early_suspend(&mtk_m4u_early_suspend_driver);
-#endif
-
- return 0;
-}
-
-static void __exit MTK_M4U_Exit(void)
-{
- platform_driver_unregister(&m4uDrv);
-}
-
-
-subsys_initcall(MTK_M4U_Init);
-module_exit(MTK_M4U_Exit);
-
-
-MODULE_DESCRIPTION("MTKM4Udriver");
-MODULE_AUTHOR("MTK80347 <Xiang.Xu@mediatek.com>");
-MODULE_LICENSE("GPL");
-
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u.h b/drivers/misc/mediatek/m4u/mt6735/m4u.h
new file mode 100644
index 000000000..72154bd66
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u.h
@@ -0,0 +1,6 @@
+#ifndef __M4U_H__
+#define __M4U_H__
+
+#include "../2.0/m4u_v2.h"
+
+#endif
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u.mk b/drivers/misc/mediatek/m4u/mt6735/m4u.mk
new file mode 100644
index 000000000..23272b3f0
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u.mk
@@ -0,0 +1,9 @@
+ifeq ($(CONFIG_ARCH_MT6735),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6735/
+endif
+ifeq ($(CONFIG_ARCH_MT6735M),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6735m/
+endif
+ifeq ($(CONFIG_ARCH_MT6753),y)
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/mt6753/
+endif
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_debug.c b/drivers/misc/mediatek/m4u/mt6735/m4u_debug.c
deleted file mode 100644
index 01db43939..000000000
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_debug.c
+++ /dev/null
@@ -1,883 +0,0 @@
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-
-#include "m4u_priv.h"
-#include <linux/mtk_ion.h>
-
-
-//global variables
-int gM4U_log_to_uart = 2;
-int gM4U_log_level = 1;
-
-
-int m4u_test_alloc_dealloc(int id, unsigned int size)
-{
- m4u_client_t *client;
- unsigned long va;
- unsigned int mva;
- int ret;
- unsigned long populate;
-
- if (id == 1)
- va = (unsigned long)kmalloc(size, GFP_KERNEL);
- else if (id == 2)
- va = (unsigned long)vmalloc(size);
- else if (id == 3)
- {
- down_write(&current->mm->mmap_sem);
- va = do_mmap_pgoff(NULL, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, 0, &populate);
- up_write(&current->mm->mmap_sem);
- }
-
- M4UINFO("test va=0x%lx,size=0x%x\n", va, size);
-
- client = m4u_create_client();
- if (IS_ERR_OR_NULL(client))
- {
- M4UMSG("create client fail!\n");
- }
-
- ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, va, NULL, size,
- M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
- if (ret)
- {
- M4UMSG("alloc mva fail:va=0x%lx,size=0x%x,ret=%d\n", va,size,ret);
- return -1;
- }
- m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
-
- ret = m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
- m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
-
- if (id == 1)
- kfree((void *)va);
- else if (id == 2)
- vfree((void *)va);
- else if (id == 3)
- {
- down_read(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, va, size);
- up_read(&current->mm->mmap_sem);
- if (ret)
- {
- M4UMSG("do_munmap failed\n");
- }
- }
-
-//clean
- m4u_destroy_client(client);
- return 0;
-}
-
-
-m4u_callback_ret_t m4u_test_callback(int alloc_port, unsigned int mva,
- unsigned int size, void* data)
-{
- if(NULL != data)
- printk("test callback port=%d, mva=0x%x, size=0x%x, data=0x%x\n", alloc_port, mva, size, *(int *)data);
- else
- printk("test callback port=%d, mva=0x%x, size=0x%x\n", alloc_port, mva, size);
-
- return M4U_CALLBACK_HANDLED;
-}
-
-
-int m4u_test_reclaim(unsigned int size)
-{
- m4u_client_t *client;
- unsigned int* va[10];
- unsigned int buf_size;
- unsigned int mva;
- int ret, i;
-
- //register callback
- m4u_register_reclaim_callback(M4U_PORT_DISP_OVL0, m4u_test_callback, NULL);
-
-
- client = m4u_create_client();
- if (IS_ERR_OR_NULL(client))
- {
- M4UMSG("createclientfail!\n");
- }
-
- buf_size = size;
- for (i = 0; i < 10; i++)
- {
- va[i] = vmalloc(buf_size);
-
- ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)va[i], NULL, buf_size, M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
- if (ret)
- {
- M4UMSG("alloc using kmalloc fail:va=0x%p,size=0x%x\n", va[i], buf_size);
- return -1;
- }
- M4UINFO("alloc mva:va=0x%p,mva=0x%x,size=0x%x\n", va[i], mva, buf_size);
-
- buf_size += size;
- }
-
-
- for (i = 0; i < 10; i++)
- vfree((void *)va[i]);
-
- m4u_dump_buf_info(NULL);
- m4u_dump_pgtable(m4u_get_domain_by_port(M4U_PORT_DISP_OVL0), NULL);
-
- m4u_destroy_client(client);
-
- m4u_unregister_reclaim_callback(M4U_PORT_DISP_OVL0);
-
- return 0;
-}
-
-static int m4u_test_map_kernel(void)
-{
- m4u_client_t *client;
- unsigned long va;
- unsigned int size=1024*1024;
- unsigned int mva;
- unsigned long kernel_va;
- unsigned int kernel_size;
- int i;
- int ret;
- unsigned long populate;
-
- down_write(&current->mm->mmap_sem);
- va = do_mmap_pgoff(NULL, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, 0, &populate);
- up_write(&current->mm->mmap_sem);
- //va = vmalloc(size);
-
- M4UINFO("test va=0x%lx,size=0x%x\n", va, size);
-
- for(i=0; i<size; i+=4)
- {
- *(int*)(va+i) = i;
- }
-
- client = m4u_create_client();
- if (IS_ERR_OR_NULL(client))
- {
- M4UMSG("createclientfail!\n");
- }
-
- ret = m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, va, NULL, size, M4U_PROT_READ | M4U_PROT_CACHE, 0, &mva);
- if (ret)
- {
- M4UMSG("alloc using kmalloc fail:va=0x%lx,size=0x%x\n", va, size);
- return -1;
- }
-
- printk("m4u_test_map_kernel m4u_alloc_mva mva = %d, va = %lu, size = %d.\n", mva, va, size);
-
- ret = m4u_mva_map_kernel(mva, size, &kernel_va, &kernel_size);
- if(ret)
- {
- M4UMSG("map kernel fail!\n");
- return -1;
- }
-
- printk("m4u_test_map_kernel m4u_mva_map_kernel kernel_va = %lu.\n", kernel_va);
- for(i=0; i<size; i+=4)
- {
- if(*(int*)(kernel_va+i) != i)
- {
- M4UMSG("wawawa, get map value fail! i=%d, map=%d\n", i, *(int*)(kernel_va+i));
- }
- }
-
- ret = m4u_mva_unmap_kernel(mva, size, kernel_va);
-
- ret = m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
- down_read(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, va, size);
- up_read(&current->mm->mmap_sem);
- if (ret)
- {
- M4UMSG("do_munmap failed\n");
- }
-
- m4u_destroy_client(client);
- return 0;
-}
-
-__attribute__((weak)) extern int ddp_mem_test(void);
-__attribute__((weak)) extern int __ddp_mem_test(unsigned int *pSrc, unsigned int pSrcPa,
- unsigned int* pDst, unsigned int pDstPa,
- int need_sync);
-
-int m4u_test_ddp(unsigned int prot)
-{
- unsigned int *pSrc, *pDst;
- unsigned int src_pa, dst_pa;
- unsigned int size = 64*64*3;
- M4U_PORT_STRUCT port;
- m4u_client_t * client = m4u_create_client();
-
- pSrc = vmalloc(size);
- pDst = vmalloc(size);
-
- m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL,
- size, prot, 0, &src_pa);
-
- m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pDst, NULL,
- size, prot, 0, &dst_pa);
-
- M4UINFO("pSrc=0x%p, pDst=0x%p, src_pa=0x%x, dst_pa=0x%x\n", pSrc, pDst, src_pa, dst_pa);
-
- port.ePortID = M4U_PORT_DISP_OVL0;
- port.Direction = 0;
- port.Distance = 1;
- port.domain = 3;
- port.Security = 0;
- port.Virtuality = 1;
- m4u_config_port(&port);
-
- port.ePortID = M4U_PORT_DISP_WDMA0;
- m4u_config_port(&port);
-
- m4u_monitor_start(0);
- __ddp_mem_test(pSrc, src_pa, pDst, dst_pa, !(prot & M4U_PROT_CACHE));
- m4u_monitor_stop(0);
-
- vfree(pSrc);
- vfree(pDst);
-
- m4u_destroy_client(client);
- return 0;
-}
-
-m4u_callback_ret_t test_fault_callback(int port, unsigned int mva, void* data)
-{
- if(NULL != data)
- printk("fault call port=%d, mva=0x%x, data=0x%x\n", port, mva, *(int *)data);
- else
- printk("fault call port=%d, mva=0x%x\n", port, mva);
-
- /* DO NOT print too much logs here !!!! */
- /* Do NOT use any lock hear !!!!*/
- /* DO NOT do any other things except printk !!!*/
- /* DO NOT make any mistake here (or reboot will happen) !!! */
- return M4U_CALLBACK_HANDLED;
-}
-
-
-int m4u_test_tf(unsigned int prot)
-{
- unsigned int *pSrc, *pDst;
- unsigned int src_pa, dst_pa;
- unsigned int size = 64*64*3;
- M4U_PORT_STRUCT port;
- m4u_client_t * client = m4u_create_client();
- int data = 88;
-
-
- m4u_register_fault_callback(M4U_PORT_DISP_OVL0, test_fault_callback, &data);
- m4u_register_fault_callback(M4U_PORT_DISP_WDMA0, test_fault_callback, &data);
-
- pSrc = vmalloc(size);
- pDst = vmalloc(size);
-
- m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL,
- size, prot, 0, &src_pa);
-
- m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pDst, NULL,
- size/2, prot, 0, &dst_pa);
-
- M4UINFO("pSrc=0x%p, pDst=0x%p, src_pa=0x%x, dst_pa=0x%x\n", pSrc, pDst, src_pa, dst_pa);
-
- port.ePortID = M4U_PORT_DISP_OVL0;
- port.Direction = 0;
- port.Distance = 1;
- port.domain = 3;
- port.Security = 0;
- port.Virtuality = 1;
- m4u_config_port(&port);
-
- port.ePortID = M4U_PORT_DISP_WDMA0;
- m4u_config_port(&port);
-
- m4u_monitor_start(0);
- __ddp_mem_test(pSrc, src_pa, pDst, dst_pa, !!(prot & M4U_PROT_CACHE));
- m4u_monitor_stop(0);
-
-
- m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, src_pa);
- m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, dst_pa);
-
- vfree(pSrc);
- vfree(pDst);
-
- m4u_destroy_client(client);
-
- return 0;
-}
-#if 1
-#include <linux/ion_drv.h>
-
-void m4u_test_ion(void)
-{
- unsigned int *pSrc, *pDst;
- unsigned long src_pa, dst_pa;
- unsigned int size = 64*64*3, tmp_size;
- M4U_PORT_STRUCT port;
- struct ion_mm_data mm_data;
- struct ion_client *ion_client;
- struct ion_handle *src_handle, *dst_handle;
-
- //FIX-ME: modified for linux-3.10 early porting
- //ion_client = ion_client_create(g_ion_device, 0xffffffff, "test");
- ion_client = ion_client_create(g_ion_device, "test");
-
- src_handle = ion_alloc(ion_client, size, 0, ION_HEAP_MULTIMEDIA_MASK, 0);
- dst_handle = ion_alloc(ion_client, size, 0, ION_HEAP_MULTIMEDIA_MASK, 0);
-
- pSrc = ion_map_kernel(ion_client, src_handle);
- pDst = ion_map_kernel(ion_client, dst_handle);
-
- mm_data.config_buffer_param.kernel_handle = src_handle;
- mm_data.config_buffer_param.eModuleID= M4U_PORT_DISP_OVL0;
- mm_data.config_buffer_param.security= 0;
- mm_data.config_buffer_param.coherent= 0;
- mm_data.mm_cmd = ION_MM_CONFIG_BUFFER;
- if (ion_kernel_ioctl(ion_client, ION_CMD_MULTIMEDIA, (unsigned long)&mm_data) < 0)
- {
- printk("ion_test_drv: Config buffer failed.\n");
- }
- mm_data.config_buffer_param.kernel_handle = dst_handle;
- if (ion_kernel_ioctl(ion_client, ION_CMD_MULTIMEDIA, (unsigned long)&mm_data) < 0)
- {
- printk("ion_test_drv: Config buffer failed.\n");
- }
-
- ion_phys(ion_client, src_handle, &src_pa, &tmp_size);
- ion_phys(ion_client, dst_handle, &dst_pa, &tmp_size);
-
-
- M4UMSG("ion alloced: pSrc=0x%p, pDst=0x%p, src_pa=0x%lu, dst_pa=0x%lu\n", pSrc, pDst, src_pa, dst_pa);
-
- port.ePortID = M4U_PORT_DISP_OVL0;
- port.Direction = 0;
- port.Distance = 1;
- port.domain = 3;
- port.Security = 0;
- port.Virtuality = 1;
- m4u_config_port(&port);
-
- port.ePortID = M4U_PORT_DISP_WDMA0;
- m4u_config_port(&port);
-
- m4u_monitor_start(0);
- __ddp_mem_test(pSrc, (void*)src_pa, pDst, (void*)dst_pa, 0);
- m4u_monitor_stop(0);
-
-
- ion_free(ion_client, src_handle);
- ion_free(ion_client, dst_handle);
-
- ion_client_destroy(ion_client);
-
-}
-#else
-#define m4u_test_ion(...)
-#endif
-
-static int m4u_debug_set(void *data, u64 val)
-{
- m4u_domain_t *domain = data;
-
- M4UMSG("m4u_debug_set:val=%llu\n", val);
-
- switch (val)
- {
- case 1:
- { //map4kpageonly
- struct sg_table table;
- struct sg_table *sg_table = &table;
- struct scatterlist *sg;
- int i;
- struct page *page;
- int page_num = 512;
- unsigned int mva = 0x4000;
-
- page = alloc_pages(GFP_KERNEL, get_order(page_num));
- sg_alloc_table(sg_table, page_num, GFP_KERNEL);
- for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
- {
- sg_set_page(sg, page + i, PAGE_SIZE, 0);
- }
- m4u_map_sgtable(domain, mva, sg_table, page_num * PAGE_SIZE, M4U_PROT_WRITE | M4U_PROT_READ);
- m4u_dump_pgtable(domain, NULL);
- m4u_unmap(domain, mva, page_num * PAGE_SIZE);
- m4u_dump_pgtable(domain, NULL);
-
- sg_free_table(sg_table);
- __free_pages(page, get_order(page_num));
-
- }
- break;
- case 2:
- { //map64kpageonly
- struct sg_table table;
- struct sg_table *sg_table = &table;
- struct scatterlist *sg;
- int i;
- int page_num = 51;
- unsigned int page_size = SZ_64K;
- unsigned int mva = SZ_64K;
-
- sg_alloc_table(sg_table, page_num, GFP_KERNEL);
- for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
- {
- sg_dma_address(sg) = page_size * (i + 1);
- sg_dma_len(sg) = page_size;
- }
-
- m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
- m4u_dump_pgtable(domain, NULL);
- m4u_unmap(domain, mva, page_num * page_size);
- m4u_dump_pgtable(domain, NULL);
- sg_free_table(sg_table);
- }
- break;
-
- case 3:
- { //map1Mpageonly
- struct sg_table table;
- struct sg_table *sg_table = &table;
- struct scatterlist *sg;
- int i;
- int page_num = 37;
- unsigned int page_size = SZ_1M;
- unsigned int mva = SZ_1M;
-
- sg_alloc_table(sg_table, page_num, GFP_KERNEL);
-
- for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
- {
- sg_dma_address(sg) = page_size * (i + 1);
- sg_dma_len(sg) = page_size;
- }
- m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
- m4u_dump_pgtable(domain, NULL);
- m4u_unmap(domain, mva, page_num * page_size);
- m4u_dump_pgtable(domain, NULL);
-
- sg_free_table(sg_table);
-
- }
- break;
-
- case 4:
- { //map16Mpageonly
- struct sg_table table;
- struct sg_table *sg_table = &table;
- struct scatterlist *sg;
- int i;
- int page_num = 2;
- unsigned int page_size = SZ_16M;
- unsigned int mva = SZ_16M;
-
- sg_alloc_table(sg_table, page_num, GFP_KERNEL);
- for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
- {
- sg_dma_address(sg) = page_size * (i + 1);
- sg_dma_len(sg) = page_size;
- }
- m4u_map_sgtable(domain, mva, sg_table, page_num * page_size, M4U_PROT_WRITE | M4U_PROT_READ);
- m4u_dump_pgtable(domain, NULL);
- m4u_unmap(domain, mva, page_num * page_size);
- m4u_dump_pgtable(domain, NULL);
- sg_free_table(sg_table);
- }
- break;
-
- case 5:
- { //mapmiscpages
- struct sg_table table;
- struct sg_table *sg_table = &table;
- struct scatterlist *sg;
- unsigned int mva = 0x4000;
- unsigned int size = SZ_16M * 2;
-
- sg_alloc_table(sg_table, 1, GFP_KERNEL);
- sg = sg_table->sgl;
- sg_dma_address(sg) = 0x4000;
- sg_dma_len(sg) = size;
-
- m4u_map_sgtable(domain, mva, sg_table, size, M4U_PROT_WRITE | M4U_PROT_READ);
- m4u_dump_pgtable(domain, NULL);
- m4u_unmap(domain, mva, size);
- m4u_dump_pgtable(domain, NULL);
- sg_free_table(sg_table);
-
- }
- break;
-
- case 6:
- {
- m4u_test_alloc_dealloc(1, SZ_4M);
- }
- break;
-
- case 7:
- {
- m4u_test_alloc_dealloc(2, SZ_4M);
- }
- break;
-
- case 8:
- {
- m4u_test_alloc_dealloc(3, SZ_4M);
- }
- break;
-
- case 9: //m4u_alloc_mvausingkmallocbuffer
- {
- m4u_test_reclaim(SZ_16K);
- m4u_mvaGraph_dump();
- }
- break;
-
- case 10:
- {
- unsigned int mva;
- mva = m4u_do_mva_alloc_fix(0x90000000, 0x10000000, NULL);
- M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
- mva = m4u_do_mva_alloc_fix(0xb0000000, 0x10000000, NULL);
- M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
- mva = m4u_do_mva_alloc_fix(0xa0000000, 0x10000000, NULL);
- M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
- mva = m4u_do_mva_alloc_fix(0xa4000000, 0x10000000, NULL);
- M4UINFO("mva alloc fix done:mva=0x%x\n", mva);
- m4u_mvaGraph_dump();
- m4u_do_mva_free(0x90000000, 0x10000000);
- m4u_do_mva_free(0xa0000000, 0x10000000);
- m4u_do_mva_free(0xb0000000, 0x10000000);
- m4u_mvaGraph_dump();
- }
- break;
-
- case 11: //map unmap kernel
- m4u_test_map_kernel();
- break;
-
- case 12:
- ddp_mem_test();
- break;
-
- case 13:
- m4u_test_ddp(M4U_PROT_READ|M4U_PROT_WRITE);
- break;
- case 14:
- m4u_test_tf(M4U_PROT_READ|M4U_PROT_WRITE);
- break;
- case 15:
- m4u_test_ion();
- break;
- case 16:
- m4u_dump_main_tlb(0, 0);
- break;
- case 17:
- m4u_dump_pfh_tlb(0);
- break;
- case 18:
- m4u_dump_main_tlb(1, 0);
- break;
- case 19:
- m4u_dump_pfh_tlb(1);
- break;
- case 20:
- {
- M4U_PORT_STRUCT rM4uPort;
- int i;
-
- rM4uPort.Virtuality = 1;
- rM4uPort.Security = 0;
- rM4uPort.Distance = 1;
- rM4uPort.Direction = 0;
- rM4uPort.domain = 3;
- for(i = 0; i< M4U_PORT_UNKNOWN;i++)
- {
- rM4uPort.ePortID = i;
- m4u_config_port(&rM4uPort);
- }
- }
- break;
- case 21:
- {
- M4U_PORT_STRUCT rM4uPort;
- int i;
-
- rM4uPort.Virtuality = 0;
- rM4uPort.Security = 0;
- rM4uPort.Distance = 1;
- rM4uPort.Direction = 0;
- rM4uPort.domain = 3;
- for(i = 0; i< M4U_PORT_UNKNOWN;i++)
- {
- rM4uPort.ePortID = i;
- m4u_config_port(&rM4uPort);
- }
- }
- break;
- case 22:
- {
- int i;
- unsigned int *pSrc;
- pSrc = vmalloc(128);
- memset(pSrc, 55, 128);
- m4u_cache_sync(NULL, 0, 0, 0, 0, M4U_CACHE_FLUSH_ALL);
-
- for(i=0;i<128/32;i+=32)
- {
- M4UMSG("+0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x \n", 8*i, pSrc[i], pSrc[i+1], pSrc[i+2], pSrc[i+3], pSrc[i+4],
- pSrc[i+5], pSrc[i+6], pSrc[i+7]);
- }
-
- }
- break;
- case 23:
- {
- void* pgd_va;
- void* pgd_pa;
- unsigned size;
- m4u_get_pgd(NULL, 0, &pgd_va, &pgd_pa, &size);
- M4UMSG("pgd_va:0x%p pgd_pa:0x%p, size: %d\n", pgd_va, pgd_pa, size);
- }
- break;
- case 24:
- {
- unsigned int *pSrc;
- unsigned int mva;
- unsigned long pa;
-
- m4u_client_t * client = m4u_create_client();
- pSrc = vmalloc(128);
- m4u_alloc_mva(client, M4U_PORT_DISP_OVL0, (unsigned long)pSrc, NULL,
- 128, 0, 0, &mva);
-
- m4u_dump_pgtable(domain, NULL);
-
- pa = m4u_mva_to_pa(NULL, 0, mva);
- m4u_dealloc_mva(client, M4U_PORT_DISP_OVL0, mva);
- M4UMSG("mva:0x%x pa:0x%lu\n", mva, pa);
- m4u_destroy_client(client);
- }
- break;
- case 25:
- {
- m4u_monitor_start(0);
- }
- break;
- case 26:
- {
- m4u_monitor_stop(0);
- }
- break;
- case 27:
- {
- m4u_dump_reg_for_smi_hang_issue();
- }
- break;
-#ifdef M4U_TEE_SERVICE_ENABLE
- case 50:
- {
- extern int m4u_sec_init(void);
- m4u_sec_init();
- }
- break;
- case 51:
- {
- extern int m4u_config_port_tee(M4U_PORT_STRUCT* pM4uPort);
- M4U_PORT_STRUCT port;
- memset(&port, 0, sizeof(M4U_PORT_STRUCT));
-
- port.ePortID = M4U_PORT_HW_VDEC_PP_EXT;
- M4UMSG("(1) config port: mmu: %d, sec: %d\n", port.Virtuality, port.Security);
- m4u_config_port_tee(&port);
- port.Security = 1;
- M4UMSG("(2) config port: mmu: %d, sec: %d\n", port.Virtuality, port.Security);
- m4u_config_port_tee(&port);
- }
- break;
-#endif
- default:
- M4UMSG("m4u_debug_set error,val=%llu\n", val);
- }
-
- return 0;
-}
-
-static int m4u_debug_get(void *data, u64 * val)
-{
- *val = 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(m4u_debug_fops, m4u_debug_get, m4u_debug_set, "%llu\n");
-
-
-static int m4u_log_level_set(void *data, u64 val)
-{
-
- gM4U_log_to_uart = (val & 0xf0)>>4;
- gM4U_log_level = val & 0xf;
- M4UMSG("gM4U_log_level: %d, gM4U_log_to_uart:%d\n", gM4U_log_level, gM4U_log_to_uart);
-
- return 0;
-}
-
-static int m4u_log_level_get(void *data, u64 * val)
-{
- *val = gM4U_log_level | (gM4U_log_to_uart<<4);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(m4u_log_level_fops, m4u_log_level_get, m4u_log_level_set, "%llu\n");
-
-static int m4u_debug_freemva_set(void *data, u64 val)
-{
- m4u_domain_t *domain = data;
- m4u_buf_info_t *pMvaInfo;
- unsigned int mva = (unsigned int)val;
- M4UMSG("free mva: 0x%x\n", mva);
- pMvaInfo = mva_get_priv(mva);
- if(pMvaInfo)
- {
- m4u_unmap(domain, mva, pMvaInfo->size);
- m4u_do_mva_free(mva, pMvaInfo->size);
- }
- return 0;
-}
-
-static int m4u_debug_freemva_get(void *data, u64 * val)
-{
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(m4u_debug_freemva_fops, m4u_debug_freemva_get, m4u_debug_freemva_set, "%llu\n");
-
-
-int m4u_debug_port_show(struct seq_file *s, void *unused)
-{
- m4u_print_port_status(s, 0);
- return 0;
-}
-
-int m4u_debug_port_open(struct inode *inode, struct file *file)
-{
- return single_open(file, m4u_debug_port_show, inode->i_private);
-}
-
-struct file_operations m4u_debug_port_fops = {
- .open = m4u_debug_port_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-
-int m4u_debug_buf_show(struct seq_file *s, void *unused)
-{
- m4u_dump_buf_info(s);
- return 0;
-}
-
-int m4u_debug_buf_open(struct inode *inode, struct file *file)
-{
- return single_open(file, m4u_debug_buf_show, inode->i_private);
-}
-
-struct file_operations m4u_debug_buf_fops = {
- .open = m4u_debug_buf_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-int m4u_debug_monitor_show(struct seq_file *s, void *unused)
-{
- m4u_print_perf_counter(0, 0, "monitor");
- return 0;
-}
-
-int m4u_debug_monitor_open(struct inode *inode, struct file *file)
-{
- return single_open(file, m4u_debug_monitor_show, inode->i_private);
-}
-
-struct file_operations m4u_debug_monitor_fops = {
- .open = m4u_debug_monitor_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-int m4u_debug_register_show(struct seq_file *s, void *unused)
-{
- m4u_dump_reg(0);
- return 0;
-}
-
-int m4u_debug_register_open(struct inode *inode, struct file *file)
-{
- return single_open(file, m4u_debug_register_show, inode->i_private);
-}
-
-struct file_operations m4u_debug_register_fops = {
- .open = m4u_debug_register_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-
-int m4u_debug_init(struct m4u_device *m4u_dev)
-{
- struct dentry * debug_file;
- m4u_domain_t *domain = m4u_get_domain_by_id(0);
-
-
- m4u_dev->debug_root = debugfs_create_dir("m4u", NULL);
-
- if (IS_ERR_OR_NULL(m4u_dev->debug_root))
- M4UMSG("m4u: failed to create debug dir.\n");
-
- debug_file = debugfs_create_file("buffer", 0644, m4u_dev->debug_root, domain, &m4u_debug_buf_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 1.\n");
-
- debug_file = debugfs_create_file("debug", 0644, m4u_dev->debug_root, domain, &m4u_debug_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 2.\n");
-
- debug_file = debugfs_create_file("port", 0644, m4u_dev->debug_root, domain, &m4u_debug_port_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 3.\n");
-
- debug_file = debugfs_create_file("log_level", 0644, m4u_dev->debug_root, domain, &m4u_log_level_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 4.\n");
-
- debug_file = debugfs_create_file("monitor", 0644, m4u_dev->debug_root, domain, &m4u_debug_monitor_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 5.\n");
-
- debug_file = debugfs_create_file("register", 0644, m4u_dev->debug_root, domain, &m4u_debug_register_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 6.\n");
-
- debug_file = debugfs_create_file("freemva", 0644, m4u_dev->debug_root, domain, &m4u_debug_freemva_fops);
- if (IS_ERR_OR_NULL(debug_file))
- M4UMSG("m4u: failed to create debug files 7.\n");
-
-
- return 0;
-}
-
-
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_hw.c b/drivers/misc/mediatek/m4u/mt6735/m4u_hw.c
index d3c280332..508304493 100644
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_hw.c
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u_hw.c
@@ -1,7 +1,5 @@
-#include <mach/mt_irq.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <mach/mt_clkmgr.h>
#include "m4u_priv.h"
#include "m4u_hw.h"
@@ -11,18 +9,11 @@
static m4u_domain_t gM4uDomain;
-extern unsigned int gM4UTagCount[];
-extern const char* gM4U_SMILARB[];
-extern M4U_RANGE_DES_T gM4u0_seq[];
-extern M4U_RANGE_DES_T *gM4USeq[];
-extern m4u_port_t gM4uPort[];
-
-extern struct m4u_device *gM4uDev;
static unsigned long gM4UBaseAddr[TOTAL_M4U_NUM];
static unsigned long gLarbBaseAddr[SMI_LARB_NR];
static unsigned long gPericfgBaseAddr;
-static M4U_MAU_STATUS_T gM4u0_mau[M4U0_MAU_NR] = {{0}};
+static M4U_MAU_STATUS_T gM4u0_mau[M4U0_MAU_NR] = {{0} };
static unsigned int gMAU_candidate_id = M4U0_MAU_NR - 1;
static DEFINE_MUTEX(gM4u_seq_mutex);
@@ -30,895 +21,795 @@ static DEFINE_MUTEX(gM4u_seq_mutex);
int gM4U_L2_enable = 1;
int gM4U_4G_DRAM_Mode = 0;
-
static spinlock_t gM4u_reg_lock;
int gM4u_port_num = M4U_PORT_UNKNOWN;
-#ifdef M4U_TEE_SERVICE_ENABLE
-extern int m4u_tee_en;
-#endif
-
-int m4u_invalid_tlb(int m4u_id,int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
+int m4u_invalid_tlb(int m4u_id, int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
{
- unsigned int reg = 0;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+ unsigned int reg = 0;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- if(mva_start>=mva_end)
- isInvAll = 1;
-
- if(!isInvAll)
- {
- mva_start = round_down(mva_start, SZ_4K);
- mva_end = round_up(mva_end, SZ_4K);
- }
-
- if(L2_en)
- reg = F_MMU_INV_EN_L2;
-
- reg |= F_MMU_INV_EN_L1;
-
- M4U_WriteReg32(m4u_base, REG_INVLID_SEL, reg);
-
- if(isInvAll)
- {
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_ALL);
- }
- else
- {
- /*
- unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
- unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
- unsigned int type = max(type_start, type_end);
- unsigned int alignment;
- if(type > MMU_PT_TYPE_SUPERSECTION)
- type = MMU_PT_TYPE_SUPERSECTION;
- alignment = m4u_get_pt_type_size(type) - 1;
-
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
- */
-
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
- }
-
- if(!isInvAll)
- {
- while(!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE));
- M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE, 0);
- }
+ if (mva_start >= mva_end)
+ isInvAll = 1;
- return 0;
-
+ if (!isInvAll) {
+ mva_start = round_down(mva_start, SZ_4K);
+ mva_end = round_up(mva_end, SZ_4K);
+ }
+
+ if (L2_en)
+ reg = F_MMU_INV_EN_L2;
+
+ reg |= F_MMU_INV_EN_L1;
+
+ M4U_WriteReg32(m4u_base, REG_INVLID_SEL, reg);
+
+ if (isInvAll)
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_ALL);
+ else {
+ /*
+ unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
+ unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
+ unsigned int type = max(type_start, type_end);
+ unsigned int alignment;
+ if(type > MMU_PT_TYPE_SUPERSECTION)
+ type = MMU_PT_TYPE_SUPERSECTION;
+ alignment = m4u_get_pt_type_size(type) - 1;
+
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
+ */
+
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA , mva_start);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
+ }
+
+ if (!isInvAll) {
+ while (!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE))
+ ;
+ M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE, 0);
+ }
+
+ return 0;
}
static void m4u_invalid_tlb_all(int m4u_id)
{
- m4u_invalid_tlb(m4u_id, gM4U_L2_enable, 1, 0, 0);
+ m4u_invalid_tlb(m4u_id, gM4U_L2_enable, 1, 0, 0);
}
-void m4u_invalid_tlb_by_range(m4u_domain_t *m4u_domain,unsigned int mva_start,unsigned int mva_end)
+void m4u_invalid_tlb_by_range(m4u_domain_t *m4u_domain, unsigned int mva_start, unsigned int mva_end)
{
- int i;
- //to-do: should get m4u connected to domain here
- for(i=0; i<TOTAL_M4U_NUM; i++)
- m4u_invalid_tlb(i, gM4U_L2_enable, 0, mva_start, mva_end);
- //m4u_invalid_tlb_all(0);
- //m4u_invalid_tlb_all(1);
+ int i;
+ /* to-do: should get m4u connected to domain here */
+ for (i = 0; i < TOTAL_M4U_NUM; i++)
+ m4u_invalid_tlb(i, gM4U_L2_enable, 0, mva_start, mva_end);
+ /* m4u_invalid_tlb_all(0); */
+ /* m4u_invalid_tlb_all(1); */
}
-
-void m4u_invalid_tlb_sec(int m4u_id,int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
+void m4u_invalid_tlb_sec(int m4u_id, int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
{
- unsigned int reg = 0;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+ unsigned int reg = 0;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- if(mva_start>=mva_end)
- isInvAll = 1;
-
- if(!isInvAll)
- {
- mva_start = round_down(mva_start, SZ_4K);
- mva_end = round_up(mva_end, SZ_4K);
- }
-
- reg = F_MMU_INV_SEC_EN_L2;
- reg |= F_MMU_INV_SEC_EN_L1;
-
- M4U_WriteReg32(m4u_base, REG_INVLID_SEL_SEC, reg);
-
- if(isInvAll)
- {
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_ALL);
- }
- else
- {
- /*
- unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
- unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
- unsigned int type = max(type_start, type_end);
- unsigned int alignment;
- if(type > MMU_PT_TYPE_SUPERSECTION)
- type = MMU_PT_TYPE_SUPERSECTION;
- alignment = m4u_get_pt_type_size(type) - 1;
-
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
- */
-
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA_SEC ,mva_start);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA_SEC, mva_end);
- M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_RANGE);
- }
-
- if(!isInvAll)
- {
- while(!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE_SEC));
- M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE_SEC, 0);
- }
-}
+ if (mva_start >= mva_end)
+ isInvAll = 1;
+
+ if (!isInvAll) {
+ mva_start = round_down(mva_start, SZ_4K);
+ mva_end = round_up(mva_end, SZ_4K);
+ }
+
+ reg = F_MMU_INV_SEC_EN_L2;
+ reg |= F_MMU_INV_SEC_EN_L1;
+
+ M4U_WriteReg32(m4u_base, REG_INVLID_SEL_SEC, reg);
+
+ if (isInvAll)
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_ALL);
+ else {
+ /*
+ unsigned int type_start = m4u_get_pt_type(gPgd_nonsec, mva_start);
+ unsigned int type_end = m4u_get_pt_type(gPgd_nonsec, mva_end);
+ unsigned int type = max(type_start, type_end);
+ unsigned int alignment;
+ if(type > MMU_PT_TYPE_SUPERSECTION)
+ type = MMU_PT_TYPE_SUPERSECTION;
+ alignment = m4u_get_pt_type_size(type) - 1;
+
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA ,mva_start & (~alignment));
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA, mva_end | alignment);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD, F_MMU_INV_RANGE);
+ */
+
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SA_SEC , mva_start);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_EA_SEC, mva_end);
+ M4U_WriteReg32(m4u_base, REG_MMU_INVLD_SEC, F_MMU_INV_SEC_RANGE);
+ }
+ if (!isInvAll) {
+ while (!M4U_ReadReg32(m4u_base, REG_MMU_CPE_DONE_SEC))
+ ;
+ M4U_WriteReg32(m4u_base, REG_MMU_CPE_DONE_SEC, 0);
+ }
+}
void m4u_invalid_tlb_sec_by_range(int m4u_id,
- unsigned int mva_start,
- unsigned int mva_end)
+ unsigned int mva_start,
+ unsigned int mva_end)
{
- m4u_invalid_tlb_sec(m4u_id, gM4U_L2_enable, 0, mva_start, mva_end);
+ m4u_invalid_tlb_sec(m4u_id, gM4U_L2_enable, 0, mva_start, mva_end);
}
-
-
static int __m4u_dump_rs_info(unsigned int va[], unsigned int pa[], unsigned int st[], unsigned int pte[])
{
- int i;
-
- M4ULOG_MID("m4u dump RS information =====>\n");
- M4ULOG_MID("id mva valid port-id pa pte larb w/r other-status \n");
- for(i=0; i<MMU_TOTAL_RS_NR; i++)
- {
- M4ULOG_MID("%d: 0x%8x %5d 0x%3x 0x%8x 0x%8x %d %d 0x%3x\n", i,
- F_MMU_RSx_VA_GET(va[i]), F_MMU_RSx_VA_VALID(va[i]),
- F_MMU_RSx_VA_PID(va[i]), pa[i], pte[i], F_MMU_RSx_ST_LID(st[i]),
- F_MMU_RSx_ST_WRT(st[i]), F_MMU_RSx_ST_OTHER(st[i])
- );
- }
- M4ULOG_MID("m4u dump RS information done =====>\n");
- return 0;
+ int i;
+
+ M4ULOG_MID("m4u dump RS information =====>\n");
+ M4ULOG_MID("id mva valid port-id pa pte larb w/r other-status\n");
+ for (i = 0; i < MMU_TOTAL_RS_NR; i++) {
+ M4ULOG_MID("%d: 0x%8x %5d 0x%3x 0x%8x 0x%8x %d %d 0x%3x\n", i,
+ F_MMU_RSx_VA_GET(va[i]), F_MMU_RSx_VA_VALID(va[i]),
+ F_MMU_RSx_VA_PID(va[i]), pa[i], pte[i], F_MMU_RSx_ST_LID(st[i]),
+ F_MMU_RSx_ST_WRT(st[i]), F_MMU_RSx_ST_OTHER(st[i]));
+ }
+ M4ULOG_MID("m4u dump RS information done =====>\n");
+ return 0;
}
static int m4u_dump_rs_info(int m4u_index, int m4u_slave_id)
{
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- int i;
- unsigned int va[MMU_TOTAL_RS_NR], pa[MMU_TOTAL_RS_NR], st[MMU_TOTAL_RS_NR], pte[MMU_TOTAL_RS_NR];
-
- for(i=0; i<MMU_TOTAL_RS_NR; i++)
- {
- va[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_VA(m4u_slave_id, i)));
- pa[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_PA(m4u_slave_id, i)));
- st[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_ST(m4u_slave_id, i)));
- pte[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_2ND_BASE(m4u_slave_id, i)));
- }
-
- M4ULOG_MID("m4u dump RS information index: %d=====>\n", m4u_slave_id);
- __m4u_dump_rs_info(va, pa, st, pte);
- M4ULOG_MID("m4u dump RS information done =====>\n");
- return 0;
-}
+ unsigned long m4u_base = gM4UBaseAddr[m4u_index];
+ int i;
+ unsigned int va[MMU_TOTAL_RS_NR], pa[MMU_TOTAL_RS_NR], st[MMU_TOTAL_RS_NR], pte[MMU_TOTAL_RS_NR];
+
+ for (i = 0; i < MMU_TOTAL_RS_NR; i++) {
+ va[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_VA(m4u_slave_id, i)));
+ pa[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_PA(m4u_slave_id, i)));
+ st[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_ST(m4u_slave_id, i)));
+ pte[i] = COM_ReadReg32((m4u_base+REG_MMU_RSx_2ND_BASE(m4u_slave_id, i)));
+ }
+ M4ULOG_MID("m4u dump RS information index: %d=====>\n", m4u_slave_id);
+ __m4u_dump_rs_info(va, pa, st, pte);
+ M4ULOG_MID("m4u dump RS information done =====>\n");
+ return 0;
+}
static inline void m4u_clear_intr(unsigned int m4u_id)
{
- m4uHw_set_field_by_mask(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, F_INT_L2_CLR_BIT, F_INT_L2_CLR_BIT);
+ m4uHw_set_field_by_mask(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, F_INT_L2_CLR_BIT, F_INT_L2_CLR_BIT);
}
+
static inline void m4u_enable_intr(unsigned int m4u_id)
{
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0x6f);
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0xffffffff);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0x6f);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0xffffffff);
}
+
static inline void m4u_disable_intr(unsigned int m4u_id)
{
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0);
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_L2_CONTROL, 0);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_INT_MAIN_CONTROL, 0);
}
+
static inline void m4u_intr_modify_all(unsigned long enable)
{
- int i;
- for(i=0;i<TOTAL_M4U_NUM;i++)
- {
- if(enable)
- m4u_enable_intr(i);
- else
- m4u_disable_intr(i);
- }
-}
-
-
-struct mau_config_info
-{
- int m4u_id;
- int m4u_slave_id;
- int mau_set;
- unsigned int start;
- unsigned int end;
- unsigned int port_mask;
- unsigned int larb_mask;
- unsigned int write_monitor;//:1;
- unsigned int virt;//:1;
- unsigned int io;//:1;
- unsigned int start_bit32;//:1;
- unsigned int end_bit32;//:1;
-
+ int i;
+
+ for (i = 0; i < TOTAL_M4U_NUM; i++)
+ if (enable)
+ m4u_enable_intr(i);
+ else
+ m4u_disable_intr(i);
+}
+
+struct mau_config_info {
+ int m4u_id;
+ int m4u_slave_id;
+ int mau_set;
+ unsigned int start;
+ unsigned int end;
+ unsigned int port_mask;
+ unsigned int larb_mask;
+ unsigned int write_monitor;/* :1; */
+ unsigned int virt;/* :1; */
+ unsigned int io;/* :1; */
+ unsigned int start_bit32;/* :1; */
+ unsigned int end_bit32;/* :1; */
};
/***********************************************************/
-/**
+/**
* @param m4u_id -- IOMMU main id
* @param m4u_slave_id -- IOMMU slave id
* @param mau_set -- mau set/entry (3 mau set per iommu)
* @param wr -- write monitor enable: 0 for read, 1 for write
- NOTES: cannot monitor read and write using one mau set!!
+ NOTES: cannot monitor read and write using one mau set!!
* @param vir -- virtual monitor enable ? (if enable we will monitor mva, or else monitor PA)
* @param io -- I/O use mau at input or output of RS. 0 for input, 1 for output
- input: mau @ RS input, can monitor mva or pa (bypass m4u);
- output:mau @ RS output, can monitor pa to emi(bypass m4u, or after mva translation)
-* @param bit32 -- enable bit32 monitor?
+ input: mau @ RS input, can monitor mva or pa (bypass m4u);
+ output:mau @ RS output, can monitor pa to emi(bypass m4u, or after mva translation)
+* @param bit32 -- enable bit32 monitor?
* @param start -- start address of monitor (can be any address without alignment)
* @param end -- end address of monitor (can be any address without alignment)
* @param port -- port mask or AXI_ID[4:0] mask
* @param larb -- larb[0..7] mask or AXI_ID[7:5] mask
*
-* @return
-* @remark
- monitor range is [start, end)
-* @see
+* @return
+* @remark
+ monitor range is [start, end)
+* @see
* @author K Zhang @date 2013/11/13
************************************************************/
int mau_start_monitor(int m4u_id, int m4u_slave_id, int mau_set,
- int wr, int vir, int io, int bit32,
- unsigned int start, unsigned int end, unsigned int port_mask, unsigned int larb_mask)
+ int wr, int vir, int io, int bit32,
+ unsigned int start, unsigned int end, unsigned int port_mask, unsigned int larb_mask)
{
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- if(0 == m4u_base)
- return -1;
-
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set), start);
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set), !!(bit32));
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set), end);
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave_id, mau_set), !!(bit32));
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set), port_mask);
+ if (0 == m4u_base)
+ return -1;
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id),
- F_MAU_LARB_MSK(mau_set), F_MAU_LARB_VAL(mau_set, larb_mask));
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set), start);
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set), !!(bit32));
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set), end);
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave_id, mau_set), !!(bit32));
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id),
- F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(io, mau_set));
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set), port_mask);
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id),
- F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(wr, mau_set));
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id),
+ F_MAU_LARB_MSK(mau_set), F_MAU_LARB_VAL(mau_set, larb_mask));
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id),
- F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(vir, mau_set));
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id),
+ F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(io, mau_set));
- return 0;
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id),
+ F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(wr, mau_set));
+
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id),
+ F_MAU_BIT_VAL(1, mau_set), F_MAU_BIT_VAL(vir, mau_set));
+
+ return 0;
}
int config_mau(M4U_MAU_STRUCT mau)
{
- int i;
- int free_id = -1;
- int m4u_id = m4u_port_2_m4u_id(mau.port);
- int larb = m4u_port_2_larb_id(mau.port);
- unsigned int MVAStart = mau.mva;
- unsigned int MVAEnd = mau.mva + mau.size;
-
- if(0 != m4u_id)
- return -1;
-
- for(i=0; i<M4U0_MAU_NR; i++)
- {
- if(0!=gM4u0_mau[i].Enabled)
- {
- if(MVAStart >= gM4u0_mau[i].MVAStart && MVAEnd <= gM4u0_mau[i].MVAEnd) //no overlap
- {
- if(mau.enable == 0)
- {
- gM4u0_mau[i].Enabled = 0;
- mau_start_monitor(0, 0, i, 0, 0, 0, 0, 0, 0, 0, 0);
- continue;
- }
- }
- }
- else
- {
- free_id = i;
- }
- }
-
- if(mau.enable == 0)
- {
- return 0;
- }
-
- if(free_id == -1)
- {
- if(mau.force == 0)
- return -1;
- else
- {
- free_id = gMAU_candidate_id;
- if(0 == gMAU_candidate_id)
- {
- gMAU_candidate_id = M4U0_MAU_NR -1;
- }
- else
- {
- gMAU_candidate_id--;
- }
- }
- }
-
- gM4u0_mau[free_id].Enabled = 1;
- gM4u0_mau[free_id].MVAStart = MVAStart;
- gM4u0_mau[free_id].MVAEnd = MVAEnd;
- gM4u0_mau[free_id].port = mau.port;
-
- mau_start_monitor(m4u_id, larb_2_m4u_slave_id(larb), free_id, (int)mau.write, 1, 0, 0, MVAStart, MVAEnd, 1 << m4u_port_2_larb_port(mau.port), 1 << larb);
- return free_id;
-}
-
-//notes: you must fill cfg->m4u_id/m4u_slave_id/mau_set before call this func.
-int mau_get_config_info(struct mau_config_info * cfg)
-{
- int m4u_id = cfg->m4u_id;
- int m4u_slave_id = cfg->m4u_slave_id;
- int mau_set = cfg->mau_set;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
-
- cfg->start = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set));
- cfg->end = M4U_ReadReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set));
- cfg->start_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
- cfg->end_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
- cfg->port_mask = M4U_ReadReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set));
- cfg->larb_mask = m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id),F_MAU_LARB_MSK(mau_set));
+ int i;
+ int free_id = -1;
+ int m4u_id = m4u_port_2_m4u_id(mau.port);
+ int larb = m4u_port_2_larb_id(mau.port);
+ unsigned int MVAStart = mau.mva;
+ unsigned int MVAEnd = mau.mva + mau.size;
+ int port_id = m4u_port_2_larb_port(mau.port);
+
+ if (0 != m4u_id)
+ return -1;
+
+ if (port_id >= M4U_PORT_UNKNOWN || larb == -1)
+ return -1;
+
+ for (i = 0; i < M4U0_MAU_NR; i++) {
+ if (0 != gM4u0_mau[i].Enabled) {
+ if (MVAStart >= gM4u0_mau[i].MVAStart && MVAEnd <= gM4u0_mau[i].MVAEnd) { /* no overlap */
+ if (mau.enable == 0) {
+ gM4u0_mau[i].Enabled = 0;
+ mau_start_monitor(0, 0, i, 0, 0, 0, 0, 0, 0, 0, 0);
+ continue;
+ }
+ }
+ } else {
+ free_id = i;
+ }
+ }
- cfg->io = !!(m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set)));
+ if (mau.enable == 0)
+ return 0;
- cfg->write_monitor = !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id),F_MAU_BIT_VAL(1, mau_set));
+ if (free_id == -1) {
+ if (mau.force == 0)
+ return -1;
+ }
+ else {
+ free_id = gMAU_candidate_id;
+ if (0 == gMAU_candidate_id)
+ gMAU_candidate_id = M4U0_MAU_NR - 1;
+ else
+ gMAU_candidate_id--;
+ }
- cfg->virt = !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id),F_MAU_BIT_VAL(1, mau_set));
+ gM4u0_mau[free_id].Enabled = 1;
+ gM4u0_mau[free_id].MVAStart = MVAStart;
+ gM4u0_mau[free_id].MVAEnd = MVAEnd;
+ gM4u0_mau[free_id].port = mau.port;
- return 0;
+ mau_start_monitor(m4u_id, larb_2_m4u_slave_id(larb), free_id, (int)mau.write,
+ 1, 0, 0, MVAStart, MVAEnd, 1 << port_id, 1 << larb);
+ return free_id;
}
+/* notes: you must fill cfg->m4u_id/m4u_slave_id/mau_set before call this func. */
+int mau_get_config_info(struct mau_config_info *cfg)
+{
+ int m4u_id = cfg->m4u_id;
+ int m4u_slave_id = cfg->m4u_slave_id;
+ int mau_set = cfg->mau_set;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+
+ cfg->start = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START(m4u_slave_id, mau_set));
+ cfg->end = M4U_ReadReg32(m4u_base, REG_MMU_MAU_END(m4u_slave_id, mau_set));
+ cfg->start_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
+ cfg->end_bit32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave_id, mau_set));
+ cfg->port_mask = M4U_ReadReg32(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave_id, mau_set));
+ cfg->larb_mask = m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave_id), F_MAU_LARB_MSK(mau_set));
+
+ cfg->io = !!(m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_IO(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set)));
+
+ cfg->write_monitor =
+ !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_RW(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set));
+
+ cfg->virt = !!m4uHw_get_field_by_mask(m4u_base, REG_MMU_MAU_VA(m4u_slave_id), F_MAU_BIT_VAL(1, mau_set));
+
+ return 0;
+}
int __mau_dump_status(int m4u_id, int m4u_slave_id, int mau)
{
- unsigned long m4u_base;
- unsigned int status;
- unsigned int assert_id, assert_addr, assert_b32;
- int larb, port;
- struct mau_config_info mau_cfg;
-
- m4u_base = gM4UBaseAddr[m4u_id];
- status = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ST(m4u_slave_id));
-
- if (status & (1 << mau))
- {
- M4ULOG_HIGH("mau_assert in set %d\n", mau);
- assert_id = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ID(m4u_slave_id, mau));
- assert_addr = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR(m4u_slave_id, mau));
- assert_b32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR_BIT32(m4u_slave_id, mau));
- larb = F_MMU_MAU_ASSERT_ID_LARB(assert_id);
- port = F_MMU_MAU_ASSERT_ID_PORT(assert_id);
- M4ULOG_HIGH("id=0x%x(%s),addr=0x%x,b32=0x%x\n", assert_id,
- m4u_get_port_name(larb_port_2_m4u_port(larb, port)),assert_addr, assert_b32);
-
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), (1 << mau));
- M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), 0);
-
- mau_cfg.m4u_id = m4u_id;
- mau_cfg.m4u_slave_id = m4u_slave_id;
- mau_cfg.mau_set = mau;
- mau_get_config_info(&mau_cfg);
- M4ULOG_HIGH("mau_cfg: start=0x%x,end=0x%x,virt(%d),io(%d),wr(%d),s_b32(%d),e_b32(%d)\n",
- mau_cfg.start, mau_cfg.end, mau_cfg.virt, mau_cfg.io,
- mau_cfg.write_monitor, mau_cfg.start_bit32, mau_cfg.end_bit32);
-
- }
- else
- {
- M4ULOG_MID("mau no assert in set %d\n", mau);
- }
+ unsigned long m4u_base;
+ unsigned int status;
+ unsigned int assert_id, assert_addr, assert_b32;
+ int larb, port;
+ struct mau_config_info mau_cfg;
+
+ m4u_base = gM4UBaseAddr[m4u_id];
+ status = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ST(m4u_slave_id));
+
+ if (status & (1 << mau)) {
+ M4ULOG_HIGH("mau_assert in set %d\n", mau);
+ assert_id = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ASSERT_ID(m4u_slave_id, mau));
+ assert_addr = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR(m4u_slave_id, mau));
+ assert_b32 = M4U_ReadReg32(m4u_base, REG_MMU_MAU_ADDR_BIT32(m4u_slave_id, mau));
+ larb = F_MMU_MAU_ASSERT_ID_LARB(assert_id);
+ port = F_MMU_MAU_ASSERT_ID_PORT(assert_id);
+ M4ULOG_HIGH("id=0x%x(%s),addr=0x%x,b32=0x%x\n", assert_id,
+ m4u_get_port_name(larb_port_2_m4u_port(larb, port)), assert_addr, assert_b32);
+
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), (1 << mau));
+ M4U_WriteReg32(m4u_base, REG_MMU_MAU_CLR(m4u_slave_id), 0);
+
+ mau_cfg.m4u_id = m4u_id;
+ mau_cfg.m4u_slave_id = m4u_slave_id;
+ mau_cfg.mau_set = mau;
+ mau_get_config_info(&mau_cfg);
+ M4ULOG_HIGH("mau_cfg: start=0x%x,end=0x%x,virt(%d),io(%d),wr(%d),s_b32(%d),e_b32(%d)\n",
+ mau_cfg.start, mau_cfg.end, mau_cfg.virt, mau_cfg.io,
+ mau_cfg.write_monitor, mau_cfg.start_bit32, mau_cfg.end_bit32);
+ } else
+ M4ULOG_MID("mau no assert in set %d\n", mau);
- return 0;
+ return 0;
}
int mau_dump_status(int m4u_id, int m4u_slave_id)
{
- int i;
+ int i;
- for (i = 0; i < MAU_NR_PER_M4U_SLAVE; i++)
- {
- __mau_dump_status(m4u_id, m4u_slave_id, i);
- }
+ for (i = 0; i < MAU_NR_PER_M4U_SLAVE; i++)
+ __mau_dump_status(m4u_id, m4u_slave_id, i);
- return 0;
+ return 0;
}
-int m4u_dump_reg(int m4u_index)
+int m4u_dump_reg(int m4u_index, unsigned int start)
{
- int i;
- M4UINFO("Register Start ======= \n");
- for(i=0;i<368/8;i+=4)
- {
- M4UINFO("+0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x \n", 8*i,
- M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*0), M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*1),
- M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*2), M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*3),
- M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*4), M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*5),
- M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*6), M4U_ReadReg32(gM4UBaseAddr[m4u_index], 8*i + 4*7));
- }
- M4UINFO("Register End ========== \n");
+ int i;
- return 0;
+ M4UINFO("Register Start =======\n");
+ for (i = 0; i < 368 / 8; i += 4) {
+ M4UINFO("+0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", start + 8 * i,
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 0),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 1),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 2),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 3),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 4),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 5),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 6),
+ M4U_ReadReg32(gM4UBaseAddr[m4u_index], start + 8 * i + 4 * 7));
+ }
+ M4UINFO("Register End ==========\n");
+
+ return 0;
}
-unsigned int m4u_get_main_descriptor(int m4u_id,int m4u_slave_id, int idx)
+unsigned int m4u_get_main_descriptor(int m4u_id, int m4u_slave_id, int idx)
{
- unsigned int regValue=0;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- regValue = F_READ_ENTRY_EN \
- | F_READ_ENTRY_MMx_MAIN(m4u_slave_id) \
- | F_READ_ENTRY_MAIN_IDX(idx);
-
-
- M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
- while(M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN);
- return M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
+ unsigned int regValue = 0;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+
+ regValue = F_READ_ENTRY_EN
+ | F_READ_ENTRY_MMx_MAIN(m4u_slave_id)
+ | F_READ_ENTRY_MAIN_IDX(idx);
+
+ M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
+ while (M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN)
+ ;
+ return M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
}
+
unsigned int m4u_get_main_tag(int m4u_id, int m4u_slave_id, int idx)
{
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- return M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(m4u_slave_id, idx));
-
+ return M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(m4u_slave_id, idx));
}
void m4u_get_main_tlb(int m4u_id, int m4u_slave_id, int idx, mmu_tlb_t *pTlb)
{
- pTlb->tag = m4u_get_main_tag(m4u_id, m4u_slave_id, idx);
- pTlb->desc = m4u_get_main_descriptor(m4u_id, m4u_slave_id, idx);
+ pTlb->tag = m4u_get_main_tag(m4u_id, m4u_slave_id, idx);
+ pTlb->desc = m4u_get_main_descriptor(m4u_id, m4u_slave_id, idx);
}
-
unsigned int m4u_get_pfh_tlb(int m4u_id, int set, int page, int way, mmu_tlb_t *pTlb)
{
- unsigned int regValue=0;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
-
- regValue = F_READ_ENTRY_EN \
- | F_READ_ENTRY_PFH \
- | F_READ_ENTRY_PFH_IDX(set) \
- | F_READ_ENTRY_PFH_PAGE_IDX(page) \
- | F_READ_ENTRY_PFH_WAY(way);
-
- M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
- while(M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN);
- pTlb->desc = M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
- pTlb->tag = M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG_RDATA);
-
- return 0;
+ unsigned int regValue = 0;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+
+ regValue = F_READ_ENTRY_EN
+ | F_READ_ENTRY_PFH
+ | F_READ_ENTRY_PFH_IDX(set)
+ | F_READ_ENTRY_PFH_PAGE_IDX(page)
+ | F_READ_ENTRY_PFH_WAY(way);
+
+ M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
+ while (M4U_ReadReg32(m4u_base, REG_MMU_READ_ENTRY)&F_READ_ENTRY_EN)
+ ;
+ pTlb->desc = M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
+ pTlb->tag = M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG_RDATA);
+
+ return 0;
}
+
unsigned int m4u_get_pfh_tag(int m4u_id, int set, int page, int way)
{
- mmu_tlb_t tlb;
- m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
- return tlb.tag;
+ mmu_tlb_t tlb;
+
+ m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
+ return tlb.tag;
}
unsigned int m4u_get_pfh_descriptor(int m4u_id, int set, int page, int way)
{
- mmu_tlb_t tlb;
- m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
- return tlb.desc;
-}
+ mmu_tlb_t tlb;
+ m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
+ return tlb.desc;
+}
-int m4u_dump_main_tlb(int m4u_id, int m4u_slave_id)
+int m4u_dump_main_tlb(int m4u_id, int m4u_slave_id)
{
- // M4U related
- unsigned int i=0;
- mmu_tlb_t tlb;
- M4ULOG_HIGH("dump main tlb: m4u %d ====>\n", m4u_id);
- for(i=0;i<gM4UTagCount[m4u_id];i++)
- {
- m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
- printk("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
- if((i+1)%8==0)
- printk("===\n");
- }
+ /* M4U related */
+ unsigned int i = 0;
+ mmu_tlb_t tlb;
- return 0;
+ M4ULOG_HIGH("dump main tlb: m4u %d ====>\n", m4u_id);
+ for (i = 0; i < gM4UTagCount[m4u_id]; i++) {
+ m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
+ M4ULOG_HIGH("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
+ if ((i+1)%8 == 0)
+ M4ULOG_HIGH("===\n");
+ }
+
+ return 0;
}
int m4u_dump_invalid_main_tlb(int m4u_id, int m4u_slave_id)
{
- unsigned int i=0;
- mmu_tlb_t tlb;
- M4UMSG("dump inv main tlb=>\n");
- for(i=0;i<gM4UTagCount[m4u_id];i++)
- {
- m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
- if((tlb.tag&(F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
+ unsigned int i = 0;
+ mmu_tlb_t tlb;
+
+ M4UMSG("dump inv main tlb=>\n");
+ for (i = 0; i < gM4UTagCount[m4u_id]; i++) {
+ m4u_get_main_tlb(m4u_id, m4u_slave_id, i, &tlb);
+ if ((tlb.tag&(F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
== (F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
- {
- printk("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
- }
- }
- printk("\n");
-
- return 0;
+ M4ULOG_HIGH("%d:0x%x:0x%x ", i, tlb.tag, tlb.desc);
+ }
+ M4ULOG_HIGH("\n");
+
+ return 0;
}
-static unsigned int imu_pfh_tag_to_va(int mmu,int set,int way,unsigned int tag)
+static unsigned int imu_pfh_tag_to_va(int mmu, int set, int way, unsigned int tag)
{
- unsigned int tmp;
- if(tag&F_PFH_TAG_LAYER_BIT)
- {
- return (F_PFH_TAG_VA_GET(mmu,tag)|((set)<<15));
- }
- else
- {
- tmp = F_PFH_TAG_VA_GET(mmu,tag);
- tmp &= F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu);
- tmp |= (set)<<23;
- return tmp;
- }
-}
+ unsigned int tmp;
+
+ if (tag&F_PFH_TAG_LAYER_BIT)
+ return (F_PFH_TAG_VA_GET(mmu, tag)|((set)<<15));
+ tmp = F_PFH_TAG_VA_GET(mmu, tag);
+ tmp &= F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu);
+ tmp |= (set)<<23;
+ return tmp;
+}
-int m4u_dump_pfh_tlb(int m4u_id)
+int m4u_dump_pfh_tlb(int m4u_id)
{
- unsigned int regval;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- int result = 0;
- int set_nr, way_nr, set, way;
- int valid;
-
-
- set_nr = MMU_SET_NR(m4u_id);
- way_nr = MMU_WAY_NR;
-
- printk("dump pfh_tlb: m4u %d ====> \n", m4u_id);
-
- for(way=0; way<way_nr; way++)
- {
- for(set=0; set<set_nr; set++)
- {
- int page;
- mmu_tlb_t tlb;
- regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
- valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
- m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
- printk("va(0x%x) lay(%d) 16x(%d) sec(%d) pfh(%d) v(%d),set(%d),way(%d), 0x%x:",
- imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag),
- !!(tlb.tag & F_PFH_TAG_LAYER_BIT),
- !!(tlb.tag & F_PFH_TAG_16X_BIT),
- !!(tlb.tag & F_PFH_TAG_SEC_BIT),
- !!(tlb.tag & F_PFH_TAG_AUTO_PFH),
- valid,
- set, way,
- tlb.desc);
-
- for(page=1; page<8; page++)
- {
- m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
- printk("0x%x:", tlb.desc);
- }
- printk("\n");
-
- }
- }
-
- return result;
-}
-
-
-int m4u_get_pfh_tlb_all(int m4u_id, mmu_pfh_tlb_t* pfh_buf)
-{
- unsigned int regval;
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- int set_nr, way_nr, set, way;
- int valid;
- int pfh_id = 0;
-
- set_nr = MMU_SET_NR(m4u_id);
- way_nr = MMU_WAY_NR;
-
- for(way=0; way<way_nr; way++)
- {
- for(set=0; set<set_nr; set++)
- {
- int page;
- mmu_tlb_t tlb;
- regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
- valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
- m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
-
- pfh_buf[pfh_id].tag = tlb.tag;
- pfh_buf[pfh_id].va = imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag);
- pfh_buf[pfh_id].layer = !!(tlb.tag & F_PFH_TAG_LAYER_BIT);
- pfh_buf[pfh_id].x16 = !!(tlb.tag & F_PFH_TAG_16X_BIT);
- pfh_buf[pfh_id].sec = !!(tlb.tag & F_PFH_TAG_SEC_BIT);
- pfh_buf[pfh_id].pfh = !!(tlb.tag & F_PFH_TAG_AUTO_PFH);
- pfh_buf[pfh_id].set = set;
- pfh_buf[pfh_id].way = way;
- pfh_buf[pfh_id].valid = valid;
- pfh_buf[pfh_id].desc[0] = tlb.desc;
- pfh_buf[pfh_id].page_size = pfh_buf[pfh_id].layer ? MMU_SMALL_PAGE_SIZE : MMU_SECTION_SIZE;
-
- for(page=1; page<MMU_PAGE_PER_LINE; page++)
- {
- m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
- pfh_buf[pfh_id].desc[page] = tlb.desc;
- }
- pfh_id++;
-
- }
- }
+ unsigned int regval;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+ int result = 0;
+ int set_nr, way_nr, set, way;
+ int valid;
- return 0;
-}
+ set_nr = MMU_SET_NR(m4u_id);
+ way_nr = MMU_WAY_NR;
+ M4ULOG_HIGH("dump pfh_tlb: m4u %d ====>\n", m4u_id);
+ for (way = 0; way < way_nr; way++) {
+ for (set = 0; set < set_nr; set++) {
+ int page;
+ mmu_tlb_t tlb;
+ regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
+ valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
+ m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
+ M4ULOG_HIGH("va(0x%x) lay(%d) 16x(%d) sec(%d) pfh(%d) v(%d),set(%d),way(%d), 0x%x:",
+ imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag),
+ !!(tlb.tag & F_PFH_TAG_LAYER_BIT),
+ !!(tlb.tag & F_PFH_TAG_16X_BIT),
+ !!(tlb.tag & F_PFH_TAG_SEC_BIT),
+ !!(tlb.tag & F_PFH_TAG_AUTO_PFH),
+ valid,
+ set, way,
+ tlb.desc);
+ for (page = 1; page < 8; page++) {
+ m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
+ M4ULOG_HIGH("0x%x:", tlb.desc);
+ }
+ M4ULOG_HIGH("\n");
+ }
+ }
+
+ return result;
+}
+
+int m4u_get_pfh_tlb_all(int m4u_id, mmu_pfh_tlb_t *pfh_buf)
+{
+ unsigned int regval;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_id];
+ int set_nr, way_nr, set, way;
+ int valid;
+ int pfh_id = 0;
+
+ set_nr = MMU_SET_NR(m4u_id);
+ way_nr = MMU_WAY_NR;
+
+ for (way = 0; way < way_nr; way++) {
+ for (set = 0; set < set_nr; set++) {
+ int page;
+ mmu_tlb_t tlb;
+
+ regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_id, set, way));
+ valid = !!(regval & F_MMU_PFH_VLD_BIT(set, way));
+ m4u_get_pfh_tlb(m4u_id, set, 0, way, &tlb);
+
+ pfh_buf[pfh_id].tag = tlb.tag;
+ pfh_buf[pfh_id].va = imu_pfh_tag_to_va(m4u_id, set, way, tlb.tag);
+ pfh_buf[pfh_id].layer = !!(tlb.tag & F_PFH_TAG_LAYER_BIT);
+ pfh_buf[pfh_id].x16 = !!(tlb.tag & F_PFH_TAG_16X_BIT);
+ pfh_buf[pfh_id].sec = !!(tlb.tag & F_PFH_TAG_SEC_BIT);
+ pfh_buf[pfh_id].pfh = !!(tlb.tag & F_PFH_TAG_AUTO_PFH);
+ pfh_buf[pfh_id].set = set;
+ pfh_buf[pfh_id].way = way;
+ pfh_buf[pfh_id].valid = valid;
+ pfh_buf[pfh_id].desc[0] = tlb.desc;
+ pfh_buf[pfh_id].page_size = pfh_buf[pfh_id].layer ? MMU_SMALL_PAGE_SIZE : MMU_SECTION_SIZE;
+
+ for (page = 1; page < MMU_PAGE_PER_LINE; page++) {
+ m4u_get_pfh_tlb(m4u_id, set, page, way, &tlb);
+ pfh_buf[pfh_id].desc[page] = tlb.desc;
+ }
+ pfh_id++;
+ }
+ }
+
+ return 0;
+}
int m4u_confirm_main_range_invalidated(int m4u_index, int m4u_slave_id, unsigned int MVAStart, unsigned int MVAEnd)
{
- unsigned int i;
- unsigned int regval;
-
- ///> check Main TLB part
- for(i=0;i<gM4UTagCount[m4u_index];i++)
- {
- regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
-
- if(regval & (F_MAIN_TLB_VALID_BIT))
- {
- unsigned int tag_s, tag_e, sa, ea;
- int layer = regval&F_MAIN_TLB_LAYER_BIT;
- int large = regval&F_MAIN_TLB_16X_BIT;
-
- tag_s = regval & F_MAIN_TLB_VA_MSK;
- sa = MVAStart & (~(PAGE_SIZE-1));
- ea = MVAEnd | (PAGE_SIZE-1);
-
- if(layer)
- { //pte
- if(large)
- {
- tag_e = tag_s + MMU_LARGE_PAGE_SIZE -1;
- }
- else
- {
- tag_e = tag_s + PAGE_SIZE - 1;
- }
-
- if( !((tag_e<sa)||(tag_s>ea)))
- {
- M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
- i, m4u_index, MVAStart, MVAEnd, regval);
- return -1;
- }
-
- }
- else
- {
- if(large)
- {
- tag_e = tag_s + MMU_SUPERSECTION_SIZE -1;
- }
- else
- {
- tag_e = tag_s + MMU_SECTION_SIZE - 1;
- }
-
- if((tag_s>=sa)&&(tag_e<=ea))
- {
- M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
- i, m4u_index, MVAStart, MVAEnd, regval);
- return -1;
- }
- }
-
- }
- }
- return 0;
+ unsigned int i;
+ unsigned int regval;
+
+ /* /> check Main TLB part */
+ for (i = 0; i < gM4UTagCount[m4u_index]; i++) {
+ regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
+
+ if (regval & (F_MAIN_TLB_VALID_BIT)) {
+ unsigned int tag_s, tag_e, sa, ea;
+ int layer = regval&F_MAIN_TLB_LAYER_BIT;
+ int large = regval&F_MAIN_TLB_16X_BIT;
+
+ tag_s = regval & F_MAIN_TLB_VA_MSK;
+ sa = MVAStart & (~(PAGE_SIZE-1));
+ ea = MVAEnd | (PAGE_SIZE-1);
+
+ if (layer) { /* pte */
+ if (large)
+ tag_e = tag_s + MMU_LARGE_PAGE_SIZE - 1;
+ else
+ tag_e = tag_s + PAGE_SIZE - 1;
+
+ if (!((tag_e < sa) || (tag_s > ea))) {
+ M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
+ i, m4u_index, MVAStart, MVAEnd, regval);
+ return -1;
+ }
+ } else {
+ if (large)
+ tag_e = tag_s + MMU_SUPERSECTION_SIZE - 1;
+ else
+ tag_e = tag_s + MMU_SECTION_SIZE - 1;
+
+ if ((tag_s >= sa) && (tag_e <= ea)) {
+ M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
+ i, m4u_index, MVAStart, MVAEnd, regval);
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
}
int m4u_confirm_range_invalidated(int m4u_index, unsigned int MVAStart, unsigned int MVAEnd)
{
- unsigned int i = 0;
- unsigned int regval;
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- int result = 0;
- int set_nr, way_nr, set, way;
-
- ///> check Main TLB part
- result = m4u_confirm_main_range_invalidated(m4u_index, 0, MVAStart, MVAEnd);
- if(result < 0)
- return -1;
-
- if(m4u_index==0)
- {
- result = m4u_confirm_main_range_invalidated(m4u_index, 1, MVAStart, MVAEnd);
- if(result < 0)
- return -1;
- }
-
-
- set_nr = MMU_SET_NR(m4u_index);
- way_nr = MMU_WAY_NR;
-
-
- for(way=0; way<way_nr; way++)
- {
- for(set=0; set<set_nr; set++)
- {
- regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
- if(regval & F_MMU_PFH_VLD_BIT(set, way))
- {
- unsigned int tag = m4u_get_pfh_tag(m4u_index, set, 0, way);
- unsigned int tag_s, tag_e, sa, ea;
- int layer = tag&F_PFH_TAG_LAYER_BIT;
- int large = tag&F_PFH_TAG_16X_BIT;
-
- tag_s = imu_pfh_tag_to_va(m4u_index,set, way, tag);
-
- sa = MVAStart & (~(PAGE_SIZE-1));
- ea = MVAEnd | (PAGE_SIZE-1);
-
- if(layer)
- { //pte
- if(large)
- {
- tag_e = tag_s + MMU_LARGE_PAGE_SIZE*8 -1;
- }
- else
- {
- tag_e = tag_s + PAGE_SIZE*8 - 1;
- }
-
- if( !((tag_e<sa)||(tag_s>ea)))
- {
- M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
- i, m4u_index, MVAStart, MVAEnd, regval);
- return -1;
- }
-
- }
- else
- {
- if(large)
- {
- tag_e = tag_s + MMU_SUPERSECTION_SIZE*8 -1;
- }
- else
- {
- tag_e = tag_s + MMU_SECTION_SIZE*8 - 1;
- }
-
- //if((tag_s>=sa)&&(tag_e<=ea))
- if( !((tag_e<sa)||(tag_s>ea)))
- {
- M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
- i, m4u_index, MVAStart, MVAEnd, regval);
- return -1;
- }
- }
-
- }
- }
- }
-
- return result;
+ unsigned int i = 0;
+ unsigned int regval;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_index];
+ int result = 0;
+ int set_nr, way_nr, set, way;
+
+ /* /> check Main TLB part */
+ result = m4u_confirm_main_range_invalidated(m4u_index, 0, MVAStart, MVAEnd);
+ if (result < 0)
+ return -1;
+
+ if (m4u_index == 0) {
+ result = m4u_confirm_main_range_invalidated(m4u_index, 1, MVAStart, MVAEnd);
+ if (result < 0)
+ return -1;
+ }
+
+ set_nr = MMU_SET_NR(m4u_index);
+ way_nr = MMU_WAY_NR;
+
+ for (way = 0; way < way_nr; way++) {
+ for (set = 0; set < set_nr; set++) {
+ regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
+ if (regval & F_MMU_PFH_VLD_BIT(set, way)) {
+ unsigned int tag = m4u_get_pfh_tag(m4u_index, set, 0, way);
+ unsigned int tag_s, tag_e, sa, ea;
+ int layer = tag&F_PFH_TAG_LAYER_BIT;
+ int large = tag&F_PFH_TAG_16X_BIT;
+
+ tag_s = imu_pfh_tag_to_va(m4u_index, set, way, tag);
+
+ sa = MVAStart & (~(PAGE_SIZE-1));
+ ea = MVAEnd | (PAGE_SIZE-1);
+
+ if (layer) { /* pte */
+ if (large)
+ tag_e = tag_s + MMU_LARGE_PAGE_SIZE*8 - 1;
+ else
+ tag_e = tag_s + PAGE_SIZE*8 - 1;
+
+ if (!((tag_e < sa) || (tag_s > ea))) {
+ M4UERR(
+ "main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
+ i, m4u_index, MVAStart, MVAEnd, regval);
+ return -1;
+ }
+
+ } else {
+ if (large)
+ tag_e = tag_s + MMU_SUPERSECTION_SIZE*8 - 1;
+ else
+ tag_e = tag_s + MMU_SECTION_SIZE*8 - 1;
+
+ /* if((tag_s>=sa)&&(tag_e<=ea)) */
+ if (!((tag_e < sa) || (tag_s > ea))) {
+ M4UERR(
+ "main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
+ i, m4u_index, MVAStart, MVAEnd, regval);
+ return -1;
+ }
+ }
+ }
+ }
+ }
+
+ return result;
}
int m4u_confirm_main_all_invalid(int m4u_index, int m4u_slave_id)
{
- unsigned int i;
- unsigned int regval;
-
- for(i=0;i<gM4UTagCount[m4u_index];i++)
- {
- regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
-
- if(regval & (F_MAIN_TLB_VALID_BIT))
- {
- M4UERR("main: i=%d, idx=0x%x, RegValue=0x%x\n", i, m4u_index, regval);
- return -1;
- }
- }
- return 0;
+ unsigned int i;
+ unsigned int regval;
+
+ for (i = 0; i < gM4UTagCount[m4u_index]; i++) {
+ regval = m4u_get_main_tag(m4u_index, m4u_slave_id, i);
+
+ if (regval & (F_MAIN_TLB_VALID_BIT)) {
+ M4UERR("main: i=%d, idx=0x%x, RegValue=0x%x\n", i, m4u_index, regval);
+ return -1;
+ }
+ }
+ return 0;
}
int m4u_confirm_pfh_all_invalid(int m4u_index)
{
- unsigned int regval;
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- int set_nr, way_nr, set, way;
-
- set_nr = MMU_SET_NR(m4u_index);
- way_nr = MMU_WAY_NR;
-
- for(way=0; way<way_nr; way++)
- {
- for(set=0; set<set_nr; set++)
- {
- regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
- if(regval & F_MMU_PFH_VLD_BIT(set, way))
- {
- return -1;
- }
- }
- }
- return 0;
-}
+ unsigned int regval;
+ unsigned long m4u_base = gM4UBaseAddr[m4u_index];
+ int set_nr, way_nr, set, way;
-int m4u_confirm_all_invalidated(int m4u_index)
-{
- if(m4u_confirm_main_all_invalid(m4u_index, 0))
- return -1;
-
- if(m4u_index==0)
- {
- if(m4u_confirm_main_all_invalid(m4u_index, 1))
- return -1;
- }
+ set_nr = MMU_SET_NR(m4u_index);
+ way_nr = MMU_WAY_NR;
- if(m4u_confirm_pfh_all_invalid(m4u_index))
- return -1;
+	for (way = 0; way < way_nr; way++)
+		for (set = 0; set < set_nr; set++) {
+			regval = M4U_ReadReg32(m4u_base, REG_MMU_PFH_VLD(m4u_index, set, way));
+			if (regval & F_MMU_PFH_VLD_BIT(set, way))
+				return -1;
+		}
- return 0;
+ return 0;
}
+int m4u_confirm_all_invalidated(int m4u_index)
+{
+ if (m4u_confirm_main_all_invalid(m4u_index, 0))
+ return -1;
+ if (m4u_index == 0)
+ if (m4u_confirm_main_all_invalid(m4u_index, 1))
+ return -1;
+ if (m4u_confirm_pfh_all_invalid(m4u_index))
+ return -1;
+ return 0;
+}
int m4u_power_on(int m4u_index)
{
- return 0;
+ return 0;
}
int m4u_power_off(int m4u_index)
{
- return 0;
+ return 0;
}
-#if !defined(CONFIG_MTK_LEGACY)
-extern char *smi_clk_name[];
-#endif
-
static int m4u_clock_on(void)
{
#if defined(CONFIG_MTK_LEGACY)
-// no m4u, smi CG
- enable_clock(MT_CG_INFRA_M4U, "infra_m4u");
-// enable_clock(MT_CG_INFRA_SMI, "infra_smi");
+/* no m4u, smi CG */
+ enable_clock(MT_CG_INFRA_M4U, "infra_m4u");
+/* enable_clock(MT_CG_INFRA_SMI, "infra_smi"); */
#else
- int ret;
- ret = clk_prepare_enable(gM4uDev->infra_m4u);
- if (ret)
- M4UMSG("error: prepare clk infra m4u fail!.\n");
+ int ret;
+
+ ret = clk_prepare_enable(gM4uDev->infra_m4u);
+ if (ret)
+ M4UMSG("error: prepare clk infra m4u fail!.\n");
#endif
- return 0;
+ return 0;
}
/*
@@ -935,884 +826,873 @@ static int m4u_clock_off(void)
}
*/
+#if !defined(CONFIG_MTK_LEGACY)
+const char *smi_clk_name[] = {
+ "smi_common", "m4u_disp0_smi_larb0", "m4u_vdec0_vdec", "m4u_vdec1_larb",
+ "m4u_img_image_larb2_smi", "m4u_venc_venc", "m4u_venc_larb"
+};
+#endif
+
static int larb_clock_on(int larb)
{
-#if defined (CONFIG_MTK_LEGACY)
- switch (larb)
- {
- case 0:
- enable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
- break;
- case 1:
- enable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
- enable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
- break;
- case 2:
- enable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
- break;
+#if defined(CONFIG_MTK_LEGACY)
+ switch (larb) {
+ case 0:
+ enable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
+ break;
+ case 1:
+ enable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
+ enable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
+ break;
+ case 2:
+ enable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
+ break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
- case 3:
- enable_clock(MT_CG_VENC_VENC, "m4u_larb3");
- enable_clock(MT_CG_VENC_LARB, "m4u_larb3");
- break;
+ case 3:
+ enable_clock(MT_CG_VENC_VENC, "m4u_larb3");
+ enable_clock(MT_CG_VENC_LARB, "m4u_larb3");
+ break;
#endif
- default:
- M4UMSG("error: unknown larb id %d, %s\n", larb, __FUNCTION__);
- break;
- }
+ default:
+ M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
+ break;
+ }
#else
- int ret;
- switch (larb)
- {
- case 0:
- ret = clk_prepare_enable(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[DISP0_SMI_LARB0_CLK]);
- break;
- case 1:
- ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC0_VDEC_CLK]);
- ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC1_LARB_CLK]);
- break;
- case 2:
- ret = clk_prepare_enable(gM4uDev->smi_clk[LARB2_SMI_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[LARB2_SMI_CLK]);
- break;
+ int ret;
+
+ switch (larb) {
+ case 0:
+ ret = clk_prepare_enable(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[DISP0_SMI_LARB0_CLK]);
+ break;
+ case 1:
+ ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC0_VDEC_CLK]);
+ ret = clk_prepare_enable(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VDEC1_LARB_CLK]);
+ break;
+ case 2:
+ ret = clk_prepare_enable(gM4uDev->smi_clk[LARB2_SMI_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[LARB2_SMI_CLK]);
+ break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
- case 3:
- ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_VENC_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_VENC_CLK]);
- ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_LARB_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_LARB_CLK]);
- break;
+ case 3:
+ ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_VENC_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_VENC_CLK]);
+ ret = clk_prepare_enable(gM4uDev->smi_clk[VENC_LARB_CLK]);
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[VENC_LARB_CLK]);
+ break;
#endif
- default:
- M4UMSG("error: unknown larb id %d, %s\n", larb, __FUNCTION__);
- break;
- }
+ default:
+ M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
+ break;
+ }
#endif
- return 0;
+ return 0;
}
static int larb_clock_off(int larb)
{
-#if defined (CONFIG_MTK_LEGACY)
- switch (larb)
- {
- case 0:
- disable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
- break;
- case 1:
- disable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
- disable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
- break;
- case 2:
- disable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
- break;
+#if defined(CONFIG_MTK_LEGACY)
+ switch (larb) {
+ case 0:
+ disable_clock(MT_CG_DISP0_SMI_LARB0, "m4u_larb0");
+ break;
+ case 1:
+ disable_clock(MT_CG_VDEC0_VDEC, "m4u_larb1");
+ disable_clock(MT_CG_VDEC1_LARB, "m4u_larb1");
+ break;
+ case 2:
+ disable_clock(MT_CG_IMAGE_LARB2_SMI, "m4u_larb2");
+ break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
- case 3:
- disable_clock(MT_CG_VENC_VENC, "m4u_larb3");
- disable_clock(MT_CG_VENC_LARB, "m4u_larb3");
- break;
+ case 3:
+ disable_clock(MT_CG_VENC_VENC, "m4u_larb3");
+ disable_clock(MT_CG_VENC_LARB, "m4u_larb3");
+ break;
#endif
- default:
- M4UMSG("error: unknown larb id %d, %s\n", larb, __FUNCTION__);
- break;
- }
+ default:
+ M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
+ break;
+ }
#else
- switch (larb)
- {
- case 0:
- clk_disable_unprepare(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
- break;
- case 1:
- clk_disable_unprepare(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
- clk_disable_unprepare(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
- break;
- case 2:
- clk_disable_unprepare(gM4uDev->smi_clk[LARB2_SMI_CLK]);
- break;
+ switch (larb) {
+ case 0:
+ clk_disable_unprepare(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
+ break;
+ case 1:
+ clk_disable_unprepare(gM4uDev->smi_clk[VDEC0_VDEC_CLK]);
+ clk_disable_unprepare(gM4uDev->smi_clk[VDEC1_LARB_CLK]);
+ break;
+ case 2:
+ clk_disable_unprepare(gM4uDev->smi_clk[LARB2_SMI_CLK]);
+ break;
#if defined(CONFIG_ARCH_MT6735) || defined(CONFIG_ARCH_MT6753)
- case 3:
- clk_disable_unprepare(gM4uDev->smi_clk[VENC_VENC_CLK]);
- clk_disable_unprepare(gM4uDev->smi_clk[VENC_LARB_CLK]);
- break;
+ case 3:
+ clk_disable_unprepare(gM4uDev->smi_clk[VENC_VENC_CLK]);
+ clk_disable_unprepare(gM4uDev->smi_clk[VENC_LARB_CLK]);
+ break;
#endif
- default:
- M4UMSG("error: unknown larb id %d, %s\n", larb, __FUNCTION__);
- break;
- }
+ default:
+ M4UMSG("error: unknown larb id %d, %s\n", larb, __func__);
+ break;
+ }
#endif
- return 0;
+ return 0;
}
static int larb_clock_all_on(void)
{
- int i;
- for( i=0 ; i < SMI_LARB_NR ; i++)
- {
- larb_clock_on(i);
- }
- return 0;
+ int i;
+
+ for (i = 0 ; i < SMI_LARB_NR ; i++)
+ larb_clock_on(i);
+
+ return 0;
}
static int larb_clock_all_off(void)
{
- int i;
- for( i=0 ; i < SMI_LARB_NR ; i++)
- {
- larb_clock_off(i);
- }
- return 0;
+ int i;
+
+ for (i = 0 ; i < SMI_LARB_NR ; i++)
+ larb_clock_off(i);
+
+ return 0;
+}
+
+void smi_common_clock_on(void)
+{
+#if defined(CONFIG_MTK_LEGACY)
+ enable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
+ /* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x1); */
+#else
+ int ret = clk_prepare_enable(gM4uDev->smi_clk[SMI_COMMON_CLK]);
+
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[SMI_COMMON_CLK]);
+#endif
+}
+
+void smi_larb0_clock_on(void)
+{
+#if defined(CONFIG_MTK_LEGACY)
+ enable_clock(MT_CG_DISP0_SMI_LARB0, "smi_larb0");
+ /* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x1); */
+#else
+ int ret = clk_prepare_enable(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
+
+ if (ret)
+ M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[DISP0_SMI_LARB0_CLK]);
+#endif
}
-static void smi_common_clock_on(void)
+EXPORT_SYMBOL(smi_common_clock_on);
+
+void smi_common_clock_off(void)
{
-#if defined (CONFIG_MTK_LEGACY)
- enable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
- //m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x1);
+#if defined(CONFIG_MTK_LEGACY)
+ disable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
+ /* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x0); */
#else
- int ret = clk_prepare_enable(gM4uDev->smi_clk[SMI_COMMON_CLK]);
- if (ret)
- M4UMSG("error: prepare clk %s fail!.\n", smi_clk_name[SMI_COMMON_CLK]);
+ clk_disable_unprepare(gM4uDev->smi_clk[SMI_COMMON_CLK]);
#endif
}
-static void smi_common_clock_off(void)
+void smi_larb0_clock_off(void)
{
-#if defined (CONFIG_MTK_LEGACY)
- disable_clock(MT_CG_DISP0_SMI_COMMON, "smi_common");
- //m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x0);
+#if defined(CONFIG_MTK_LEGACY)
+ disable_clock(MT_CG_DISP0_SMI_LARB0, "smi_larb0");
+ /* m4uHw_set_field_by_mask(0, 0xf4000108, 0x1, 0x0); */
#else
- clk_disable_unprepare(gM4uDev->smi_clk[SMI_COMMON_CLK]);
+ clk_disable_unprepare(gM4uDev->smi_clk[DISP0_SMI_LARB0_CLK]);
#endif
}
+EXPORT_SYMBOL(smi_common_clock_off);
-int m4u_insert_seq_range(M4U_PORT_ID port,unsigned int MVAStart,unsigned int MVAEnd)
+int m4u_insert_seq_range(M4U_PORT_ID port, unsigned int MVAStart, unsigned int MVAEnd)
{
- int i, free_id = -1;
- unsigned int m4u_index = m4u_port_2_m4u_id(port);
- unsigned int m4u_slave_id = m4u_port_2_m4u_slave_id(port);
- M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;
-
- M4ULOG_MID("m4u_insert_seq_range , module:%s, MVAStart:0x%x, MVAEnd:0x%x\n",
- m4u_get_port_name(port), MVAStart, MVAEnd);
+ int i, free_id = -1;
+ unsigned int m4u_index;
+ unsigned int m4u_slave_id;
+ M4U_RANGE_DES_T *pSeq;
- if(MVAEnd - MVAStart < PAGE_SIZE)
- {
- M4ULOG_MID("too small size, skip to insert! module:%s, MVAStart:0x%x, size:%d\n",
- m4u_get_port_name(port), MVAStart, MVAEnd - MVAStart +1);
- return free_id;
- }
+ m4u_index = m4u_port_2_m4u_id(port);
+ m4u_slave_id = m4u_port_2_m4u_slave_id(port);
-//===============================================
- //every seq range has to align to 1M Bytes
- MVAStart &= ~M4U_SEQ_ALIGN_MSK;
- MVAEnd |= M4U_SEQ_ALIGN_MSK;
+ if (m4u_index == -1 || m4u_slave_id == -1)
+ return -1;
- mutex_lock(&gM4u_seq_mutex);
+ pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;
+
+ M4ULOG_MID("m4u_insert_seq_range , module:%s, MVAStart:0x%x, MVAEnd:0x%x\n",
+ m4u_get_port_name(port), MVAStart, MVAEnd);
+
+ if (MVAEnd - MVAStart < PAGE_SIZE) {
+ M4ULOG_MID("too small size, skip to insert! module:%s, MVAStart:0x%x, size:%d\n",
+ m4u_get_port_name(port), MVAStart, MVAEnd - MVAStart + 1);
+ return free_id;
+ }
-//==================================================================
- // check if the range is overlap with previous ones
-
- for(i=0; i<M4U_SEQ_NUM(m4u_index); i++)
- {
- if(1==pSeq[i].Enabled)
- {
- if(MVAEnd<pSeq[i].MVAStart || MVAStart>pSeq[i].MVAEnd) //no overlap
- {
- continue;
- }
- else
- {
- M4ULOG_HIGH("insert range overlap!: larb=%d,module=%s\n",
- m4u_port_2_larb_id(port), m4u_get_port_name(port));
- M4ULOG_HIGH("warning: insert tlb range is overlapped with previous ranges, current process=%s,!\n", current->comm);
- M4ULOG_HIGH("module=%s, mva_start=0x%x, mva_end=0x%x \n", m4u_get_port_name(port), MVAStart, MVAEnd);
- M4ULOG_HIGH("overlapped range id=%d, module=%s, mva_start=0x%x, mva_end=0x%x \n",
- i, m4u_get_port_name(pSeq[i].port), pSeq[i].MVAStart, pSeq[i].MVAEnd);
- mutex_unlock(&gM4u_seq_mutex);
- return -1;
- }
- }
- else
- {
- free_id = i;
- }
- }
-
- if(free_id == -1)
- {
- M4ULOG_MID("warning: can not find available range \n");
- mutex_unlock(&gM4u_seq_mutex);
- return -1;
- }
-
- ///> record range information in array
- pSeq[free_id].Enabled = 1;
- pSeq[free_id].port = port;
- pSeq[free_id].MVAStart = MVAStart;
- pSeq[free_id].MVAEnd = MVAEnd;
-
- mutex_unlock(&gM4u_seq_mutex);
-
- ///> set the range register
-
- MVAStart &= F_SQ_VA_MASK;
- MVAStart |= F_SQ_EN_BIT;
- //align mvaend to 1M
- MVAEnd |= ~F_SQ_VA_MASK;
-
- spin_lock(&gM4u_reg_lock);
- {
- M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_START(m4u_slave_id,free_id), MVAStart);
- M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_END(m4u_slave_id,free_id), MVAEnd);
- }
- spin_unlock(&gM4u_reg_lock);
-
- return free_id;
-}
+ /* =============================================== */
+ /* every seq range has to align to 1M Bytes */
+ MVAStart &= ~M4U_SEQ_ALIGN_MSK;
+ MVAEnd |= M4U_SEQ_ALIGN_MSK;
+ mutex_lock(&gM4u_seq_mutex);
+ /* ================================================================== */
+ /* check if the range is overlap with previous ones */
+
+ for (i = 0; i < M4U_SEQ_NUM(m4u_index); i++) {
+		if (pSeq[i].Enabled == 1) {
+ if (MVAEnd < pSeq[i].MVAStart || MVAStart > pSeq[i].MVAEnd)
+ continue;
+ else {
+ M4ULOG_HIGH("insert range overlap!: port=%d,module=%s\n",
+ port, m4u_get_port_name(port));
+ M4ULOG_HIGH(
+ "warning: insert tlb range is overlapped with previous ranges, current process=%s,!\n",
+ current->comm);
+ M4ULOG_HIGH("module=%s, mva_start=0x%x, mva_end=0x%x\n",
+ m4u_get_port_name(port), MVAStart, MVAEnd);
+ M4ULOG_HIGH("overlapped range id=%d, module=%s, mva_start=0x%x, mva_end=0x%x\n",
+ i, m4u_get_port_name(pSeq[i].port), pSeq[i].MVAStart, pSeq[i].MVAEnd);
+ mutex_unlock(&gM4u_seq_mutex);
+ return -1;
+ }
+ } else
+ free_id = i;
+ }
+
+ if (free_id == -1) {
+ M4ULOG_MID("warning: can not find available range\n");
+ mutex_unlock(&gM4u_seq_mutex);
+ return -1;
+ }
+
+ /* /> record range information in array */
+ pSeq[free_id].Enabled = 1;
+ pSeq[free_id].port = port;
+ pSeq[free_id].MVAStart = MVAStart;
+ pSeq[free_id].MVAEnd = MVAEnd;
+
+ mutex_unlock(&gM4u_seq_mutex);
+
+ /* /> set the range register */
+
+ MVAStart &= F_SQ_VA_MASK;
+ MVAStart |= F_SQ_EN_BIT;
+ /* align mvaend to 1M */
+ MVAEnd |= ~F_SQ_VA_MASK;
+
+ spin_lock(&gM4u_reg_lock);
+ {
+ M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_START(m4u_slave_id, free_id), MVAStart);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_index], REG_MMU_SQ_END(m4u_slave_id, free_id), MVAEnd);
+ }
+ spin_unlock(&gM4u_reg_lock);
+
+ return free_id;
+}
int m4u_invalid_seq_range_by_id(int port, int seq_id)
{
- int m4u_index = m4u_port_2_m4u_id(port);
- int m4u_slave_id = m4u_port_2_m4u_slave_id(port);
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;
- int ret=0;
+ int m4u_index;
+ int m4u_slave_id;
+ unsigned long m4u_base;
+ M4U_RANGE_DES_T *pSeq;
+ int ret = 0;
- mutex_lock(&gM4u_seq_mutex);
- {
- pSeq[seq_id].Enabled = 0;
- }
- mutex_unlock(&gM4u_seq_mutex);
-
- spin_lock(&gM4u_reg_lock);
- M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id,seq_id), 0);
- M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id,seq_id), 0);
- spin_unlock(&gM4u_reg_lock);
+ m4u_index = m4u_port_2_m4u_id(port);
+ m4u_slave_id = m4u_port_2_m4u_slave_id(port);
+	/* either lookup failing means port is invalid; '&&' would let a
+	 * half-invalid result fall through to gM4UBaseAddr[-1] below */
+	if (m4u_index == -1 || m4u_slave_id == -1)
+		return -1;
+
+ m4u_base = gM4UBaseAddr[m4u_index];
+ pSeq = gM4USeq[m4u_index] + M4U_SEQ_NUM(m4u_index)*m4u_slave_id;
+
+ mutex_lock(&gM4u_seq_mutex);
+ {
+ pSeq[seq_id].Enabled = 0;
+ }
+ mutex_unlock(&gM4u_seq_mutex);
- return ret;
+ spin_lock(&gM4u_reg_lock);
+ M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id, seq_id), 0);
+ M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id, seq_id), 0);
+ spin_unlock(&gM4u_reg_lock);
+
+ return ret;
}
/*
static int m4u_invalid_seq_range_by_mva(int m4u_index, int m4u_slave_id, unsigned int MVAStart, unsigned int MVAEnd)
{
- unsigned int i;
- unsigned int m4u_base = gM4UBaseAddr[m4u_index];
- M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + SEQ_NR_PER_M4U_SLAVE*m4u_slave_id;
- int ret=-1;
+ unsigned int i;
+ unsigned int m4u_base = gM4UBaseAddr[m4u_index];
+ M4U_RANGE_DES_T *pSeq = gM4USeq[m4u_index] + SEQ_NR_PER_M4U_SLAVE*m4u_slave_id;
+ int ret=-1;
+
+ MVAStart &= ~M4U_SEQ_ALIGN_MSK;
+ MVAEnd |= M4U_SEQ_ALIGN_MSK;
- MVAStart &= ~M4U_SEQ_ALIGN_MSK;
- MVAEnd |= M4U_SEQ_ALIGN_MSK;
-
mutex_lock(&gM4u_seq_mutex);
- for(i=0; i<SEQ_NR_PER_M4U_SLAVE; i++)
- {
- if(pSeq[i].Enabled == 1 &&
- pSeq[i].MVAStart>=MVAStart &&
- pSeq[i].MVAEnd<=MVAEnd)
- {
- pSeq[i].Enabled = 0;
- spin_lock(&gM4u_reg_lock);
- M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id,i), 0);
- M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id,i), 0);
- spin_unlock(&gM4u_reg_lock);
- break;
- }
- }
- mutex_unlock(&gM4u_seq_mutex);
-
- return ret;
+ for(i=0; i<SEQ_NR_PER_M4U_SLAVE; i++) {
+ if(pSeq[i].Enabled == 1 &&
+ pSeq[i].MVAStart>=MVAStart &&
+ pSeq[i].MVAEnd<=MVAEnd) {
+ pSeq[i].Enabled = 0;
+ spin_lock(&gM4u_reg_lock);
+ M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(m4u_slave_id,i), 0);
+ M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(m4u_slave_id,i), 0);
+ spin_unlock(&gM4u_reg_lock);
+ break;
+ }
+ }
+ mutex_unlock(&gM4u_seq_mutex);
+
+ return ret;
}
*/
-
-
static int _m4u_config_port(int port, int virt, int sec, int dis, int dir)
{
- int m4u_index = m4u_port_2_m4u_id(port);
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- unsigned long larb_base;
- unsigned int larb, larb_port;
- int ret = 0;
-
- M4ULOG_HIGH("config_port:%s,v%d,s%d\n",
- m4u_get_port_name(port), virt, sec);
-
- //MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagStart, port, virt);
-
- spin_lock(&gM4u_reg_lock);
- // Direction, one bit for each port, 1:-, 0:+
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIR(port),\
- F_MMU_PFH_DIR(port, 1), F_MMU_PFH_DIR(port, dir));
-
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIST(port),\
- F_MMU_PFH_DIST_MASK(port), F_MMU_PFH_DIST_VAL(port,dis));
-
- if(m4u_index==0)
- {
- int mmu_en = 0;
- larb = m4u_port_2_larb_id(port);
- larb_port = m4u_port_2_larb_port(port);
- larb_base = gLarbBaseAddr[larb];
-
- m4uHw_set_field_by_mask(larb_base, SMI_LARB_MMU_EN,\
- F_SMI_MMU_EN(larb_port, 1), F_SMI_MMU_EN(larb_port, !!(virt)));
-
- m4uHw_set_field_by_mask(larb_base, SMI_LARB_SEC_EN,\
- F_SMI_SEC_EN(larb_port, 1), F_SMI_SEC_EN(larb_port, !!(sec)));
-
- //multimedia engines will should set domain as 3.
- //m4uHw_set_field_by_mask(larb_base, REG_SMI_LARB_DOMN_OF_PORT(larb_port),
- // F_SMI_DOMN(larb_port, 0x3), F_SMI_DOMN(larb_port, pM4uPort->domain));
-
-
-//debug use
- mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
- if(!!(mmu_en) != virt)
- {
- M4ULOG_HIGH("m4u_config_port error, port=%s, Virtuality=%d, mmu_en=%x (%x, %x)\n",m4u_get_port_name(port), virt, mmu_en,M4U_ReadReg32(larb_base, SMI_LARB_MMU_EN),F_SMI_MMU_EN(larb_port, 1));
- }
- }
- else {
- larb_port = m4u_port_2_larb_port(port);
-
- m4uHw_set_field_by_mask(gPericfgBaseAddr, REG_PERIAXI_BUS_CTL3,\
- F_PERI_MMU_EN(larb_port, 1), F_PERI_MMU_EN(larb_port, !!(virt)));
- }
-
-unlock_out:
- spin_unlock(&gM4u_reg_lock);
-
- //MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagEnd, dis, dir);
-
- return ret;
+	int m4u_index = m4u_port_2_m4u_id(port);
+	unsigned long m4u_base;
+	unsigned long larb_base;
+	unsigned int larb, larb_port;
+	int ret = 0;
+
+	/* m4u_port_2_m4u_id() can return -1; validate before indexing
+	 * gM4UBaseAddr, otherwise the initializer reads out of bounds */
+	if (m4u_index == -1)
+		return -1;
+	m4u_base = gM4UBaseAddr[m4u_index];
+
+ M4ULOG_HIGH("config_port:%s,v%d,s%d\n",
+ m4u_get_port_name(port), virt, sec);
+
+ /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagStart, port, virt); */
+
+ spin_lock(&gM4u_reg_lock);
+ /* Direction, one bit for each port, 1:-, 0:+ */
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIR(port),
+ F_MMU_PFH_DIR(port, 1), F_MMU_PFH_DIR(port, dir));
+
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIST(port),
+ F_MMU_PFH_DIST_MASK(port), F_MMU_PFH_DIST_VAL(port, dis));
+
+ if (m4u_index == 0) {
+ int mmu_en = 0;
+
+ larb = m4u_port_2_larb_id(port);
+ larb_port = m4u_port_2_larb_port(port);
+ larb_base = gLarbBaseAddr[larb];
+ m4uHw_set_field_by_mask(larb_base, SMI_LARB_MMU_EN,
+ F_SMI_MMU_EN(larb_port, 1), F_SMI_MMU_EN(larb_port, !!(virt)));
+
+ m4uHw_set_field_by_mask(larb_base, SMI_LARB_SEC_EN,
+ F_SMI_SEC_EN(larb_port, 1), F_SMI_SEC_EN(larb_port, !!(sec)));
+
+ /* multimedia engines will should set domain as 3. */
+ /* m4uHw_set_field_by_mask(larb_base, REG_SMI_LARB_DOMN_OF_PORT(larb_port), */
+ /* F_SMI_DOMN(larb_port, 0x3), F_SMI_DOMN(larb_port, pM4uPort->domain)); */
+
+ /* debug use */
+ mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
+ if (!!(mmu_en) != virt)
+ M4ULOG_HIGH(
+ "m4u_config_port error, port=%s, Virtuality=%d, mmu_en=%x (%x, %x)\n",
+ m4u_get_port_name(port), virt, mmu_en,
+ M4U_ReadReg32(larb_base, SMI_LARB_MMU_EN),
+ F_SMI_MMU_EN(larb_port, 1));
+ } else {
+ larb_port = m4u_port_2_larb_port(port);
+
+ m4uHw_set_field_by_mask(gPericfgBaseAddr, REG_PERIAXI_BUS_CTL3,
+ F_PERI_MMU_EN(larb_port, 1), F_PERI_MMU_EN(larb_port, !!(virt)));
+ }
+
+ spin_unlock(&gM4u_reg_lock);
+
+ /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_CONFIG_PORT], MMProfileFlagEnd, dis, dir); */
+
+ return ret;
}
static inline void _m4u_port_clock_toggle(int m4u_index, int larb, int on)
{
- unsigned long long start, end;
-
- //MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagStart, larb, on);
- if(m4u_index==0)
- {
- start = sched_clock();
- if(on)
- {
- smi_common_clock_on();
- larb_clock_on(larb);
- }
- else
- {
- larb_clock_off(larb);
- smi_common_clock_off();
- }
- end = sched_clock();
-
- if(end-start > 50000000ULL) //unit is ns
- {
- M4ULOG_HIGH("warn: larb%d clock %d time: %lld ns\n", larb, on, end-start);
- }
- }
- //MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagEnd, 0, 0);
-}
-
-int m4u_config_port(M4U_PORT_STRUCT* pM4uPort) //native
-{
- M4U_PORT_ID PortID = (pM4uPort->ePortID);
- int m4u_index = m4u_port_2_m4u_id(PortID);
- int larb = m4u_port_2_larb_id(PortID);
- int ret;
-#ifdef M4U_TEE_SERVICE_ENABLE
- unsigned int larb_port, mmu_en = 0, sec_en = 0;
+ unsigned long long start, end;
+
+ /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagStart, larb, on); */
+ if (m4u_index == 0) {
+ start = sched_clock();
+ if (on) {
+ smi_common_clock_on();
+ larb_clock_on(larb);
+ } else {
+ larb_clock_off(larb);
+ smi_common_clock_off();
+ }
+ end = sched_clock();
+
+ if (end-start > 50000000ULL) /* unit is ns */
+ M4ULOG_HIGH("warn: larb%d clock %d time: %lld ns\n", larb, on, end-start);
+ }
+ /* MMProfileLogEx(M4U_MMP_Events[M4U_MMP_TOGGLE_CG], MMProfileFlagEnd, 0, 0); */
+}
+
+int m4u_config_port(M4U_PORT_STRUCT *pM4uPort) /* native */
+{
+ int m4u_index;
+ M4U_PORT_ID PortID;
+ int larb;
+ int ret;
+#ifdef M4U_TEE_SERVICE_ENABLE
+ unsigned int larb_port, mmu_en = 0, sec_en = 0;
#endif
-
- _m4u_port_clock_toggle(m4u_index, larb, 1);
-
+	/* NOTE(review): assumes M4U_PORT_UNKNOWN is the sentinel/count value,
+	 * so the sentinel itself is rejected too (>=, not >) — confirm against
+	 * the port enum definition. */
+	if (pM4uPort->ePortID < 0 || pM4uPort->ePortID >= M4U_PORT_UNKNOWN) {
+		M4UERR("port is unknown,error port is %d\n", pM4uPort->ePortID);
+		return -1;
+	}
+ PortID = (pM4uPort->ePortID);
+ m4u_index = m4u_port_2_m4u_id(PortID);
+ larb = m4u_port_2_larb_id(PortID);
+
+ _m4u_port_clock_toggle(m4u_index, larb, 1);
+
#ifdef M4U_TEE_SERVICE_ENABLE
- larb_port = m4u_port_2_larb_port(PortID);
-// mmu_en = !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1)));
-// sec_en = !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1)));
- M4ULOG_HIGH("m4u_config_port: %s, m4u_tee_en:%d, mmu_en: %d -> %d, sec_en:%d -> %d\n", m4u_get_port_name(PortID), m4u_tee_en, mmu_en, pM4uPort->Virtuality, sec_en, pM4uPort->Security);
+ larb_port = m4u_port_2_larb_port(PortID);
+ /* mmu_en =
+ * !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1))); */
+ /* sec_en =
+ * !!(m4uHw_get_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1))); */
+ M4ULOG_HIGH("m4u_config_port: %s, m4u_tee_en:%d, mmu_en: %d -> %d, sec_en:%d -> %d\n",
+ m4u_get_port_name(PortID), m4u_tee_en, mmu_en,
+ pM4uPort->Virtuality, sec_en, pM4uPort->Security);
#if 0
- if(mmu_en == pM4uPort->Virtuality && sec_en == pM4uPort->Security)
- {
- _m4u_port_clock_toggle(m4u_index, larb, 0);
- return 0;
- }
+ if (mmu_en == pM4uPort->Virtuality && sec_en == pM4uPort->Security) {
+ _m4u_port_clock_toggle(m4u_index, larb, 0);
+ return 0;
+ }
#endif
- if(m4u_tee_en)
- {
- m4u_config_port_tee(pM4uPort);
- }
- else
-#endif
- {
- ret = _m4u_config_port(PortID, pM4uPort->Virtuality,
- pM4uPort->Security, pM4uPort->Distance, pM4uPort->Direction);
- }
- _m4u_port_clock_toggle(m4u_index, larb, 0);
+ if (m4u_tee_en)
+ m4u_config_port_tee(pM4uPort);
+ else
+#endif
+ {
+ ret = _m4u_config_port(PortID, pM4uPort->Virtuality,
+ pM4uPort->Security, pM4uPort->Distance, pM4uPort->Direction);
+ }
+ _m4u_port_clock_toggle(m4u_index, larb, 0);
- return 0;
+ return 0;
}
-void m4u_port_array_init(struct m4u_port_array * port_array)
+void m4u_port_array_init(struct m4u_port_array *port_array)
{
- memset(port_array, 0, sizeof(struct m4u_port_array));
+ memset(port_array, 0, sizeof(struct m4u_port_array));
}
-int m4u_port_array_add(struct m4u_port_array *port_array,
+int m4u_port_array_add(struct m4u_port_array *port_array,
int port, int m4u_en, int secure)
{
- if(port>=M4U_PORT_NR)
- {
- M4UMSG("error: port_array_add, port=%d, v(%d), s(%d)\n", port, m4u_en, secure);
- return -1;
- }
- port_array->ports[port] = M4U_PORT_ATTR_EN;
- if(m4u_en)
- port_array->ports[port] |= M4U_PORT_ATTR_VIRTUAL;
- if(secure)
- port_array->ports[port] |= M4U_PORT_ATTR_SEC;
- return 0;
+ if (port >= M4U_PORT_NR) {
+ M4UMSG("error: port_array_add, port=%d, v(%d), s(%d)\n", port, m4u_en, secure);
+ return -1;
+ }
+ port_array->ports[port] = M4U_PORT_ATTR_EN;
+ if (m4u_en)
+ port_array->ports[port] |= M4U_PORT_ATTR_VIRTUAL;
+ if (secure)
+ port_array->ports[port] |= M4U_PORT_ATTR_SEC;
+ return 0;
}
int m4u_config_port_array(struct m4u_port_array *port_array)
{
- int port, larb, larb_port;
- int ret=0;
-
- unsigned int config_larb[SMI_LARB_NR] = {0};
- unsigned int regOri[SMI_LARB_NR] = {0} ;
- unsigned int regNew[SMI_LARB_NR] = {0} ;
- unsigned int change = 0;
- unsigned char m4u_port_array[(M4U_PORT_NR+1)/2] = {0};
-
- for(port=0; port<M4U_PORT_NR; port++)
- {
- if(port_array->ports[port]&&M4U_PORT_ATTR_EN != 0)
- {
- unsigned int value;
- larb = m4u_port_2_larb_id(port);
- larb_port = m4u_port_2_larb_port(port);
- config_larb[larb] |= (1 << larb_port);
- value = (!!(port_array->ports[port]&&M4U_PORT_ATTR_VIRTUAL))<<larb_port;
- regNew[larb] = (regNew[larb] & (~(1 << larb_port))) | value;
-
-#ifdef M4U_TEE_SERVICE_ENABLE
- {
- unsigned char attr = ((!!value)<<1)|0x1;
- if(port%2)
- m4u_port_array[port/2] |= (attr<<4);
- else
- m4u_port_array[port/2] |= attr;
- }
+ int port, larb, larb_port;
+ int ret = 0;
+
+ unsigned int config_larb[SMI_LARB_NR];
+ unsigned int regOri[SMI_LARB_NR];
+ unsigned int regNew[SMI_LARB_NR];
+ unsigned int change = 0;
+ unsigned char m4u_port_array[(M4U_PORT_NR+1)/2];
+
+ memset(config_larb, 0, SMI_LARB_NR * sizeof(unsigned int));
+ memset(regOri, 0, SMI_LARB_NR * sizeof(unsigned int));
+ memset(regNew, 0, SMI_LARB_NR * sizeof(unsigned int));
+ memset(m4u_port_array, 0, (M4U_PORT_NR+1)/2 * sizeof(unsigned char));
+
+	for (port = 0; port < M4U_PORT_NR; port++) {
+		/* bitwise '&' required: 'ports[port] && ATTR != 0' only tested
+		 * truthiness of ports[port], ignoring which attribute bits are set */
+		if ((port_array->ports[port] & M4U_PORT_ATTR_EN) != 0) {
+			unsigned int value;
+
+			larb = m4u_port_2_larb_id(port);
+			larb_port = m4u_port_2_larb_port(port);
+			config_larb[larb] |= (1 << larb_port);
+			/* '&' here too: with '&&' mmu_en was forced on for every enabled port */
+			value = (!!(port_array->ports[port] & M4U_PORT_ATTR_VIRTUAL))<<larb_port;
+			regOri[larb] = M4U_ReadReg32(gLarbBaseAddr[larb], SMI_LARB_MMU_EN);
+			regNew[larb] = (regOri[larb] & (~(1 << larb_port)))
+				| (regNew[larb] & (~(1 << larb_port))) | value;
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ {
+ unsigned char attr = ((!!value)<<1)|0x1;
+
+ if (port%2)
+ m4u_port_array[port/2] |= (attr<<4);
+ else
+ m4u_port_array[port/2] |= attr;
+ }
#endif
- M4ULOG_LOW("m4u_config_port_array 0, 0x%x, 0x%x, 0x%x, port_array: 0x%x\n", port_array->ports[port], value, regNew[larb], m4u_port_array[port/2]);
- }
- }
-
-
-
- for(larb = 0; larb < SMI_LARB_NR; larb++)
- {
- if(0 != config_larb[larb])
- {
- _m4u_port_clock_toggle(0, larb, 1);
-#ifdef M4U_TEE_SERVICE_ENABLE
- if(m4u_tee_en)
- {
- change = 1;
- }
- else
+ M4ULOG_LOW("m4u_config_port_array 0, 0x%x, 0x%x, 0x%x, port_array: 0x%x\n",
+ port_array->ports[port], value, regNew[larb], m4u_port_array[port/2]);
+ }
+ }
+
+ for (larb = 0; larb < SMI_LARB_NR; larb++) {
+ if (0 != config_larb[larb]) {
+ _m4u_port_clock_toggle(0, larb, 1);
+#ifdef M4U_TEE_SERVICE_ENABLE
+ if (m4u_tee_en)
+ change = 1;
+ else
#endif
- {
- regOri[larb] = M4U_ReadReg32(gLarbBaseAddr[larb], SMI_LARB_MMU_EN);
- M4ULOG_LOW("m4u_config_port_array 2 larb: %d ori reg: 0x%x, new reg: 0x%x\n", larb, regOri[larb], regNew[larb]);
- if(regOri[larb] != regNew[larb])
- change = 1;
- }
- }
- }
- M4ULOG_HIGH("m4u_config_port_array 1: [0x%x, 0x%x, 0x%x, 0x%x, 0x%x] %d\n", config_larb[0], config_larb[1], config_larb[2], config_larb[3], config_larb[4], change);
-
-#ifdef M4U_TEE_SERVICE_ENABLE
- if(m4u_tee_en && 1 == change)
- {
- m4u_config_port_array_tee(m4u_port_array);
- for(larb = 0; larb < SMI_LARB_NR; larb++)
- {
- if(0 != config_larb[larb])
- _m4u_port_clock_toggle(0, larb, 0);
- }
- return ret;
- }
+ {
+ regOri[larb] = M4U_ReadReg32(gLarbBaseAddr[larb], SMI_LARB_MMU_EN);
+ M4ULOG_LOW("m4u_config_port_array 2 larb: %d ori reg: 0x%x, new reg: 0x%x\n",
+ larb, regOri[larb], regNew[larb]);
+ if (regOri[larb] != regNew[larb])
+ change = 1;
+ }
+ }
+ M4ULOG_MID("m4u_config_port_array 1: larb: %d, [0x%x], %d\n", larb, config_larb[larb], change);
+ }
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ if (m4u_tee_en && 1 == change) {
+ m4u_config_port_array_tee(m4u_port_array);
+ for (larb = 0; larb < SMI_LARB_NR; larb++)
+ if (0 != config_larb[larb])
+ _m4u_port_clock_toggle(0, larb, 0);
+ return ret;
+ }
#endif
- for(larb = 0; larb < SMI_LARB_NR; larb++)
- {
-
- if(0 != config_larb[larb] && 1 == change )
- {
- M4ULOG_MID("m4u_config_port_array larb: %d ori reg: 0x%x, new reg: 0x%x\n", larb, regOri[larb], regNew[larb]);
- spin_lock(&gM4u_reg_lock);
- m4uHw_set_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, config_larb[larb], regNew[larb]);
- spin_unlock(&gM4u_reg_lock);
- }
- if(0 != config_larb[larb])
- _m4u_port_clock_toggle(0, larb, 0);
- }
+ for (larb = 0; larb < SMI_LARB_NR; larb++) {
+ if (0 != config_larb[larb] && 1 == change) {
+ M4ULOG_MID("m4u_config_port_array larb: %d ori reg: 0x%x, new reg: 0x%x\n",
+ larb, regOri[larb], regNew[larb]);
+ spin_lock(&gM4u_reg_lock);
+ m4uHw_set_field_by_mask(gLarbBaseAddr[larb], SMI_LARB_MMU_EN, config_larb[larb], regNew[larb]);
+ spin_unlock(&gM4u_reg_lock);
+ }
+ if (0 != config_larb[larb])
+ _m4u_port_clock_toggle(0, larb, 0);
+ }
- return ret;
+ return ret;
}
-
-
void m4u_get_perf_counter(int m4u_index, int m4u_slave_id, M4U_PERF_COUNT *pM4U_perf_count)
{
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
- pM4U_perf_count->transaction_cnt= M4U_ReadReg32(m4u_base, REG_MMU_ACC_CNT(m4u_slave_id)); ///> Transaction access count
- pM4U_perf_count->main_tlb_miss_cnt= M4U_ReadReg32(m4u_base, REG_MMU_MAIN_MSCNT(m4u_slave_id)); ///> Main TLB miss count
- pM4U_perf_count->pfh_tlb_miss_cnt= M4U_ReadReg32(m4u_base, REG_MMU_PF_MSCNT); ///> Prefetch TLB miss count
- pM4U_perf_count->pfh_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_CNT); ///> Prefetch count
- pM4U_perf_count->rs_perf_cnt = M4U_ReadReg32(m4u_base, REG_MMU_RS_PERF_CNT(m4u_slave_id));
-}
+	unsigned long m4u_base = gM4UBaseAddr[m4u_index];
+
+	pM4U_perf_count->transaction_cnt = M4U_ReadReg32(m4u_base, REG_MMU_ACC_CNT(m4u_slave_id));
+ pM4U_perf_count->main_tlb_miss_cnt = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_MSCNT(m4u_slave_id));
+ pM4U_perf_count->pfh_tlb_miss_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_MSCNT);
+ pM4U_perf_count->pfh_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_CNT); /* /> Prefetch count */
+ pM4U_perf_count->rs_perf_cnt = M4U_ReadReg32(m4u_base, REG_MMU_RS_PERF_CNT(m4u_slave_id));
+}
int m4u_monitor_start(int m4u_id)
{
- unsigned long m4u_base = gM4UBaseAddr[m4u_id];
- M4UINFO("====m4u_monitor_start: %d======\n", m4u_id);
- //clear GMC performance counter
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
- F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(1));
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
- F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(0));
-
- //enable GMC performance monitor
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
- F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(1));
- return 0;
+ unsigned long m4u_base;
+
+ if (m4u_id < 0) {
+ M4UERR("ERROR m4u id ,error id is %d\n", m4u_id);
+ return -1;
+ }
+ m4u_base = gM4UBaseAddr[m4u_id];
+
+ M4UINFO("====m4u_monitor_start: %d======\n", m4u_id);
+ /* clear GMC performance counter */
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
+ F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(1));
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
+ F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(0));
+
+ /* enable GMC performance monitor */
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
+ F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(1));
+ return 0;
}
/**
- * @brief ,
- * @param
- * @return
+ * @brief ,
+ * @param
+ * @return
*/
int m4u_monitor_stop(int m4u_id)
{
- M4U_PERF_COUNT cnt;
- int m4u_index = m4u_id;
- unsigned long m4u_base = gM4UBaseAddr[m4u_index];
-
- //disable GMC performance monitor
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
- F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(0));
-
- m4u_get_perf_counter(m4u_index, 0, &cnt);
- //read register get the count
- M4ULOG_MID("[M4U%d-%d] total:%u, main miss:%u, pfh miss(walk):%u, auto pfh:%u\n",
- m4u_id, 0,
- cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt,cnt.pfh_cnt);
-
- return 0;
-}
+	M4U_PERF_COUNT cnt;
+	int m4u_index = m4u_id;
+	unsigned long m4u_base;
+
+	if (m4u_id < 0) {
+ M4UERR("ERROR m4u id ,error id is %d\n", m4u_id);
+ return -1;
+ }
+ m4u_base = gM4UBaseAddr[m4u_id];
+
+ /* disable GMC performance monitor */
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
+ F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(0));
+
+ m4u_get_perf_counter(m4u_index, 0, &cnt);
+ /* read register get the count */
+ M4ULOG_MID("[M4U%d-%d] total:%u, main miss:%u, pfh miss(walk):%u, auto pfh:%u\n",
+ m4u_id, 0,
+ cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt);
+
+ return 0;
+}
void m4u_print_perf_counter(int m4u_index, int m4u_slave_id, const char *msg)
{
- M4U_PERF_COUNT cnt;
- M4UINFO("====m4u performance count for %s m4u%d_%d======\n", msg, m4u_index, m4u_slave_id);
- m4u_get_perf_counter(m4u_index, m4u_slave_id, &cnt);
- M4UINFO("total trans=%u, main_miss=%u, pfh_miss=%u, pfh_cnt=%u, rs_perf_cnt=%u\n",
- cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt, cnt.rs_perf_cnt);
-}
+	M4U_PERF_COUNT cnt;
+
+	M4UINFO("====m4u performance count for %s m4u%d_%d======\n", msg, m4u_index, m4u_slave_id);
+ m4u_get_perf_counter(m4u_index, m4u_slave_id, &cnt);
+ M4UINFO("total trans=%u, main_miss=%u, pfh_miss=%u, pfh_cnt=%u, rs_perf_cnt=%u\n",
+ cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt, cnt.rs_perf_cnt);
+}
#define M4U_REG_BACKUP_SIZE (100*sizeof(unsigned int))
-static unsigned int* pM4URegBackUp = 0;
-static unsigned int gM4u_reg_backup_real_size = 0;
+static unsigned int *pM4URegBackUp;
+static unsigned int gM4u_reg_backup_real_size;
+
+#define __M4U_BACKUP(base, reg, back) ((back) = M4U_ReadReg32(base, reg))
-#define __M4U_BACKUP(base, reg, back) do{(back)=M4U_ReadReg32(base, reg);}while(0)
-void __M4U_RESTORE(unsigned long base, unsigned int reg, unsigned int back) {M4U_WriteReg32(base, reg, back);}
+void __M4U_RESTORE(unsigned long base, unsigned int reg, unsigned int back) {M4U_WriteReg32(base, reg, back); }
int m4u_reg_backup(void)
{
- unsigned int* pReg = pM4URegBackUp;
- unsigned long m4u_base;
- int m4u_id, m4u_slave;
- int seq, mau;
- unsigned int real_size;
- int pfh_dist, pfh_dir;
-
- for(m4u_id=0; m4u_id<TOTAL_M4U_NUM; m4u_id++)
- {
- m4u_base = gM4UBaseAddr[m4u_id];
- __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_PRIORITY , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_DCM_DIS , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_WR_LEN , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_HW_DEBUG , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++) );
- for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++) {
- __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++) );
- }
- for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++) {
- __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++) );
- }
- __M4U_BACKUP(m4u_base, REG_MMU_CTRL_REG , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++) );
-
- for(m4u_slave=0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++)
- {
- for(seq=0; seq<M4U_SEQ_NUM(m4u_id); seq++)
- {
- __M4U_BACKUP(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++) );
- }
-
- for(mau=0; mau<MAU_NR_PER_M4U_SLAVE; mau++)
- {
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_START(m4u_slave,mau) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave,mau) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_END(m4u_slave,mau) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave,mau) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave,mau) , *(pReg++) );
- }
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++) );
- __M4U_BACKUP(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++) );
- }
- }
-
- //check register size (to prevent overflow)
- real_size = (pReg - pM4URegBackUp);
- if(real_size > M4U_REG_BACKUP_SIZE)
- {
- m4u_aee_print("m4u_reg overflow! %d>%d\n", real_size, (int)M4U_REG_BACKUP_SIZE);
- }
- gM4u_reg_backup_real_size = real_size;
+ unsigned int *pReg = pM4URegBackUp;
+ unsigned long m4u_base;
+ int m4u_id, m4u_slave;
+ int seq, mau;
+ unsigned int real_size;
+ int pfh_dist, pfh_dir;
+
+ for (m4u_id = 0; m4u_id < TOTAL_M4U_NUM; m4u_id++) {
+ m4u_base = gM4UBaseAddr[m4u_id];
+ __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_PRIORITY , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_DCM_DIS , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_WR_LEN , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_HW_DEBUG , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++));
+ for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++)
+ __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++));
+ for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++)
+ __M4U_BACKUP(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_CTRL_REG , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++));
+
+ for (m4u_slave = 0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++) {
+ for (seq = 0; seq < M4U_SEQ_NUM(m4u_id); seq++) {
+ __M4U_BACKUP(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++));
+ }
+
+ for (mau = 0; mau < MAU_NR_PER_M4U_SLAVE; mau++) {
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_START(m4u_slave, mau) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave, mau) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_END(m4u_slave, mau) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave, mau) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave, mau) , *(pReg++));
+ }
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++));
+ __M4U_BACKUP(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++));
+ }
+ }
- return 0;
+ /* check register size (to prevent overflow) */
+ real_size = (pReg - pM4URegBackUp);
+ if (real_size > M4U_REG_BACKUP_SIZE)
+ m4u_aee_print("m4u_reg overflow! %d>%d\n", real_size, (int)M4U_REG_BACKUP_SIZE);
+ gM4u_reg_backup_real_size = real_size;
+
+ return 0;
}
int m4u_reg_restore(void)
{
- unsigned int* pReg = pM4URegBackUp;
- unsigned long m4u_base;
- int m4u_id, m4u_slave;
- int seq, mau;
- unsigned int real_size;
- int pfh_dist, pfh_dir;
-
- for(m4u_id=0; m4u_id<TOTAL_M4U_NUM; m4u_id++)
- {
- m4u_base = gM4UBaseAddr[m4u_id];
- __M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_PRIORITY , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_DCM_DIS , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_WR_LEN , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_HW_DEBUG , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++) );
- for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++) {
- __M4U_RESTORE(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++) );
- }
- for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++) {
- __M4U_RESTORE(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++) );
- }
- __M4U_RESTORE(m4u_base, REG_MMU_CTRL_REG , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++) );
-
- for(m4u_slave=0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++)
- {
-
- for(seq=0; seq<M4U_SEQ_NUM(m4u_id); seq++)
- {
- __M4U_RESTORE(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++) );
- }
-
- for(mau=0; mau<MAU_NR_PER_M4U_SLAVE; mau++)
- {
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_START(m4u_slave,mau) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave,mau) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_END(m4u_slave,mau) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave,mau) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave,mau) , *(pReg++) );
- }
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++) );
- __M4U_RESTORE(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++) );
- }
-
- }
-
- //check register size (to prevent overflow)
- real_size = (pReg - pM4URegBackUp);
- if(real_size != gM4u_reg_backup_real_size)
- {
- m4u_aee_print("m4u_reg_retore %d!=%d\n", real_size, gM4u_reg_backup_real_size);
- }
+ unsigned int *pReg = pM4URegBackUp;
+ unsigned long m4u_base;
+ int m4u_id, m4u_slave;
+ int seq, mau;
+ unsigned int real_size;
+ int pfh_dist, pfh_dir;
+
+ for (m4u_id = 0; m4u_id < TOTAL_M4U_NUM; m4u_id++) {
+ m4u_base = gM4UBaseAddr[m4u_id];
+ __M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMUg_PT_BASE_SEC , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_SEC_ABORT_INFO , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_STANDARD_AXI_MODE , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_PRIORITY , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_DCM_DIS , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_WR_LEN , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_HW_DEBUG , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_NON_BLOCKING_DIS , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_LEGACY_4KB_MODE , *(pReg++));
+ for (pfh_dist = 0; pfh_dist < MMU_PFH_DIST_NR; pfh_dist++)
+ __M4U_RESTORE(m4u_base, REG_MMU_PFH_DIST_NR(pfh_dist) , *(pReg++));
+ for (pfh_dir = 0; pfh_dir < MMU_PFH_DIR_NR; pfh_dir++)
+ __M4U_RESTORE(m4u_base, REG_MMU_PFH_DIR_NR(pfh_dir) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_CTRL_REG , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_INT_L2_CONTROL , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_INT_MAIN_CONTROL , *(pReg++));
+
+ for (m4u_slave = 0; m4u_slave < M4U_SLAVE_NUM(m4u_id); m4u_slave++) {
+ for (seq = 0; seq < M4U_SEQ_NUM(m4u_id); seq++) {
+ __M4U_RESTORE(m4u_base, REG_MMU_SQ_START(m4u_slave, seq) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_SQ_END(m4u_slave, seq) , *(pReg++));
+ }
+
+ for (mau = 0; mau < MAU_NR_PER_M4U_SLAVE; mau++) {
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_START(m4u_slave, mau) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_START_BIT32(m4u_slave, mau) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_END(m4u_slave, mau) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_END_BIT32(m4u_slave, mau) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_PORT_EN(m4u_slave, mau) , *(pReg++));
+ }
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_LARB_EN(m4u_slave) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_IO(m4u_slave) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_RW(m4u_slave) , *(pReg++));
+ __M4U_RESTORE(m4u_base, REG_MMU_MAU_VA(m4u_slave) , *(pReg++));
+ }
+ }
- return 0;
+ /* check register size (to prevent overflow) */
+ real_size = (pReg - pM4URegBackUp);
+ if (real_size != gM4u_reg_backup_real_size)
+ m4u_aee_print("m4u_reg_retore %d!=%d\n", real_size, gM4u_reg_backup_real_size);
+
+ return 0;
}
static unsigned int larb_reg_backup_buf[SMI_LARB_NR][6];
-static void larb_backup(struct larb_monitor *h, int larb_idx)
+void m4u_larb_backup(int larb_idx)
{
- unsigned long larb_base;
-
- if(larb_idx >= SMI_LARB_NR)
- {
- M4UMSG("error: %s larb_idx = %d\n", __FUNCTION__, larb_idx);
- return;
- }
+ unsigned long larb_base;
- larb_base = gLarbBaseAddr[larb_idx];
- M4UINFO("larb(%d) backup\n", larb_idx);
+ if (larb_idx >= SMI_LARB_NR) {
+ M4UMSG("error: %s larb_idx = %d\n", __func__, larb_idx);
+ return;
+ }
-#ifdef M4U_TEE_SERVICE_ENABLE
- if(m4u_tee_en)
- {
-// m4u_larb_backup_sec(larb_idx);
- }
-#endif
- {
- __M4U_BACKUP(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
- __M4U_BACKUP(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
- __M4U_BACKUP(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
- __M4U_BACKUP(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
- __M4U_BACKUP(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
- __M4U_BACKUP(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
- }
- return;
-}
-
-static void larb_restore(struct larb_monitor *h, int larb_idx)
-{
- unsigned long larb_base;
-
- if(larb_idx >= SMI_LARB_NR)
- {
- M4UMSG("error: %s larb_idx = %d\n", __FUNCTION__, larb_idx);
- return;
- }
-
- larb_base = gLarbBaseAddr[larb_idx];
- M4UINFO("larb(%d) restore\n", larb_idx);
+ larb_base = gLarbBaseAddr[larb_idx];
+ M4ULOG_MID("larb(%d) backup\n", larb_idx);
#ifdef M4U_TEE_SERVICE_ENABLE
- if(m4u_tee_en)
- {
-// m4u_larb_restore_sec(larb_idx);
- }
- else
-#endif
- {
- __M4U_RESTORE(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
- __M4U_RESTORE(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
- __M4U_RESTORE(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
- __M4U_RESTORE(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
- __M4U_RESTORE(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
- __M4U_RESTORE(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
- }
- return;
-}
-
-struct larb_monitor m4u_larb_monitor_handler =
-{
- .level = LARB_MONITOR_LEVEL_HIGH,
- .backup = larb_backup,
- .restore = larb_restore
-};
+ if (m4u_tee_en)
+ /* m4u_larb_backup_sec(larb_idx); */
+#endif
+ {
+ __M4U_BACKUP(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
+ __M4U_BACKUP(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
+ __M4U_BACKUP(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
+ __M4U_BACKUP(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
+ __M4U_BACKUP(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
+ __M4U_BACKUP(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
+ }
+}
-static void m4u_register_larb_monitor(void)
+void m4u_larb_restore(int larb_idx)
{
- int i;
- for( i=0 ; i < SMI_LARB_NR ; i++)
- {
- larb_clock_on(i);
- }
- register_larb_monitor(&m4u_larb_monitor_handler);
- for( i=0 ; i < SMI_LARB_NR ; i++)
- {
- larb_clock_off(i);
- }
-}
+ unsigned long larb_base;
+ if (larb_idx >= SMI_LARB_NR) {
+ M4UMSG("error: %s larb_idx = %d\n", __func__, larb_idx);
+ return;
+ }
+ larb_base = gLarbBaseAddr[larb_idx];
+ M4ULOG_MID("larb(%d) restore\n", larb_idx);
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+ if (m4u_tee_en) {
+ /* m4u_larb_restore_sec(larb_idx); */
+ } else
+#endif
+ {
+ __M4U_RESTORE(larb_base, SMI_LARB_MMU_EN, larb_reg_backup_buf[larb_idx][0]);
+ __M4U_RESTORE(larb_base, SMI_LARB_SEC_EN, larb_reg_backup_buf[larb_idx][1]);
+ __M4U_RESTORE(larb_base, SMI_LARB_DOMN_0, larb_reg_backup_buf[larb_idx][2]);
+ __M4U_RESTORE(larb_base, SMI_LARB_DOMN_1, larb_reg_backup_buf[larb_idx][3]);
+ __M4U_RESTORE(larb_base, SMI_LARB_DOMN_2, larb_reg_backup_buf[larb_idx][4]);
+ __M4U_RESTORE(larb_base, SMI_LARB_DOMN_3, larb_reg_backup_buf[larb_idx][5]);
+ }
+}
void m4u_print_port_status(struct seq_file *seq, int only_print_active)
{
- int port, mmu_en, sec;
- int m4u_index, larb, larb_port;
- unsigned long larb_base;
+ int port, mmu_en, sec;
+ int m4u_index, larb, larb_port;
+ unsigned long larb_base;
- M4U_PRINT_LOG_OR_SEQ(seq, "m4u_print_port_status ========>\n");
-
- smi_common_clock_on();
- larb_clock_all_on();
+ M4U_PRINT_LOG_OR_SEQ(seq, "m4u_print_port_status ========>\n");
- for(port=0; port<gM4u_port_num; port++)
- {
- m4u_index = m4u_port_2_m4u_id(port);
- if(m4u_index==0)
- {
- larb = m4u_port_2_larb_id(port);
- larb_port = m4u_port_2_larb_port(port);
- larb_base = gLarbBaseAddr[larb];
-
- mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
- sec = m4uHw_get_field_by_mask(larb_base, SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1));
+ smi_common_clock_on();
+ larb_clock_all_on();
- }
- else
- {
- larb_port = m4u_port_2_larb_port(port);
+ for (port = 0; port < gM4u_port_num; port++) {
+ m4u_index = m4u_port_2_m4u_id(port);
+ if (m4u_index == 0) {
+ larb = m4u_port_2_larb_id(port);
+ larb_port = m4u_port_2_larb_port(port);
+ larb_base = gLarbBaseAddr[larb];
- mmu_en = m4uHw_get_field_by_mask(gPericfgBaseAddr, REG_PERIAXI_BUS_CTL3,F_PERI_MMU_EN(larb_port, 1));
- }
+ mmu_en = m4uHw_get_field_by_mask(larb_base, SMI_LARB_MMU_EN, F_SMI_MMU_EN(larb_port, 1));
+ sec = m4uHw_get_field_by_mask(larb_base, SMI_LARB_SEC_EN, F_SMI_SEC_EN(larb_port, 1));
+ } else {
+ larb_port = m4u_port_2_larb_port(port);
+ mmu_en = m4uHw_get_field_by_mask(gPericfgBaseAddr,
+ REG_PERIAXI_BUS_CTL3, F_PERI_MMU_EN(larb_port, 1));
+ }
- if(only_print_active && !mmu_en)
- {
- continue;
- }
- M4U_PRINT_LOG_OR_SEQ(seq, "%s(%d),", m4u_get_port_name(port), !!mmu_en);
- }
+ if (only_print_active && !mmu_en)
+ continue;
- larb_clock_all_off();
- smi_common_clock_off();
+ M4U_PRINT_LOG_OR_SEQ(seq, "%s(%d),", m4u_get_port_name(port), !!mmu_en);
+ }
- M4U_PRINT_LOG_OR_SEQ(seq, "\n");
+ larb_clock_all_off();
+ smi_common_clock_off();
+ M4U_PRINT_LOG_OR_SEQ(seq, "\n");
}
/*
@@ -1848,70 +1728,70 @@ static int m4u_disable_error_hang(int m4u_id)
}
*/
-int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void* data)
+int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void *data)
{
- if(port > M4U_PORT_UNKNOWN)
- {
- M4UMSG("%s fail, port=%d\n", __FUNCTION__, port);
- return -1;
- }
- gM4uPort[port].reclaim_fn= fn;
- gM4uPort[port].reclaim_data= data;
- return 0;
+ if (port > M4U_PORT_UNKNOWN) {
+ M4UMSG("%s fail, port=%d\n", __func__, port);
+ return -1;
+ }
+ gM4uPort[port].reclaim_fn = fn;
+ gM4uPort[port].reclaim_data = data;
+ return 0;
}
int m4u_unregister_reclaim_callback(int port)
{
- if(port > M4U_PORT_UNKNOWN)
- {
- M4UMSG("%s fail, port=%d\n", __FUNCTION__, port);
- return -1;
- }
- gM4uPort[port].reclaim_fn= NULL;
- gM4uPort[port].reclaim_data= NULL;
- return 0;
+ if (port > M4U_PORT_UNKNOWN) {
+ M4UMSG("%s fail, port=%d\n", __func__, port);
+ return -1;
+ }
+ gM4uPort[port].reclaim_fn = NULL;
+ gM4uPort[port].reclaim_data = NULL;
+ return 0;
}
int m4u_reclaim_notify(int port, unsigned int mva, unsigned int size)
{
- int i;
- for(i=0; i<M4U_PORT_UNKNOWN; i++)
- {
- if(gM4uPort[i].reclaim_fn)
- gM4uPort[i].reclaim_fn(port, mva, size, gM4uPort[i].reclaim_data);
- }
- return 0;
+ int i;
+
+ for (i = 0; i < M4U_PORT_UNKNOWN; i++)
+ if (gM4uPort[i].reclaim_fn)
+ gM4uPort[i].reclaim_fn(port, mva, size, gM4uPort[i].reclaim_data);
+ return 0;
}
-int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void* data)
+int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void *data)
{
- if(port > M4U_PORT_UNKNOWN)
- {
- M4UMSG("%s fail, port=%d\n", __FUNCTION__, port);
- return -1;
- }
- gM4uPort[port].fault_fn= fn;
- gM4uPort[port].fault_data= data;
- return 0;
+ if (port > M4U_PORT_UNKNOWN) {
+ M4UMSG("%s fail, port=%d\n", __func__, port);
+ return -1;
+ }
+ gM4uPort[port].fault_fn = fn;
+ gM4uPort[port].fault_data = data;
+ return 0;
}
+
int m4u_unregister_fault_callback(int port)
{
- if(port > M4U_PORT_UNKNOWN)
- {
- M4UMSG("%s fail, port=%d\n", __FUNCTION__, port);
- return -1;
- }
- gM4uPort[port].fault_fn= NULL;
- gM4uPort[port].fault_data= NULL;
- return 0;
+ if (port > M4U_PORT_UNKNOWN) {
+ M4UMSG("%s fail, port=%d\n", __func__, port);
+ return -1;
+ }
+ gM4uPort[port].fault_fn = NULL;
+ gM4uPort[port].fault_data = NULL;
+ return 0;
}
int m4u_enable_tf(int port, bool fgenable)
{
- gM4uPort[port].enable_tf = fgenable;
- return 0;
+ if (port >= 0 && port < gM4u_port_num)
+ gM4uPort[port].enable_tf = fgenable;
+ else
+ M4UMSG("%s, error: port=%d\n", __func__, port);
+
+ return 0;
}
-//==============================================================================
+/* ============================================================================== */
static struct timer_list m4u_isr_pause_timer;
static void m4u_isr_restart(unsigned long unused)
@@ -1929,8 +1809,8 @@ static int m4u_isr_pause_timer_init(void)
static int m4u_isr_pause(int delay)
{
- m4u_intr_modify_all(0); //disable all intr
- m4u_isr_pause_timer.expires = jiffies + delay*HZ; //delay seconds
+ m4u_intr_modify_all(0); /* disable all intr */
+ m4u_isr_pause_timer.expires = jiffies + delay*HZ; /* delay seconds */
add_timer(&m4u_isr_pause_timer);
M4UMSG("warning: stop m4u irq for %ds\n", delay);
return 0;
@@ -1938,519 +1818,495 @@ static int m4u_isr_pause(int delay)
static void m4u_isr_record(void)
{
- static int m4u_isr_cnt=0;
- static unsigned long first_jiffies=0;
-
- //we allow one irq in 1s, or we will disable them after 5s.
- if(!m4u_isr_cnt || time_after(jiffies, first_jiffies + m4u_isr_cnt*HZ))
- {
- m4u_isr_cnt = 1;
- first_jiffies = jiffies;
- }
- else
- {
- m4u_isr_cnt++;
- if(m4u_isr_cnt >= 5)
- {
- //5 irqs come in 5s, too many !
- //disable irq for a while, to avoid HWT timeout
- m4u_isr_pause(10);
- m4u_isr_cnt=0;
- }
- }
-}
-
-#define MMU_INT_REPORT(mmu,mmu_2nd_id,id) M4UMSG("iommu%d_%d " #id "(0x%x) int happens!!\n", mmu,mmu_2nd_id, id)
+ static int m4u_isr_cnt;
+ static unsigned long first_jiffies;
+
+ /* we allow one irq in 1s, or we will disable them after 5s. */
+ if (!m4u_isr_cnt || time_after(jiffies, first_jiffies + m4u_isr_cnt*HZ)) {
+ m4u_isr_cnt = 1;
+ first_jiffies = jiffies;
+ } else {
+ m4u_isr_cnt++;
+ if (m4u_isr_cnt >= 5) {
+ /* 5 irqs come in 5s, too many ! */
+ /* disable irq for a while, to avoid HWT timeout */
+ m4u_isr_pause(10);
+ m4u_isr_cnt = 0;
+ }
+ }
+}
+
+#define MMU_INT_REPORT(mmu, mmu_2nd_id, id) M4UMSG("iommu%d_%d " #id "(0x%x) int happens!!\n", mmu, mmu_2nd_id, id)
irqreturn_t MTK_M4U_isr(int irq, void *dev_id)
{
- unsigned long m4u_base;
- unsigned int m4u_index;
-
- if(irq == gM4uDev->irq_num[0])
- {
- m4u_base = gM4UBaseAddr[0];
- m4u_index = 0;
- }
- else if(irq == gM4uDev->irq_num[1])
- {
- m4u_base = gM4UBaseAddr[1];
- m4u_index = 1;
- }
- else
- {
- M4UMSG("MTK_M4U_isr(), Invalid irq number %d\n", irq);
- return -1;
- }
-
- {
- //L2 interrupt
- unsigned int regval = M4U_ReadReg32(m4u_base, REG_MMU_L2_FAULT_ST);
- M4UMSG("m4u L2 interrupt sta=0x%x\n", regval);
-
- if(regval&F_INT_L2_MULTI_HIT_FAULT)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MULTI_HIT_FAULT);
- }
- if(regval&F_INT_L2_TABLE_WALK_FAULT)
- {
- unsigned int fault_va, layer;
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_TABLE_WALK_FAULT);
- fault_va = M4U_ReadReg32(m4u_base, REG_MMU_TBWALK_FAULT_VA);
- layer = fault_va&1;
- fault_va &= (~1);
- m4u_aee_print("L2 table walk fault: mva=0x%x, layer=%d\n", fault_va, layer);
-
- }
- if(regval&F_INT_L2_PFH_DMA_FIFO_OVERFLOW)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_DMA_FIFO_OVERFLOW);
- }
- if(regval&F_INT_L2_MISS_DMA_FIFO_OVERFLOW)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_DMA_FIFO_OVERFLOW);
- }
- if(regval&F_INT_L2_INVALD_DONE)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_INVALD_DONE);
- }
- if(regval&F_INT_L2_PFH_OUT_FIFO_ERROR)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_OUT_FIFO_ERROR);
- }
- if(regval&F_INT_L2_PFH_IN_FIFO_ERROR)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_IN_FIFO_ERROR);
- }
- if(regval&F_INT_L2_MISS_OUT_FIFO_ERROR)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_OUT_FIFO_ERROR);
- }
- if(regval&F_INT_L2_MISS_IN_FIFO_ERR)
- {
- MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_IN_FIFO_ERR);
- }
-
- }
-
-
- {
- unsigned int IntrSrc = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_FAULT_ST);
- int m4u_slave_id;
- unsigned int regval;
- int layer, write, m4u_port;
- unsigned int fault_mva, fault_pa;
-
- M4UMSG("m4u main interrupt happened: sta=0x%x\n", IntrSrc);
-
- if(IntrSrc & (F_INT_MMU0_MAIN_MSK | F_INT_MMU0_MAU_MSK))
- {
- m4u_slave_id = 0;
- }
- else
- {
- m4u_clear_intr(m4u_index);
- return 0;
- }
-
- //read error info from registers
- fault_mva = M4U_ReadReg32(m4u_base, REG_MMU_FAULT_VA(m4u_slave_id));
- layer = !!(fault_mva & F_MMU_FAULT_VA_LAYER_BIT);
- write = !!(fault_mva & F_MMU_FAULT_VA_WRITE_BIT);
- fault_mva &= F_MMU_FAULT_VA_MSK;
- fault_pa = M4U_ReadReg32(m4u_base, REG_MMU_INVLD_PA(m4u_slave_id));
- regval = M4U_ReadReg32(m4u_base, REG_MMU_INT_ID(m4u_slave_id));
- m4u_port = m4u_get_port_by_tf_id(m4u_index, regval);
-
- //dump something quickly
- m4u_dump_rs_info(m4u_index, m4u_slave_id);
- m4u_dump_invalid_main_tlb(m4u_index, m4u_slave_id);
- m4u_dump_main_tlb(m4u_index, 0);
- m4u_dump_pfh_tlb(m4u_index);
-
- if(IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id))
- {
- int bypass_DISP_TF = 0;
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TRANSLATION_FAULT(m4u_slave_id));
- M4UMSG("fault: port=%s, mva=0x%x, pa=0x%x, layer=%d, wr=%d, 0x%x\n",
- m4u_get_port_name(m4u_port), fault_mva, fault_pa, layer, write, regval);
-
- if(M4U_PORT_DISP_OVL0 == m4u_port
+ unsigned long m4u_base;
+ unsigned int m4u_index;
+
+ if (irq == gM4uDev->irq_num[0]) {
+ m4u_base = gM4UBaseAddr[0];
+ m4u_index = 0;
+ } else {
+ M4UMSG("MTK_M4U_isr(), Invalid irq number %d\n", irq);
+ return -1;
+ }
+
+ {
+ /* L2 interrupt */
+ unsigned int regval = M4U_ReadReg32(m4u_base, REG_MMU_L2_FAULT_ST);
+
+ M4UMSG("m4u L2 interrupt sta=0x%x\n", regval);
+
+ if (regval&F_INT_L2_MULTI_HIT_FAULT)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MULTI_HIT_FAULT);
+
+ if (regval&F_INT_L2_TABLE_WALK_FAULT) {
+ unsigned int fault_va, layer;
+
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_TABLE_WALK_FAULT);
+ fault_va = M4U_ReadReg32(m4u_base, REG_MMU_TBWALK_FAULT_VA);
+ layer = fault_va&1;
+ fault_va &= (~1);
+ m4u_aee_print("L2 table walk fault: mva=0x%x, layer=%d\n", fault_va, layer);
+ }
+
+ if (regval&F_INT_L2_PFH_DMA_FIFO_OVERFLOW)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_DMA_FIFO_OVERFLOW);
+
+ if (regval&F_INT_L2_MISS_DMA_FIFO_OVERFLOW)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_DMA_FIFO_OVERFLOW);
+
+ if (regval&F_INT_L2_INVALD_DONE)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_INVALD_DONE);
+
+ if (regval&F_INT_L2_PFH_OUT_FIFO_ERROR)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_OUT_FIFO_ERROR);
+
+ if (regval&F_INT_L2_PFH_IN_FIFO_ERROR)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_PFH_IN_FIFO_ERROR);
+
+ if (regval&F_INT_L2_MISS_OUT_FIFO_ERROR)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_OUT_FIFO_ERROR);
+
+ if (regval&F_INT_L2_MISS_IN_FIFO_ERR)
+ MMU_INT_REPORT(m4u_index, 0, F_INT_L2_MISS_IN_FIFO_ERR);
+ }
+
+ {
+ unsigned int IntrSrc = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_FAULT_ST);
+ int m4u_slave_id;
+ unsigned int regval;
+ int layer, write, m4u_port;
+ unsigned int fault_mva, fault_pa;
+
+ M4UMSG("m4u main interrupt happened: sta=0x%x\n", IntrSrc);
+
+ if (IntrSrc & (F_INT_MMU0_MAIN_MSK | F_INT_MMU0_MAU_MSK))
+ m4u_slave_id = 0;
+ else {
+ m4u_clear_intr(m4u_index);
+ return 0;
+ }
+
+ /* read error info from registers */
+ fault_mva = M4U_ReadReg32(m4u_base, REG_MMU_FAULT_VA(m4u_slave_id));
+ layer = !!(fault_mva & F_MMU_FAULT_VA_LAYER_BIT);
+ write = !!(fault_mva & F_MMU_FAULT_VA_WRITE_BIT);
+ fault_mva &= F_MMU_FAULT_VA_MSK;
+ fault_pa = M4U_ReadReg32(m4u_base, REG_MMU_INVLD_PA(m4u_slave_id));
+ regval = M4U_ReadReg32(m4u_base, REG_MMU_INT_ID(m4u_slave_id));
+ m4u_port = m4u_get_port_by_tf_id(m4u_index, regval);
+
+ /* dump something quickly */
+ /* m4u_dump_rs_info(m4u_index, m4u_slave_id); */
+ m4u_dump_invalid_main_tlb(m4u_index, m4u_slave_id);
+ /* m4u_dump_main_tlb(m4u_index, 0); */
+ /* m4u_dump_pfh_tlb(m4u_index); */
+
+ if (IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id)) {
+ int bypass_DISP_TF = 0;
+
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TRANSLATION_FAULT(m4u_slave_id));
+ M4UMSG("fault: port=%s, mva=0x%x, pa=0x%x, layer=%d, wr=%d, 0x%x\n",
+ m4u_get_port_name(m4u_port), fault_mva, fault_pa, layer, write, regval);
+
+ if (M4U_PORT_DISP_OVL0 == m4u_port
#if defined(CONFIG_ARCH_MT6753)
- || M4U_PORT_DISP_OVL1 == m4u_port
+ || M4U_PORT_DISP_OVL1 == m4u_port || M4U_PORT_DISP_OD_W == m4u_port
#endif
- )
- {
- unsigned int valid_mva = 0;
- unsigned int valid_size = 0;
- unsigned int valid_mva_end = 0;
- m4u_query_mva_info(fault_mva-1, 0, &valid_mva, &valid_size);
- if(0 != valid_mva && 0 != valid_size)
- {
- valid_mva_end = valid_mva+valid_size;
- }
-
- if(0 != valid_mva_end && fault_mva < valid_mva_end+SZ_4K)
- {
- M4UMSG("bypass disp TF, valid mva=0x%x, size=0x%x, mva_end=0x%x\n", valid_mva, valid_size, valid_mva_end);
- bypass_DISP_TF = 1;
- }
- }
-
- if(gM4uPort[m4u_port].enable_tf == 1 && bypass_DISP_TF == 0)
- {
- m4u_dump_pte_nolock(m4u_get_domain_by_port(m4u_port), fault_mva);
-
- m4u_print_port_status(NULL, 1);
-
-
- //call user's callback to dump user registers
- if(m4u_port < M4U_PORT_UNKNOWN && gM4uPort[m4u_port].fault_fn)
- gM4uPort[m4u_port].fault_fn(m4u_port, fault_mva, gM4uPort[m4u_port].fault_data);
-
- m4u_dump_buf_info(NULL);
- m4u_aee_print("\nCRDISPATCH_KEY:M4U_%s\ntranslation fault: port=%s, mva=0x%x, pa=0x%x\n",
- m4u_get_port_name(m4u_port), m4u_get_port_name(m4u_port),
- fault_mva, fault_pa);
- }
-
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, m4u_port, fault_mva);
- }
- if(IntrSrc & F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id));
- }
- if(IntrSrc & F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id))
- {
- if(! (IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id)))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id));
-
- }
- }
- if(IntrSrc & F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id));
- }
- if(IntrSrc & F_INT_TLB_MISS_FAULT(m4u_slave_id))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TLB_MISS_FAULT(m4u_slave_id));
- }
- if(IntrSrc & F_INT_MISS_FIFO_ERR(m4u_slave_id))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MISS_FIFO_ERR(m4u_slave_id));
- }
- if(IntrSrc & F_INT_PFH_FIFO_ERR(m4u_slave_id))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_PFH_FIFO_ERR(m4u_slave_id));
- }
-
- if(IntrSrc & F_INT_MAU(m4u_slave_id, 0))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 0));
-
- __mau_dump_status(m4u_index, m4u_slave_id, 0);
- }
- if(IntrSrc & F_INT_MAU(m4u_slave_id, 1))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 1));
- __mau_dump_status(m4u_index, m4u_slave_id, 1);
- }
- if(IntrSrc & F_INT_MAU(m4u_slave_id, 2))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 2));
- __mau_dump_status(m4u_index, m4u_slave_id, 2);
- }
- if(IntrSrc & F_INT_MAU(m4u_slave_id, 3))
- {
- MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 3));
- __mau_dump_status(m4u_index, m4u_slave_id, 3);
- }
-
- m4u_clear_intr(m4u_index);
- m4u_isr_record();
- }
-
- return IRQ_HANDLED;
-}
-
-
-m4u_domain_t * m4u_get_domain_by_port(M4U_PORT_ID port)
-{
- return &gM4uDomain;
-}
-
-m4u_domain_t * m4u_get_domain_by_id(int id)
-{
- return &gM4uDomain;
-}
-
-int m4u_get_domain_nr()
-{
- return 1;
-}
-
-
-
-int m4u_reg_init(m4u_domain_t* m4u_domain, unsigned long ProtectPA, int m4u_id)
-{
- unsigned int regval;
- int i;
+ ) {
+ unsigned int valid_mva = 0;
+ unsigned int valid_size = 0;
+ unsigned int valid_mva_end = 0;
+
+ m4u_query_mva_info(fault_mva-1, 0, &valid_mva, &valid_size);
+ if (0 != valid_mva && 0 != valid_size)
+ valid_mva_end = valid_mva+valid_size-1;
+
+ if ((0 != valid_mva_end && fault_mva < valid_mva_end+SZ_4K)
+ || m4u_pte_invalid(m4u_get_domain_by_port(m4u_port), fault_mva)) {
+ M4UMSG("bypass disp TF, valid mva=0x%x, size=0x%x, mva_end=0x%x\n",
+ valid_mva, valid_size, valid_mva_end);
+ bypass_DISP_TF = 1;
+ }
+ }
+
+ if (gM4uPort[m4u_port].enable_tf == 1 && bypass_DISP_TF == 0) {
+ m4u_dump_pte_nolock(m4u_get_domain_by_port(m4u_port), fault_mva);
+
+ /* m4u_print_port_status(NULL, 1); */
+
+ /* call user's callback to dump user registers */
+ if (m4u_port < M4U_PORT_UNKNOWN && gM4uPort[m4u_port].fault_fn)
+ gM4uPort[m4u_port].fault_fn(m4u_port, fault_mva, gM4uPort[m4u_port].fault_data);
+
+ m4u_dump_buf_info(NULL);
+ if (m4u_port < M4U_PORT_UNKNOWN && NULL == gM4uPort[m4u_port].fault_data) {
+ m4u_aee_print(
+ "\nCRDISPATCH_KEY:M4U_%s\n, translation fault: port=%s, mva=0x%x, pa=0x%x\n",
+ m4u_get_port_name(m4u_port), m4u_get_port_name(m4u_port),
+ fault_mva, fault_pa);
+ } else {
+ m4u_aee_print(
+ "\nCRDISPATCH_KEY:M4U_%s\n, translation fault: port=%s, mva=0x%x, pa=0x%x\n",
+ (char *)gM4uPort[m4u_port].fault_data,
+ m4u_get_port_name(m4u_port), fault_mva, fault_pa);
+ }
+ }
+
+ MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, m4u_port, fault_mva);
+ }
+
+ if (IntrSrc & F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAIN_MULTI_HIT_FAULT(m4u_slave_id));
+
+ if (IntrSrc & F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id))
+ if (!(IntrSrc & F_INT_TRANSLATION_FAULT(m4u_slave_id)))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id,
+ F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(m4u_slave_id));
+
+ if (IntrSrc & F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_ENTRY_REPLACEMENT_FAULT(m4u_slave_id));
+
+ if (IntrSrc & F_INT_TLB_MISS_FAULT(m4u_slave_id))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_TLB_MISS_FAULT(m4u_slave_id));
+
+ if (IntrSrc & F_INT_MISS_FIFO_ERR(m4u_slave_id))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MISS_FIFO_ERR(m4u_slave_id));
+
+ if (IntrSrc & F_INT_PFH_FIFO_ERR(m4u_slave_id))
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_PFH_FIFO_ERR(m4u_slave_id));
- M4UINFO("m4u_reg_init, ProtectPA = 0x%lx\n", ProtectPA);
-
- //m4u clock is in infra domain, we never close this clock.
- m4u_clock_on();
+ if (IntrSrc & F_INT_MAU(m4u_slave_id, 0)) {
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 0));
+ __mau_dump_status(m4u_index, m4u_slave_id, 0);
+ }
+
+ if (IntrSrc & F_INT_MAU(m4u_slave_id, 1)) {
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 1));
+ __mau_dump_status(m4u_index, m4u_slave_id, 1);
+ }
+
+ if (IntrSrc & F_INT_MAU(m4u_slave_id, 2)) {
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 2));
+ __mau_dump_status(m4u_index, m4u_slave_id, 2);
+ }
+
+ if (IntrSrc & F_INT_MAU(m4u_slave_id, 3)) {
+ MMU_INT_REPORT(m4u_index, m4u_slave_id, F_INT_MAU(m4u_slave_id, 3));
+ __mau_dump_status(m4u_index, m4u_slave_id, 3);
+ }
+
+ m4u_clear_intr(m4u_index);
+ m4u_isr_record();
+ }
+
+ return IRQ_HANDLED;
+}
+
+m4u_domain_t *m4u_get_domain_by_port(M4U_PORT_ID port)
+{
+ return &gM4uDomain;
+}
+
+m4u_domain_t *m4u_get_domain_by_id(int id)
+{
+ return &gM4uDomain;
+}
+
+int m4u_get_domain_nr(void)
+{
+ return 1;
+}
+
+int m4u_reg_init(m4u_domain_t *m4u_domain, unsigned long ProtectPA, int m4u_id)
+{
+ unsigned int regval;
+ int i;
+
+ M4UINFO("m4u_reg_init, ProtectPA = 0x%lx\n", ProtectPA);
+
+ /* m4u clock is in infra domain, we never close this clock. */
+ m4u_clock_on();
#ifdef M4U_FPGAPORTING
#if 0
- if(0 == m4u_id)
- {
- unsigned long MMconfigBaseAddr;
- struct device_node *node = NULL;
- node = of_find_compatible_node(NULL, NULL,"mediatek,MMSYS_CONFIG");
- MMconfigBaseAddr = (unsigned long)of_iomap(node, 0);
- M4UINFO("MMconfigBaseAddr: 0x%lx\n", MMconfigBaseAddr);
- M4U_WriteReg32(MMconfigBaseAddr, 0x108, 0xffffffff);
- }
+ if (0 == m4u_id) {
+ unsigned long MMconfigBaseAddr;
+ struct device_node *node = NULL;
+
+ node = of_find_compatible_node(NULL, NULL, "mediatek,mmsys_config");
+ MMconfigBaseAddr = (unsigned long)of_iomap(node, 0);
+ M4UINFO("MMconfigBaseAddr: 0x%lx\n", MMconfigBaseAddr);
+ M4U_WriteReg32(MMconfigBaseAddr, 0x108, 0xffffffff);
+ }
#endif
#endif
-//=============================================
-// SMI registers
-//=============================================
+/* ============================================= */
+/* SMI registers */
+/* ============================================= */
/*bus selection:
- control which m4u_slave each larb routes to.
- this register is in smi_common domain
- Threre is only one AXI channel in K2, so don't need to set
+ control which m4u_slave each larb routes to.
+ this register is in smi_common domain
+   There is only one AXI channel in K2, so don't need to set
*/
-//=========================================
-//larb init
-//=========================================
- if(0 == m4u_id)
- {
- struct device_node *node = NULL;
- for(i=0; i<SMI_LARB_NR; i++)
- {
- node = of_find_compatible_node(NULL, NULL,gM4U_SMILARB[i]);
- if(NULL == node)
- M4UINFO("init larb %d error\n", i);
- gLarbBaseAddr[i] = (unsigned long)of_iomap(node, 0);
- //set mm engine domain
- larb_clock_on(i);
- M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_0, DOMAIN_VALUE);
- M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_1, DOMAIN_VALUE);
- M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_2, DOMAIN_VALUE);
- M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_3, DOMAIN_VALUE);
- larb_clock_off(i);
- M4UINFO("init larb %d, 0x%lx\n", i, gLarbBaseAddr[i]);
- }
- }
-
-//=========================================
-//perisys init
-//=========================================
- if(1 == m4u_id)
- {
- struct device_node *node = NULL;
-
- node = of_find_compatible_node(NULL, NULL,"mediatek,PERICFG");
- gPericfgBaseAddr = (unsigned long)of_iomap(node, 0);
-
- M4UINFO("gPericfgBaseAddr: 0x%lx\n", gPericfgBaseAddr);
- }
-
-//=============================================
-// m4u registers
-//=============================================
- M4UINFO("m4u hw init id = %d, base address: 0x%lx, pgd_pa: 0x%x\n", m4u_id, gM4UBaseAddr[m4u_id], (unsigned int)m4u_domain->pgd_pa);
-
- {
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE, (unsigned int)m4u_domain->pgd_pa);
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE_SEC, (unsigned int)m4u_domain->pgd_pa);
-
- regval = M4U_ReadReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG);
-
- if(0 == m4u_id)
- {// mm_iommu
- regval = regval|F_MMU_CTRL_PFH_DIS(0) \
- |F_MMU_CTRL_MONITOR_EN(0) \
- |F_MMU_CTRL_MONITOR_CLR(0) \
- |F_MMU_CTRL_TF_PROTECT_SEL(2) \
- |F_MMU_CTRL_INT_HANG_EN(0);
- }
-
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG, regval);
-
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_COHERENCE_EN, 1);
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_TABLE_WALK_DIS, 0);
-
- //enable all interrupts
- m4u_enable_intr(m4u_id);
-
- //set translation fault proctection buffer address
- if(!gM4U_4G_DRAM_Mode)
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR, (unsigned int)F_MMU_IVRP_PA_SET(ProtectPA));
- else
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR, (unsigned int)F_MMU_IVRP_4G_DRAM_PA_SET(ProtectPA));
-
- //enable DCM
- M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_DCM_DIS, 0);
-
- m4u_invalid_tlb_all(m4u_id);
-
- }
-
- //special settings for mmu0 (multimedia iommu)
- if(0 == m4u_id)
- {
- unsigned long m4u_base=gM4UBaseAddr[0];
- //2 disable in-order-write
- M4U_WriteReg32(m4u_base, REG_MMU_IN_ORDER_WR_EN, 0);
-
-
- //3 non-standard AXI mode
- M4U_WriteReg32(m4u_base, REG_MMU_STANDARD_AXI_MODE, 0);
- //4 write command throttling mode
- m4uHw_set_field_by_mask(m4u_base, REG_MMU_WR_LEN, F_BIT_SET(5), 0);
- }
-
- return 0;
+/* ========================================= */
+/* larb init */
+/* ========================================= */
+ if (0 == m4u_id) {
+ struct device_node *node = NULL;
+
+ for (i = 0; i < SMI_LARB_NR; i++) {
+ node = of_find_compatible_node(NULL, NULL, gM4U_SMILARB[i]);
+ if (NULL == node)
+ M4UINFO("init larb %d error\n", i);
+ else {
+ gLarbBaseAddr[i] = (unsigned long)of_iomap(node, 0);
+ /* set mm engine domain */
+ larb_clock_on(i);
+ M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_0, DOMAIN_VALUE);
+ M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_1, DOMAIN_VALUE);
+ M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_2, DOMAIN_VALUE);
+ M4U_WriteReg32(gLarbBaseAddr[i], SMI_LARB_DOMN_3, DOMAIN_VALUE);
+ larb_clock_off(i);
+ M4UINFO("init larb %d, 0x%lx\n", i, gLarbBaseAddr[i]);
+ }
+ }
+ }
+
+/* ========================================= */
+/* perisys init */
+/* ========================================= */
+ if (1 == m4u_id) {
+ struct device_node *node = NULL;
+
+ node = of_find_compatible_node(NULL, NULL, "mediatek,PERICFG");
+ gPericfgBaseAddr = (unsigned long)of_iomap(node, 0);
+
+ M4UINFO("gPericfgBaseAddr: 0x%lx\n", gPericfgBaseAddr);
+ }
+
+/* ============================================= */
+/* m4u registers */
+/* ============================================= */
+ M4UINFO("m4u hw init id = %d, base address: 0x%lx, pgd_pa: 0x%x\n",
+ m4u_id, gM4UBaseAddr[m4u_id], (unsigned int)m4u_domain->pgd_pa);
+
+ {
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE, (unsigned int)m4u_domain->pgd_pa);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMUg_PT_BASE_SEC, (unsigned int)m4u_domain->pgd_pa);
+
+ regval = M4U_ReadReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG);
+
+ if (0 == m4u_id) { /* mm_iommu */
+ regval = regval|F_MMU_CTRL_PFH_DIS(0)
+ |F_MMU_CTRL_MONITOR_EN(0)
+ |F_MMU_CTRL_MONITOR_CLR(0)
+ |F_MMU_CTRL_TF_PROTECT_SEL(2)
+ |F_MMU_CTRL_INT_HANG_EN(0);
+ }
+
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_CTRL_REG, regval);
+
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_COHERENCE_EN, 1);
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_MMU_TABLE_WALK_DIS, 0);
+
+ /* enable all interrupts */
+ m4u_enable_intr(m4u_id);
+
+		/* set translation fault protection buffer address */
+ if (!gM4U_4G_DRAM_Mode)
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR,
+ (unsigned int)F_MMU_IVRP_PA_SET(ProtectPA));
+ else
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_IVRP_PADDR,
+ (unsigned int)F_MMU_IVRP_4G_DRAM_PA_SET(ProtectPA));
+
+ /* enable DCM */
+ M4U_WriteReg32(gM4UBaseAddr[m4u_id], REG_MMU_DCM_DIS, 0);
+
+ m4u_invalid_tlb_all(m4u_id);
+ }
+
+ /* special settings for mmu0 (multimedia iommu) */
+ if (0 == m4u_id) {
+ unsigned long m4u_base = gM4UBaseAddr[0];
+ /* 2 disable in-order-write */
+ M4U_WriteReg32(m4u_base, REG_MMU_IN_ORDER_WR_EN, 0);
+
+ /* 3 non-standard AXI mode */
+ M4U_WriteReg32(m4u_base, REG_MMU_STANDARD_AXI_MODE, 0);
+ /* 4 write command throttling mode */
+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_WR_LEN, F_BIT_SET(5), 0);
+ }
+
+ return 0;
}
-int m4u_domain_init(struct m4u_device *m4u_dev, void* priv_reserve)
+int m4u_domain_init(struct m4u_device *m4u_dev, void *priv_reserve)
{
- M4UINFO("m4u_domain_init\n");
+ M4UINFO("m4u_domain_init\n");
- memset(&gM4uDomain, 0, sizeof(gM4uDomain));
- gM4uDomain.pgsize_bitmap = M4U_PGSIZES;
- mutex_init(&gM4uDomain.pgtable_mutex);
+ memset(&gM4uDomain, 0, sizeof(gM4uDomain));
+ gM4uDomain.pgsize_bitmap = M4U_PGSIZES;
+ mutex_init(&gM4uDomain.pgtable_mutex);
- m4u_pgtable_init(m4u_dev, &gM4uDomain);
-
- m4u_mvaGraph_init(priv_reserve);
+ m4u_pgtable_init(m4u_dev, &gM4uDomain);
- return 0;
+ m4u_mvaGraph_init(priv_reserve);
+
+ return 0;
}
int m4u_reset(int m4u_id)
{
- m4u_invalid_tlb_all(m4u_id);
- m4u_clear_intr(m4u_id);
+ m4u_invalid_tlb_all(m4u_id);
+ m4u_clear_intr(m4u_id);
- return 0;
+ return 0;
}
int m4u_hw_init(struct m4u_device *m4u_dev, int m4u_id)
{
- unsigned long pProtectVA;
- phys_addr_t ProtectPA;
-
- gM4UBaseAddr[m4u_id] = m4u_dev->m4u_base[m4u_id];
-
- pProtectVA = (unsigned long) kmalloc(TF_PROTECT_BUFFER_SIZE*2, GFP_KERNEL|__GFP_ZERO);
- if(NULL==(void*)pProtectVA)
- {
- M4UMSG("Physical memory not available.\n");
- return -1;
- }
- pProtectVA = (pProtectVA+(TF_PROTECT_BUFFER_SIZE-1))&(~(TF_PROTECT_BUFFER_SIZE-1));
- ProtectPA = virt_to_phys((void *)pProtectVA);
- if(ProtectPA & (TF_PROTECT_BUFFER_SIZE-1))
- {
- M4UMSG("protect buffer (0x%pa) not align.\n",&ProtectPA);
- return -1;
- }
-
- M4UINFO("protect memory va=0x%pa, pa=0x%pa.\n", &pProtectVA, &ProtectPA);
-
- pM4URegBackUp = (unsigned int*)kmalloc(M4U_REG_BACKUP_SIZE, GFP_KERNEL|__GFP_ZERO);
- if(pM4URegBackUp==NULL)
- {
- M4UMSG("Physical memory not available size=%d.\n", (int)M4U_REG_BACKUP_SIZE);
- return -1;
- }
-
- spin_lock_init(&gM4u_reg_lock);
-
- m4u_reg_init(&gM4uDomain,ProtectPA, m4u_id);
-
- if(0 == m4u_id)
- m4u_register_larb_monitor();
-
- if (request_irq(m4u_dev->irq_num[m4u_id], MTK_M4U_isr, IRQF_TRIGGER_LOW, "m4u", NULL))
- {
- M4UMSG("request M4U%d IRQ line failed\n",m4u_id);
- return -ENODEV;
- }
- else
- {
- M4UMSG("request_irq, irq_num=%d\n", m4u_dev->irq_num[m4u_id]);
- }
-
- m4u_isr_pause_timer_init();
-
- m4u_monitor_start(m4u_id);
-
- //mau_start_monitor(0, 0, 0, 0, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff);
- //mau_start_monitor(0, 0, 1, 1, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff);
- //mau_start_monitor(0, 0, 2, 0, 0, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff);
-
- // config MDP related port default use M4U
-
- if(0 == m4u_id)
- {
- M4U_PORT_STRUCT port;
- port.Direction = 0;
- port.Distance = 1;
- port.domain = 0;
- port.Security = 0;
- port.Virtuality = 1;
-
- port.ePortID = M4U_PORT_MDP_RDMA;
- m4u_config_port(&port);
-
- port.ePortID = M4U_PORT_MDP_WDMA;
- m4u_config_port(&port);
-
- port.ePortID = M4U_PORT_MDP_WROT;
- m4u_config_port(&port);
-
- }
-
-
- return 0;
+ unsigned long pProtectVA;
+ phys_addr_t ProtectPA;
+
+#if !defined(CONFIG_MTK_LEGACY)
+ int i;
+
+ gM4uDev->infra_m4u = devm_clk_get(gM4uDev->pDev[m4u_id], "infra_m4u");
+ if (IS_ERR(gM4uDev->infra_m4u)) {
+ M4UMSG("cannot get infra m4u clock\n");
+ return PTR_ERR(gM4uDev->infra_m4u);
+ }
+
+ for (i = SMI_COMMON_CLK; i < SMI_CLK_NUM; i++) {
+ gM4uDev->smi_clk[i] = devm_clk_get(gM4uDev->pDev[m4u_id], smi_clk_name[i]);
+ if (IS_ERR(gM4uDev->smi_clk[i])) {
+ M4UMSG("cannot get %s clock\n", smi_clk_name[i]);
+ return PTR_ERR(gM4uDev->smi_clk[i]);
+ }
+ }
+ smi_common_clock_on();
+ smi_larb0_clock_on();
+#endif
+
+#ifdef M4U_4GBDRAM
+ gM4U_4G_DRAM_Mode = enable_4G();
+#endif
+ M4UMSG("4G DRAM Mode is: %d\n", gM4U_4G_DRAM_Mode);
+
+ gM4UBaseAddr[m4u_id] = m4u_dev->m4u_base[m4u_id];
+
+ pProtectVA = (unsigned long) kmalloc(TF_PROTECT_BUFFER_SIZE*2, GFP_KERNEL|__GFP_ZERO);
+ if (NULL == (void *)pProtectVA) {
+ M4UMSG("Physical memory not available.\n");
+ return -1;
+ }
+ pProtectVA = (pProtectVA+(TF_PROTECT_BUFFER_SIZE-1))&(~(TF_PROTECT_BUFFER_SIZE-1));
+ ProtectPA = virt_to_phys((void *)pProtectVA);
+ if (ProtectPA & (TF_PROTECT_BUFFER_SIZE-1)) {
+ M4UMSG("protect buffer (0x%pa) not align.\n", &ProtectPA);
+ return -1;
+ }
+
+ M4UINFO("protect memory va=0x%pa, pa=0x%pa.\n", &pProtectVA, &ProtectPA);
+
+ pM4URegBackUp = kmalloc(M4U_REG_BACKUP_SIZE, GFP_KERNEL|__GFP_ZERO);
+ if (pM4URegBackUp == NULL) {
+ M4UMSG("Physical memory not available size=%d.\n", (int)M4U_REG_BACKUP_SIZE);
+ return -1;
+ }
+
+ spin_lock_init(&gM4u_reg_lock);
+
+ m4u_reg_init(&gM4uDomain, ProtectPA, m4u_id);
+
+ if (request_irq(m4u_dev->irq_num[m4u_id], MTK_M4U_isr, IRQF_TRIGGER_LOW, "m4u", NULL)) {
+ M4UMSG("request M4U%d IRQ line failed\n", m4u_id);
+ return -ENODEV;
+ }
+
+ M4UMSG("request_irq, irq_num=%d\n", m4u_dev->irq_num[m4u_id]);
+
+ m4u_isr_pause_timer_init();
+
+ m4u_monitor_start(m4u_id);
+
+ /* mau_start_monitor(0, 0, 0, 0, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
+ /* mau_start_monitor(0, 0, 1, 1, 1, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
+ /* mau_start_monitor(0, 0, 2, 0, 0, 0, 0, 0x0, 0x1000, 0xffffffff, 0xffffffff); */
+
+ /* config MDP related port default use M4U */
+ if (0 == m4u_id) {
+ M4U_PORT_STRUCT port;
+
+ port.Direction = 0;
+ port.Distance = 1;
+ port.domain = 0;
+ port.Security = 0;
+ port.Virtuality = 1;
+
+ port.ePortID = M4U_PORT_MDP_RDMA;
+ m4u_config_port(&port);
+
+ port.ePortID = M4U_PORT_MDP_WDMA;
+ m4u_config_port(&port);
+
+ port.ePortID = M4U_PORT_MDP_WROT;
+ m4u_config_port(&port);
+ }
+ return 0;
}
int m4u_hw_deinit(struct m4u_device *m4u_dev, int m4u_id)
{
#if 1
- free_irq(m4u_dev->irq_num[m4u_id], NULL);
+ free_irq(m4u_dev->irq_num[m4u_id], NULL);
#else
- free_irq(MM_IOMMU_IRQ_B_ID, NULL);
- free_irq(PERISYS_IOMMU_IRQ_B_ID, NULL);
-#endif
- return 0;
+ free_irq(MM_IOMMU_IRQ_B_ID, NULL);
+ free_irq(PERISYS_IOMMU_IRQ_B_ID, NULL);
+#endif
+ return 0;
}
int m4u_dump_reg_for_smi_hang_issue(void)
{
- /*NOTES: m4u_monitor_start() must be called before using m4u */
- /*please check m4u_hw_init() to ensure that */
+ /*NOTES: m4u_monitor_start() must be called before using m4u */
+ /*please check m4u_hw_init() to ensure that */
- M4UMSG("====== dump m4u reg start =======>\n");
+ M4UMSG("====== dump m4u reg start =======>\n");
- if(0 == gM4UBaseAddr[0])
- {
- M4UMSG("gM4UBaseAddr[0] is NULL\n");
- return 0;
- }
- M4UMSG("0x44 = 0x%x\n", M4U_ReadReg32(gM4UBaseAddr[0], 0x44));
-
- m4u_print_perf_counter(0, 0, "m4u");
- m4u_dump_rs_info(0, 0);
+ if (0 == gM4UBaseAddr[0]) {
+ M4UMSG("gM4UBaseAddr[0] is NULL\n");
+ return 0;
+ }
+ M4UMSG("0x44 = 0x%x\n", M4U_ReadReg32(gM4UBaseAddr[0], 0x44));
- return 0;
-}
+ m4u_print_perf_counter(0, 0, "m4u");
+ m4u_dump_rs_info(0, 0);
+ return 0;
+} \ No newline at end of file
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_hw.h b/drivers/misc/mediatek/m4u/mt6735/m4u_hw.h
index aeebafde1..2553d2223 100644
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_hw.h
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u_hw.h
@@ -3,160 +3,177 @@
#define M4U_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-#define M4U_SLAVE_NUM(m4u_id) ((m4u_id) ? 1 : 1) //m4u0 has 1 slaves, iommu(m4u1) has 1 slave
+#define M4U_SLAVE_NUM(m4u_id) ((m4u_id) ? 1 : 1) /* m4u0 has 1 slave, iommu(m4u1) has 1 slave */
#define M4U0_SEQ_NR (SEQ_NR_PER_MM_SLAVE*M4U_SLAVE_NUM(0))
#define M4U1_SEQ_NR (SEQ_NR_PER_PERI_SLAVE*M4U_SLAVE_NUM(1))
-#define M4U_SEQ_NUM(m4u_id) ((m4u_id) ? M4U1_SEQ_NR : M4U0_SEQ_NR)
+#define M4U_SEQ_NUM(m4u_id) ((m4u_id) ? M4U1_SEQ_NR : M4U0_SEQ_NR)
#define M4U0_MAU_NR 4
#define M4U_SEQ_ALIGN_MSK (0x100000-1)
#define M4U_SEQ_ALIGN_SIZE 0x100000
-typedef struct _M4U_PERF_COUNT
+typedef struct _M4U_PERF_COUNT {
+ unsigned int transaction_cnt;
+ unsigned int main_tlb_miss_cnt;
+ unsigned int pfh_tlb_miss_cnt;
+ unsigned int pfh_cnt;
+ unsigned int rs_perf_cnt;
+} M4U_PERF_COUNT;
+
+typedef struct __mmu_tlb {
+ unsigned int tag;
+ unsigned int desc;
+} mmu_tlb_t;
+
+typedef struct _pfh_tlb {
+ unsigned int va;
+ unsigned int va_msk;
+ char layer;
+ char x16;
+ char sec;
+ char pfh;
+ char valid;
+ unsigned int desc[MMU_PAGE_PER_LINE];
+ int set;
+ int way;
+ unsigned int page_size;
+ unsigned int tag;
+} mmu_pfh_tlb_t;
+
+typedef struct {
+ char *name;
+ unsigned m4u_id:2;
+ unsigned m4u_slave:2;
+ unsigned larb_id:4;
+ unsigned larb_port:8;
+ unsigned tf_id:12; /* 12 bits */
+ bool enable_tf;
+ m4u_reclaim_mva_callback_t *reclaim_fn;
+ void *reclaim_data;
+ m4u_fault_callback_t *fault_fn;
+ void *fault_data;
+} m4u_port_t;
+
+typedef struct _M4U_RANGE_DES /* sequential entry range */
{
- unsigned int transaction_cnt;
- unsigned int main_tlb_miss_cnt;
- unsigned int pfh_tlb_miss_cnt;
- unsigned int pfh_cnt;
- unsigned int rs_perf_cnt;
-}M4U_PERF_COUNT;
-
-typedef struct __mmu_tlb
-{
- unsigned int tag;
- unsigned int desc;
-}mmu_tlb_t;
-
-
-typedef struct _pfh_tlb
-{
- unsigned int va;
- unsigned int va_msk;
- char layer;
- char x16;
- char sec;
- char pfh;
- char valid;
- unsigned int desc[MMU_PAGE_PER_LINE];
- int set;
- int way;
- unsigned int page_size;
- unsigned int tag;
-}mmu_pfh_tlb_t;
-
-typedef struct
-{
- char *name;
- unsigned m4u_id:2;
- unsigned m4u_slave:2;
- unsigned larb_id:4;
- unsigned larb_port:8;
- unsigned tf_id:12; //12 bits
- bool enable_tf;
- m4u_reclaim_mva_callback_t *reclaim_fn;
- void* reclaim_data;
- m4u_fault_callback_t *fault_fn;
- void* fault_data;
-}m4u_port_t;
-
-
-typedef struct _M4U_RANGE_DES //sequential entry range
-{
- unsigned int Enabled;
- M4U_PORT_ID port;
- unsigned int MVAStart;
- unsigned int MVAEnd;
- //unsigned int entryCount;
+ unsigned int Enabled;
+ M4U_PORT_ID port;
+ unsigned int MVAStart;
+ unsigned int MVAEnd;
+ /* unsigned int entryCount; */
} M4U_RANGE_DES_T;
-typedef struct _M4U_MAU_STATUS //mau entry
+typedef struct _M4U_MAU_STATUS /* mau entry */
{
- bool Enabled;
- M4U_PORT_ID port;
- unsigned int MVAStart;
- unsigned int MVAEnd;
+ bool Enabled;
+ M4U_PORT_ID port;
+ unsigned int MVAStart;
+ unsigned int MVAEnd;
} M4U_MAU_STATUS_T;
-
extern m4u_port_t gM4uPort[];
extern int gM4u_port_num;
static inline char *m4u_get_port_name(M4U_PORT_ID portID)
{
- if (portID < gM4u_port_num)
- return gM4uPort[portID].name;
- else
- return "m4u_port_unknown";
+ if (portID < gM4u_port_num)
+ return gM4uPort[portID].name;
+ else
+ return "m4u_port_unknown";
}
static inline int m4u_get_port_by_tf_id(int m4u_id, int tf_id)
{
- int i, tf_id_old;
- tf_id_old = tf_id;
-
- if(m4u_id==0)
- {
- tf_id &= F_MMU0_INT_ID_TF_MSK;
- }
-
- for(i=0; i<gM4u_port_num; i++)
- {
- if((gM4uPort[i].tf_id == tf_id) && (gM4uPort[i].m4u_id == m4u_id))
- return i;
- }
- M4UMSG("error: m4u_id=%d, tf_id=0x%x\n", m4u_id, tf_id_old);
- return gM4u_port_num;
+ int i, tf_id_old;
+
+ tf_id_old = tf_id;
+
+ if (m4u_id == 0)
+ tf_id &= F_MMU0_INT_ID_TF_MSK;
+
+ for (i = 0; i < gM4u_port_num; i++)
+ if ((gM4uPort[i].tf_id == tf_id) && (gM4uPort[i].m4u_id == m4u_id))
+ return i;
+ M4UMSG("error: m4u_id=%d, tf_id=0x%x\n", m4u_id, tf_id_old);
+ return gM4u_port_num;
}
static inline int m4u_port_2_larb_port(M4U_PORT_ID port)
{
- return gM4uPort[port].larb_port;
-}
+ if (port < 0 || port > M4U_PORT_UNKNOWN)
+ return 0;
+ return gM4uPort[port].larb_port;
+}
static inline int m4u_port_2_larb_id(M4U_PORT_ID port)
{
- return gM4uPort[port].larb_id;
+ if (port < 0 || port > M4U_PORT_UNKNOWN)
+ return 0;
+
+ return gM4uPort[port].larb_id;
}
static inline int larb_2_m4u_slave_id(int larb)
{
- int i;
- for(i=0; i<gM4u_port_num; i++)
- {
- if(gM4uPort[i].larb_id == larb)
- return gM4uPort[i].m4u_slave;
- }
- return 0;
-}
+ int i;
+ for (i = 0; i < gM4u_port_num; i++)
+ if (gM4uPort[i].larb_id == larb)
+ return gM4uPort[i].m4u_slave;
+ return 0;
+}
static inline int m4u_port_2_m4u_id(M4U_PORT_ID port)
{
- return gM4uPort[port].m4u_id;
+ if (port < 0 || port > M4U_PORT_UNKNOWN)
+ return 0;
+
+ return gM4uPort[port].m4u_id;
}
static inline int m4u_port_2_m4u_slave_id(M4U_PORT_ID port)
{
- return gM4uPort[port].m4u_slave;
+ if (port < 0 || port > M4U_PORT_UNKNOWN)
+ return 0;
+
+ return gM4uPort[port].m4u_slave;
}
static inline int larb_port_2_m4u_port(int larb, int larb_port)
{
- int i;
- for(i=0; i<gM4u_port_num; i++)
- {
- if(gM4uPort[i].larb_id==larb && gM4uPort[i].larb_port==larb_port)
- return i;
- }
- //M4UMSG("unkown larb port: larb=%d, larb_port=%d\n", larb, larb_port);
- return M4U_PORT_UNKNOWN;
+ int i;
+
+ for (i = 0; i < gM4u_port_num; i++)
+ if (gM4uPort[i].larb_id == larb && gM4uPort[i].larb_port == larb_port)
+ return i;
+ /* M4UMSG("unknown larb port: larb=%d, larb_port=%d\n", larb, larb_port); */
+ return M4U_PORT_UNKNOWN;
}
void m4u_print_perf_counter(int m4u_index, int m4u_slave_id, const char *msg);
-int m4u_dump_reg(int m4u_index);
+int m4u_dump_reg(int m4u_index, unsigned int start);
+void smi_common_clock_on(void);
+void smi_common_clock_off(void);
+void smi_larb0_clock_on(void);
+void smi_larb0_clock_off(void);
+
+extern unsigned int gM4UTagCount[];
+extern const char *gM4U_SMILARB[];
+extern M4U_RANGE_DES_T gM4u0_seq[];
+extern M4U_RANGE_DES_T *gM4USeq[];
+extern m4u_port_t gM4uPort[];
+extern struct m4u_device *gM4uDev;
+#if !defined(CONFIG_MTK_LEGACY)
+extern const char *smi_clk_name[];
#endif
+
+#ifdef M4U_TEE_SERVICE_ENABLE
+extern int m4u_tee_en;
+#endif
+
+#endif \ No newline at end of file
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_mva.c b/drivers/misc/mediatek/m4u/mt6735/m4u_mva.c
deleted file mode 100644
index e4695e835..000000000
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_mva.c
+++ /dev/null
@@ -1,417 +0,0 @@
-
-#include <linux/spinlock.h>
-#include <linux/printk.h>
-#include "m4u_priv.h"
-
-// ((va&0xfff)+size+0xfff)>>12
-#define mva_pageOffset(mva) ((mva)&0xfff)
-
-#define MVA_BLOCK_SIZE_ORDER 20 //1M
-#define MVA_MAX_BLOCK_NR 4095 //4GB
-
-#define MVA_BLOCK_SIZE (1<<MVA_BLOCK_SIZE_ORDER) //0x40000
-#define MVA_BLOCK_ALIGN_MASK (MVA_BLOCK_SIZE-1) //0x3ffff
-#define MVA_BLOCK_NR_MASK (MVA_MAX_BLOCK_NR) //0xfff
-#define MVA_BUSY_MASK (1<<15) //0x8000
-
-#define MVA_IS_BUSY(index) ((mvaGraph[index]&MVA_BUSY_MASK)!=0)
-#define MVA_SET_BUSY(index) (mvaGraph[index] |= MVA_BUSY_MASK)
-#define MVA_SET_FREE(index) (mvaGraph[index] & (~MVA_BUSY_MASK))
-#define MVA_GET_NR(index) (mvaGraph[index] & MVA_BLOCK_NR_MASK)
-
-#define MVAGRAPH_INDEX(mva) (mva>>MVA_BLOCK_SIZE_ORDER)
-
-
-static short mvaGraph[MVA_MAX_BLOCK_NR+1];
-static void* mvaInfoGraph[MVA_MAX_BLOCK_NR+1];
-static DEFINE_SPINLOCK(gMvaGraph_lock);
-
-
-
-void m4u_mvaGraph_init(void* priv_reserve)
-{
- unsigned long irq_flags;
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
- memset(mvaGraph, 0, sizeof(short)*(MVA_MAX_BLOCK_NR+1));
- memset(mvaInfoGraph, 0, sizeof(void*)*(MVA_MAX_BLOCK_NR+1));
- mvaGraph[0] = 1|MVA_BUSY_MASK;
- mvaInfoGraph[0] = priv_reserve;
- mvaGraph[1] = MVA_MAX_BLOCK_NR;
- mvaInfoGraph[1] = priv_reserve;
- mvaGraph[MVA_MAX_BLOCK_NR] = MVA_MAX_BLOCK_NR;
- mvaInfoGraph[MVA_MAX_BLOCK_NR] = priv_reserve;
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-}
-
-void m4u_mvaGraph_dump_raw(void)
-{
- int i;
- unsigned long irq_flags;
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
- printk("[M4U_K] dump raw data of mvaGraph:============>\n");
- for(i=0; i<MVA_MAX_BLOCK_NR+1; i++)
- printk("0x%4x: 0x%08x \n", i, mvaGraph[i]);
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-}
-
-
-void m4u_mvaGraph_dump(void)
-{
- unsigned int addr=0, size=0;
- short index=1, nr=0;
- int i,max_bit, is_busy;
- short frag[12] = {0};
- short nr_free=0, nr_alloc=0;
- unsigned long irq_flags;
-
- printk("[M4U_K] mva allocation info dump:====================>\n");
- printk("start size blocknum busy \n");
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
- for(index=1; index<MVA_MAX_BLOCK_NR+1; index += nr)
- {
- addr = index << MVA_BLOCK_SIZE_ORDER;
- nr = MVA_GET_NR(index);
- size = nr << MVA_BLOCK_SIZE_ORDER;
- if(MVA_IS_BUSY(index))
- {
- is_busy = 1;
- nr_alloc += nr;
- }
- else // mva region is free
- {
- is_busy=0;
- nr_free += nr;
-
- max_bit=0;
- for(i=0; i<12; i++)
- {
- if(nr & (1<<i))
- max_bit = i;
- }
- frag[max_bit]++;
- }
-
- printk("0x%08x 0x%08x %4d %d\n", addr, size, nr, is_busy);
-
- }
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-
- printk("\n");
- printk("[M4U_K] mva alloc summary: (unit: blocks)========================>\n");
- printk("free: %d , alloc: %d, total: %d \n", nr_free, nr_alloc, nr_free+nr_alloc);
- printk("[M4U_K] free region fragments in 2^x blocks unit:===============\n");
- printk(" 0 1 2 3 4 5 6 7 8 9 10 11 \n");
- printk("%4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d \n",
- frag[0],frag[1],frag[2],frag[3],frag[4],frag[5],frag[6],frag[7],frag[8],frag[9],frag[10],frag[11]);
- printk("[M4U_K] mva alloc dump done=========================<\n");
-
-}
-
-void* mva_get_priv_ext(unsigned int mva)
-{
- void *priv = NULL;
- int index;
- unsigned long irq_flags;
-
- index = MVAGRAPH_INDEX(mva);
- if(index==0 || index>MVA_MAX_BLOCK_NR)
- {
- M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
- return NULL;
- }
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
-
- //find prev head/tail of this region
- while(mvaGraph[index]==0)
- index--;
-
- if(MVA_IS_BUSY(index))
- {
- priv = mvaInfoGraph[index];
- }
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
- return priv;
-
-}
-
-
-int mva_for_each_priv(mva_buf_fn_t *fn, void* data)
-{
- short index=1, nr=0;
- unsigned int mva;
- void *priv;
- unsigned long irq_flags;
- int ret;
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
-
- for(index=1; index<MVA_MAX_BLOCK_NR+1; index += nr)
- {
- mva = index << MVA_BLOCK_SIZE_ORDER;
- nr = MVA_GET_NR(index);
- if(MVA_IS_BUSY(index))
- {
- priv = mvaInfoGraph[index];
- ret = fn(priv, mva, mva+nr*MVA_BLOCK_SIZE, data);
- if(ret)
- break;
- }
- }
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
- return 0;
-}
-
-void* mva_get_priv(unsigned int mva)
-{
- void *priv = NULL;
- int index;
- unsigned long irq_flags;
-
- index = MVAGRAPH_INDEX(mva);
- if(index==0 || index>MVA_MAX_BLOCK_NR)
- {
- M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva);
- return NULL;
- }
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
-
- if(MVA_IS_BUSY(index))
- {
- priv = mvaInfoGraph[index];
- }
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
- return priv;
-
-}
-
-
-unsigned int m4u_do_mva_alloc(unsigned long va, unsigned int size, void *priv)
-{
- short s,end;
- short new_start, new_end;
- short nr = 0;
- unsigned int mvaRegionStart;
- unsigned long startRequire, endRequire, sizeRequire;
- unsigned long irq_flags;
-
- if(size == 0) return 0;
-
- ///-----------------------------------------------------
- ///calculate mva block number
- startRequire = va & (~M4U_PAGE_MASK);
- endRequire = (va+size-1)| M4U_PAGE_MASK;
- sizeRequire = endRequire-startRequire+1;
- nr = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER;//(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0);
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
-
- ///-----------------------------------------------
- ///find first match free region
- for(s=1; (s<(MVA_MAX_BLOCK_NR+1))&&(mvaGraph[s]<nr); s+=(mvaGraph[s]&MVA_BLOCK_NR_MASK))
- ;
- if(s > MVA_MAX_BLOCK_NR)
- {
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
- M4UMSG("mva_alloc error: no available MVA region for %d blocks!\n", nr);
- MMProfileLogEx(M4U_MMP_Events[M4U_MMP_M4U_ERROR], MMProfileFlagPulse, size, s);
-
- return 0;
- }
-
- ///-----------------------------------------------
- ///alloc a mva region
- end = s + mvaGraph[s] - 1;
-
- if(unlikely(nr == mvaGraph[s]))
- {
- MVA_SET_BUSY(s);
- MVA_SET_BUSY(end);
- mvaInfoGraph[s] = priv;
- mvaInfoGraph[end] = priv;
- }
- else
- {
- new_end = s + nr - 1;
- new_start = new_end + 1;
- //note: new_start may equals to end
- mvaGraph[new_start] = (mvaGraph[s]-nr);
- mvaGraph[new_end] = nr | MVA_BUSY_MASK;
- mvaGraph[s] = mvaGraph[new_end];
- mvaGraph[end] = mvaGraph[new_start];
-
- mvaInfoGraph[s] = priv;
- mvaInfoGraph[new_end] = priv;
- }
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-
- mvaRegionStart = (unsigned int)s;
-
- return (mvaRegionStart<<MVA_BLOCK_SIZE_ORDER) + mva_pageOffset(va);
-
-}
-
-unsigned int m4u_do_mva_alloc_fix(unsigned int mva, unsigned int size, void *priv)
-{
- short nr = 0;
- unsigned int startRequire, endRequire, sizeRequire;
- unsigned long irq_flags;
- short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
- short endIdx;
- short region_start, region_end;
-
- if(size == 0) return 0;
- if(startIdx==0 || startIdx>MVA_MAX_BLOCK_NR)
- {
- M4UMSG("mvaGraph index is 0. index=0x%x\n", startIdx);
- return 0;
- }
-
-
- ///-----------------------------------------------------
- ///calculate mva block number
- startRequire = mva & (~MVA_BLOCK_ALIGN_MASK);
- endRequire = (mva+size-1)| MVA_BLOCK_ALIGN_MASK;
- sizeRequire = endRequire-startRequire+1;
- nr = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER;//(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0);
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
-
- region_start = startIdx;
- //find prev head of this region
- while(mvaGraph[region_start]==0)
- region_start--;
-
- if(MVA_IS_BUSY(region_start) || (MVA_GET_NR(region_start) < nr+startIdx-region_start))
- {
- M4UMSG("mva is inuse index=0x%x, mvaGraph=0x%x\n", region_start, mvaGraph[region_start]);
- mva = 0;
- goto out;
- }
-
- //carveout startIdx~startIdx+nr-1 out of region_start
- endIdx = startIdx+nr-1;
- region_end = region_start + MVA_GET_NR(region_start) -1;
-
- if(startIdx==region_start && endIdx==region_end)
- {
- MVA_SET_BUSY(startIdx);
- MVA_SET_BUSY(endIdx);
- }
- else if(startIdx==region_start)
- {
- mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
- mvaGraph[endIdx] = mvaGraph[startIdx];
- mvaGraph[endIdx+1] = region_end - endIdx;
- mvaGraph[region_end] = mvaGraph[endIdx+1];
- }
- else if(endIdx == region_end)
- {
- mvaGraph[region_start] = startIdx - region_start;
- mvaGraph[startIdx-1] = mvaGraph[region_start];
- mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
- mvaGraph[endIdx] = mvaGraph[startIdx];
- }
- else
- {
- mvaGraph[region_start] = startIdx - region_start;
- mvaGraph[startIdx-1] = mvaGraph[region_start];
- mvaGraph[startIdx] = nr | MVA_BUSY_MASK;
- mvaGraph[endIdx] = mvaGraph[startIdx];
- mvaGraph[endIdx+1] = region_end - endIdx;
- mvaGraph[region_end] = mvaGraph[endIdx+1];
- }
-
- mvaInfoGraph[startIdx] = priv;
- mvaInfoGraph[endIdx] = priv;
-
-
-out:
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-
- return mva;
-
-}
-
-
-#define RightWrong(x) ( (x) ? "correct" : "error")
-int m4u_do_mva_free(unsigned int mva, unsigned int size)
-{
- short startIdx = mva >> MVA_BLOCK_SIZE_ORDER;
- short nr = mvaGraph[startIdx] & MVA_BLOCK_NR_MASK;
- short endIdx = startIdx + nr - 1;
- unsigned int startRequire, endRequire, sizeRequire;
- short nrRequire;
- unsigned long irq_flags;
-
- spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
- ///--------------------------------
- ///check the input arguments
- ///right condition: startIdx is not NULL && region is busy && right module && right size
- startRequire = mva & (unsigned int)(~M4U_PAGE_MASK);
- endRequire = (mva+size-1)| (unsigned int)M4U_PAGE_MASK;
- sizeRequire = endRequire-startRequire+1;
- nrRequire = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER;//(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0);
- if(!( startIdx != 0 //startIdx is not NULL
- && MVA_IS_BUSY(startIdx) // region is busy
- && (nr==nrRequire) //right size
- )
- )
- {
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
- M4UMSG("error to free mva========================>\n");
- M4UMSG("BufSize=%d(unit:0x%xBytes) (expect %d) [%s]\n",
- nrRequire, MVA_BLOCK_SIZE, nr, RightWrong(nrRequire==nr));
- M4UMSG("mva=0x%x, (IsBusy?)=%d (expect %d) [%s]\n",
- mva, MVA_IS_BUSY(startIdx),1, RightWrong(MVA_IS_BUSY(startIdx)));
- m4u_mvaGraph_dump();
- //m4u_mvaGraph_dump_raw();
- return -1;
- }
-
- mvaInfoGraph[startIdx] = NULL;
- mvaInfoGraph[endIdx] = NULL;
-
- ///--------------------------------
- ///merge with followed region
- if( (endIdx+1 <= MVA_MAX_BLOCK_NR)&&(!MVA_IS_BUSY(endIdx+1)))
- {
- nr += mvaGraph[endIdx+1];
- mvaGraph[endIdx] = 0;
- mvaGraph[endIdx+1] = 0;
- }
-
- ///--------------------------------
- ///merge with previous region
- if( (startIdx-1>0)&&(!MVA_IS_BUSY(startIdx-1)) )
- {
- int pre_nr = mvaGraph[startIdx-1];
- mvaGraph[startIdx] = 0;
- mvaGraph[startIdx-1] = 0;
- startIdx -= pre_nr;
- nr += pre_nr;
- }
- ///--------------------------------
- ///set region flags
- mvaGraph[startIdx] = nr;
- mvaGraph[startIdx+nr-1] = nr;
-
- spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
-
- return 0;
-
-}
-
-
-
-
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.c b/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.c
deleted file mode 100644
index 2e83ac2a8..000000000
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.c
+++ /dev/null
@@ -1,1062 +0,0 @@
-#include <asm/cacheflush.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <mach/mt_irq.h>
-
-#include "m4u_priv.h"
-
-typedef struct
-{
- imu_pgd_t *pgd;
- imu_pte_t *pte;
- unsigned int mva;
- unsigned long pa;
- unsigned int size;
- int valid;
-}m4u_pte_info_t;
-
-
-static inline void m4u_set_pgd_val(imu_pgd_t*pgd, unsigned int val)
-{
- COM_WriteReg32((unsigned long)&(imu_pgd_val(*pgd)) , val);
-}
-
-static inline void read_lock_domain(m4u_domain_t *domain)
-{
- mutex_lock(&domain->pgtable_mutex);
-}
-static inline void read_unlock_domain(m4u_domain_t *domain)
-{
- mutex_unlock(&domain->pgtable_mutex);
-}
-static inline void write_lock_domain(m4u_domain_t *domain)
-{
- mutex_lock(&domain->pgtable_mutex);
-}
-static inline void write_unlock_domain(m4u_domain_t *domain)
-{
- mutex_unlock(&domain->pgtable_mutex);
-}
-
-//should not hold pg_lock when call this func.
-static inline int m4u_get_pt_type(m4u_domain_t *domain, unsigned int mva)
-{
- imu_pgd_t *pgd;
- imu_pte_t *pte;
- int ret;
-
- read_lock_domain(domain);
-
- pgd = imu_pgd_offset(domain, mva);
-
- if(F_PGD_TYPE_IS_PAGE(*pgd))
- {
- pte = imu_pte_offset_map(pgd, mva);
- if(F_PTE_TYPE_GET(imu_pte_val(*pte))==F_PTE_TYPE_LARGE)
- {
- imu_pte_unmap(pte);
- ret = MMU_PT_TYPE_LARGE_PAGE;
- }
- else if(F_PTE_TYPE_GET(imu_pte_val(*pte))==F_PTE_TYPE_SMALL)
- {
- imu_pte_unmap(pte);
- ret = MMU_PT_TYPE_SMALL_PAGE;
- }
- else
- {
- imu_pte_unmap(pte);
- ret = -1;
- }
- }
- else if(F_PGD_TYPE_IS_SECTION(*pgd))
- {
- ret = MMU_PT_TYPE_SECTION;
- }
- else if(F_PGD_TYPE_IS_SUPERSECTION(*pgd))
- {
- ret = MMU_PT_TYPE_SUPERSECTION;
- }
- else
- {
- ret = -1;
- }
- read_unlock_domain(domain);
- return ret;
-}
-
-static inline unsigned int m4u_get_pt_type_size(int type)
-{
- if(type == MMU_PT_TYPE_SMALL_PAGE)
- return MMU_SMALL_PAGE_SIZE;
- else if(type == MMU_PT_TYPE_LARGE_PAGE)
- return MMU_LARGE_PAGE_SIZE;
- else if(type == MMU_PT_TYPE_SECTION)
- return MMU_SECTION_SIZE;
- else if(type == MMU_PT_TYPE_SUPERSECTION)
- return MMU_SUPERSECTION_SIZE;
- else
- return -1;
-}
-
-
-
-/***********************************************************/
-/** print pte info to log or sequncial file
- if data is NULL, info is out put to kernel log by printk
- if pte is valid, we will print like va->pgd->pte->pa
- if pte is invalid, we print as many info as we can.
-* @return NULL
-* @remark
-* @see
-* @author K Zhang @date 2013/11/18
-************************************************************/
-void* __m4u_print_pte(m4u_pte_info_t *info, void* data)
-{
- if(info->valid)
- {
- if(info->size==SZ_4K)
- {
- M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pte(0x%x)-->pa(0x%lx) small\n",
- info->mva, imu_pgd_val(*info->pgd), imu_pte_val(*info->pte), info->pa );
- }
- else if(info->size == SZ_64K)
- {
- M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pte(0x%x)-->pa(0x%lx) large\n",
- info->mva, imu_pgd_val(*info->pgd), imu_pte_val(*info->pte), info->pa );
- }
- else if(info->size == SZ_1M)
- {
- M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pa(0x%lx) section\n",
- info->mva, imu_pgd_val(*info->pgd), info->pa);
- }
- else if(info->size == SZ_16M)
- {
- M4U_PRINT_LOG_OR_SEQ(data, "mva(0x%x)-->pgd(0x%x)-->pa(0x%lx) super\n",
- info->mva, imu_pgd_val(*info->pgd), info->pa);
- }
- }
- else
- {
- M4U_PRINT_LOG_OR_SEQ(data, "va(0x%x)",info->mva);
- M4U_PRINT_LOG_OR_SEQ(data, "-->pgd(0x%x)",imu_pgd_val(*info->pgd));
- if(info->pte)
- M4U_PRINT_LOG_OR_SEQ(data, "-->pte(0x%x)", imu_pte_val(*info->pte));
- M4U_PRINT_LOG_OR_SEQ(data, " invalid \n");
- }
-
- return NULL;
-}
-
-//domain->pgtable_mutex should be held
-int m4u_get_pte_info(m4u_domain_t *domain, unsigned int mva, m4u_pte_info_t *pte_info)
-{
- imu_pgd_t *pgd;
- imu_pte_t *pte = NULL;
- unsigned int pa = 0;
- unsigned int size;
- int valid=1;
-
- pgd = imu_pgd_offset(domain, mva);
-
- if(F_PGD_TYPE_IS_PAGE(*pgd))
- {
- pte = imu_pte_offset_map(pgd, mva);
- if(F_PTE_TYPE_GET(imu_pte_val(*pte))==F_PTE_TYPE_LARGE)
- {
- pa = imu_pte_val(*pte)&F_PTE_PA_LARGE_MSK;
- pa |= mva&(~F_PTE_PA_LARGE_MSK);
- size = MMU_LARGE_PAGE_SIZE;
- }
- else if(F_PTE_TYPE_GET(imu_pte_val(*pte))==F_PTE_TYPE_SMALL)
- {
- pa = imu_pte_val(*pte)&F_PTE_PA_SMALL_MSK;
- pa |= mva&(~F_PTE_PA_SMALL_MSK);
- size = MMU_SMALL_PAGE_SIZE;
- }
- else
- {
- valid=0;
- size = MMU_SMALL_PAGE_SIZE;
- }
- }
- else
- {
- pte = NULL;
- if(F_PGD_TYPE_IS_SECTION(*pgd))
- {
- pa = imu_pgd_val(*pgd)&F_PGD_PA_SECTION_MSK;
- pa |= mva&(~F_PGD_PA_SECTION_MSK);
- size = MMU_SECTION_SIZE;
- }
- else if(F_PGD_TYPE_IS_SUPERSECTION(*pgd))
- {
- pa = imu_pgd_val(*pgd)&F_PGD_PA_SUPERSECTION_MSK;
- pa |= mva&(~F_PGD_PA_SUPERSECTION_MSK);
- size = MMU_SUPERSECTION_SIZE;
- }
- else
- {
- valid=0;
- size = MMU_SECTION_SIZE;
- }
- }
-
- pte_info->pgd = pgd;
- pte_info->pte = pte;
- pte_info->mva = mva;
- pte_info->pa = pa;
- pte_info->size = size;
- pte_info->valid = valid;
- return 0;
-}
-
-typedef void* (m4u_pte_fn_t)(m4u_pte_info_t *pte_info, void* data);
-
-/***********************************************************/
-/** interate all pte, and call fn for each pte.
-* @param domain
-* @param fn -- to be called for each pte
-* @param data -- private data for fn
-*
-* @return NULL of sucess, non-NULL if interrupted by fn.
-* @remark
- 1. fn will only be called when pte is valid.
- 2. if fn return non-NULL, the iteration will return imediately.
-* @see
-* @author K Zhang @date 2013/11/18
-************************************************************/
-void* m4u_for_each_pte(m4u_domain_t *domain, m4u_pte_fn_t *fn, void* data)
-{
- unsigned int mva=0;
- void* ret;
- m4u_pte_info_t pte_info;
-
- read_lock_domain(domain);
- while(1)
- {
- m4u_get_pte_info(domain, mva, &pte_info);
-
- if(pte_info.valid)
- {
- ret = fn(&pte_info, data);
- if(ret)
- {
- read_unlock_domain(domain);
- return ret;
- }
- }
-
- if(mva + pte_info.size < mva) //over flow
- break;
- else
- mva += pte_info.size;
- }
-
- read_unlock_domain(domain);
- return NULL;
-}
-
-//dump pte info for mva, no matter it's valid or not
-//this function doesn't lock pgtable lock.
-void m4u_dump_pte_nolock(m4u_domain_t *domain, unsigned int mva)
-{
- m4u_pte_info_t pte_info;
-
- m4u_get_pte_info(domain, mva, &pte_info);
-
- __m4u_print_pte(&pte_info, NULL);
-}
-
-void m4u_dump_pte(m4u_domain_t *domain, unsigned int mva)
-{
- read_lock_domain(domain);
- m4u_dump_pte_nolock(domain, mva);
- read_unlock_domain(domain);
-}
-
-unsigned long m4u_get_pte(m4u_domain_t *domain, unsigned int mva)
-{
- m4u_pte_info_t pte_info;
-
- read_lock_domain(domain);
- m4u_get_pte_info(domain, mva, &pte_info);
- read_unlock_domain(domain);
-
- return pte_info.pa;
-}
-
-/***********************************************************/
-/** dump pagetable to sequncial file or kernel log.
-* @param domain -- domain to dump
-* @param seq -- seq file. if NULL, we will dump to kernel log
-*
-* @remark this func will lock pgtable_lock, it may sleep.
-* @author K Zhang @date 2013/11/18
-************************************************************/
-void m4u_dump_pgtable(m4u_domain_t *domain, struct seq_file *seq)
-{
- M4U_PRINT_LOG_OR_SEQ(seq, "m4u dump pgtable start ==============>\n");
- m4u_for_each_pte(domain, __m4u_print_pte, seq);
- M4U_PRINT_LOG_OR_SEQ(seq, "m4u dump pgtable done ==============>\n");
-}
-
-/* M4U_PROT_CACHE indicates M4U_PROT_SHARE, which route transaction to CCI*/
-static inline unsigned int m4u_prot_fixup(unsigned int prot)
-{
- //don't support read/write protect
-/*
- if(unlikely(!(prot & (M4U_PROT_READ|M4U_PROT_WRITE))))
- prot |= M4U_PROT_READ|M4U_PROT_WRITE;
- if(unlikely((prot&M4U_PROT_WRITE) && !(prot&M4U_PROT_READ)))
- prot |= M4U_PROT_WRITE;
-*/
- if(prot & M4U_PROT_CACHE)
- prot |= M4U_PROT_SHARE;
-
- return prot;
-}
-
-
-
-/***********************************************************/
-/** convert m4u_prot to hardware pgd/pte attribute
-* @param prot -- m4u_prot flags
-*
-* @return pgd or pte attribute
-* @remark
-* @see
-* @author K Zhang @date 2013/11/18
-************************************************************/
-static inline unsigned int __m4u_get_pgd_attr_16M(unsigned int prot)
-{
- unsigned int pgprot;
- pgprot = F_PGD_TYPE_SUPERSECTION;
- pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_SECTION(1);
- pgprot |= (prot & M4U_PROT_SHARE) ? F_PGD_S_BIT : 0;
- pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT|F_PGD_B_BIT): 0;
- return pgprot;
-}
-static inline unsigned int __m4u_get_pgd_attr_1M(unsigned int prot)
-{
- unsigned int pgprot;
- pgprot = F_PGD_TYPE_SECTION;
- pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_SECTION(1);
- pgprot |= (prot & M4U_PROT_SHARE) ? F_PGD_S_BIT : 0;
- pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT|F_PGD_B_BIT): 0;
- return pgprot;
-}
-static inline unsigned int __m4u_get_pgd_attr_page(unsigned int prot)
-{
- unsigned int pgprot;
- pgprot = F_PGD_TYPE_PAGE;
- pgprot |= (prot & M4U_PROT_SEC) ? 0 : F_PGD_NS_BIT_PAGE(1);
- return pgprot;
-}
-static inline unsigned int __m4u_get_pte_attr_64K(unsigned int prot)
-{
- unsigned int pgprot;
- pgprot = F_PTE_TYPE_LARGE;
- pgprot |= (prot & M4U_PROT_SHARE) ? F_PTE_S_BIT : 0;
- pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT|F_PGD_B_BIT): 0;
- return pgprot;
-}
-static inline unsigned int __m4u_get_pte_attr_4K(unsigned int prot)
-{
- unsigned int pgprot;
- pgprot = F_PTE_TYPE_SMALL;
- pgprot |= (prot & M4U_PROT_SHARE) ? F_PTE_S_BIT : 0;
- pgprot |= (prot & M4U_PROT_CACHE) ? (F_PGD_C_BIT|F_PGD_B_BIT): 0;
- return pgprot;
-}
-
-
-
-/***********************************************************/
-/** cache flush for modified pte.
- notes: because pte is allocated using slab, cache sync is needed.
-*
-* @author K Zhang @date 2013/11/18
-************************************************************/
-int m4u_clean_pte(m4u_domain_t *domain, unsigned int mva, unsigned int size)
-{
- imu_pgd_t *pgd;
- unsigned int end_plus_1=mva+size;
-
- while(mva < end_plus_1)
- {
- pgd = imu_pgd_offset(domain, mva);
-
- if(F_PGD_TYPE_IS_PAGE(*pgd))
- {
- imu_pte_t *pte, *pte_end;
- unsigned int next_mva, sync_entry_nr;
-
- pte = imu_pte_offset_map(pgd, mva);
- if(!pte)
- {
- //invalid pte: goto next pgd entry
- mva = m4u_calc_next_mva(mva, end_plus_1, MMU_SECTION_SIZE);
- continue;
- }
-
- next_mva = m4u_calc_next_mva(mva, end_plus_1, MMU_SECTION_SIZE);
- sync_entry_nr = (next_mva - mva)/MMU_SMALL_PAGE_SIZE;
- pte_end = pte + sync_entry_nr;
- //do cache sync for [pte, pte_end)
- dmac_flush_range((void*)pte, (void*)pte_end);
- //M4UMSG("dmac_flush_range: 0x%x ~ 0x%x\n", pte, pte_end);
-
- imu_pte_unmap(pte);
- mva = next_mva;
-
- }
- else if(F_PGD_TYPE_IS_SUPERSECTION(*pgd))
- {
- //for superseciton: don't need to sync.
- mva = m4u_calc_next_mva(mva, end_plus_1, MMU_SUPERSECTION_SIZE);
- }
- else
- {
- //for section/invalid: don't need to sync
- mva = m4u_calc_next_mva(mva, end_plus_1, MMU_SECTION_SIZE);
- }
- }
-
- return 0;
-}
-
-struct kmem_cache *gM4u_pte_kmem = NULL;
-int m4u_pte_allocator_init(void)
-{
- gM4u_pte_kmem = kmem_cache_create("m4u_pte", IMU_BYTES_PER_PTE, IMU_BYTES_PER_PTE, 0, NULL);
- M4UINFO("%s: gM4u_pte_kmem = 0x%p, IMU_BYTES_PER_PTE = %d.\n", __FUNCTION__, gM4u_pte_kmem, (int)IMU_BYTES_PER_PTE);
-
- if(IS_ERR_OR_NULL(gM4u_pte_kmem))
- {
- M4UMSG("error in %s: ret = %p.\n", __FUNCTION__, gM4u_pte_kmem);
- return -1;
- }
-
- return 0;
-}
-
-/***********************************************************/
-/** allocate a new pte
-* @param domain
-* @param pgd -- pgd to allocate for
-* @param pgprot
-*
-* @return 0 -- pte is allocated
- 1 -- pte is not allocated, because it's allocated by others
- <0 -- error
-* @remark
-* @see
-* @author K Zhang @date 2013/11/18
-************************************************************/
-int m4u_alloc_pte(m4u_domain_t* domain, imu_pgd_t *pgd, unsigned int pgprot)
-{
- void* pte_new_va;
- phys_addr_t pte_new;
- //pte_new_va = (unsigned int)kzalloc(IMU_BYTES_PER_PTE, GFP_KERNEL);
- //pte_new_va = (unsigned int)get_zeroed_page(GFP_KERNEL);
- pte_new_va = kmem_cache_zalloc(gM4u_pte_kmem, GFP_KERNEL);
- if(unlikely(!pte_new_va))
- {
- m4u_aee_print("%s: fail, nomemory\n", __FUNCTION__);
- return -ENOMEM;
- }
- pte_new = __pa(pte_new_va);
-
- //check pte alignment -- must 1K align
- if(unlikely(pte_new & (IMU_BYTES_PER_PTE-1)))
- {
- m4u_aee_print("%s: fail, not algin pa=0x%p, va=0x%p\n", __FUNCTION__, (void*)pte_new, pte_new_va);
- //kfree(pte_new_va);
- kmem_cache_free(gM4u_pte_kmem, (void *)pte_new_va);
- return -ENOMEM;
- }
-
- //lock and check again
- //because someone else may have allocated for this pgd first
- if(likely(!imu_pgd_val(*pgd)))
- {
- m4u_set_pgd_val(pgd, (unsigned int)(pte_new) | pgprot);
- M4ULOG_MID("%s: pgd: 0x%p, pte_va:0x%p, pte_pa: 0x%pa, value: 0x%x\n", __FUNCTION__, pgd, pte_new_va, &pte_new,(unsigned int)(pte_new) | pgprot);
-
- return 0;
-
- }
- else
- {
- //allocated by other thread
- //kfree(__va(pte_new));
- M4ULOG_LOW("m4u pte allocated by others: pgd=0x%p\n", pgd);
- kmem_cache_free(gM4u_pte_kmem, (void *)pte_new_va);
- return 1;
- }
-}
-
-int m4u_free_pte(m4u_domain_t* domain, imu_pgd_t *pgd)
-{
- imu_pte_t *pte_old;
-
- pte_old = imu_pte_map(pgd);
- m4u_set_pgd_val(pgd, 0);
-
- //kfree(pte_old);
- //free_page(pte_old);
- kmem_cache_free(gM4u_pte_kmem, pte_old);
-
- return 0;
-}
-
-
-/***********************************************************/
-/** m4u_map_XX functions.
- map mva<->pa
-notes: these function dosen't clean pte and invalid tlb
- for performance concern.
- callers should clean pte + invalid tlb after mapping.
-
-* @author K Zhang @date 2013/11/19
-************************************************************/
-int m4u_map_16M(m4u_domain_t* m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
-{
- int i;
- imu_pgd_t* pgd;
- unsigned int pgprot;
- unsigned int padscpt;
-
- if( (mva&(~F_PGD_PA_SUPERSECTION_MSK)) != ((unsigned int)pa&(~F_PGD_PA_SUPERSECTION_MSK)))
- {
- m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "supersection");
- return -EINVAL;
- }
-
- mva &= F_PGD_PA_SUPERSECTION_MSK;
- if( pa > 0xffffffffL)
- padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK|F_PGD_BIT32_BIT);
- else
- padscpt = (unsigned int)pa & F_PGD_PA_SUPERSECTION_MSK;
-
- pgprot = __m4u_get_pgd_attr_16M(prot);
- pgd = imu_pgd_offset(m4u_domain, mva);
-
- M4ULOG_LOW("%s: mva: 0x%x, pgd: 0x%p (0x%p + 0x%x), pa: 0x%lx, value: 0x%x\n", __FUNCTION__, mva, pgd, (m4u_domain)->pgd, imu_pgd_index(mva), pa, padscpt | pgprot);
-
- for(i=0; i<16; i++)
- {
- if(unlikely(imu_pgd_val(*pgd)))
- {
- m4u_aee_print("%s: mva=0x%x, pgd=0x%x, i=%d\n", __FUNCTION__, mva, imu_pgd_val(*pgd), i);
- goto err_out;
- }
- m4u_set_pgd_val(pgd, padscpt | pgprot);
- pgd++;
- }
-
- return 0;
-
-err_out:
- for(pgd--; i>0; i--)
- {
- m4u_set_pgd_val(pgd, 0);
- pgd--;
- }
- return -1;
-}
-
-int m4u_map_1M(m4u_domain_t* m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
-{
- imu_pgd_t* pgd;
- unsigned int pgprot;
- unsigned int padscpt;
-
- if( (mva&(~F_PGD_PA_SECTION_MSK)) != ((unsigned int)pa&(~F_PGD_PA_SECTION_MSK)))
- {
- m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "section");
- return -EINVAL;
- }
-
- mva &= F_PGD_PA_SECTION_MSK;
- if( pa > 0xffffffffL)
- padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK|F_PGD_BIT32_BIT);
- else
- padscpt = (unsigned int)pa & F_PGD_PA_SECTION_MSK;
-
- pgprot = __m4u_get_pgd_attr_1M(prot);
- pgd = imu_pgd_offset(m4u_domain, mva);
-
- if(unlikely(imu_pgd_val(*pgd)))
- {
- m4u_aee_print("%s: mva=0x%x, pgd=0x%x\n", __FUNCTION__, mva, imu_pgd_val(*pgd));
- return -1;
- }
-
- m4u_set_pgd_val(pgd, padscpt | pgprot);
-
- M4ULOG_LOW("%s: mva: 0x%x, pgd: 0x%p (0x%p + 0x%x), pa: 0x%lx, value: 0x%x\n", __FUNCTION__, mva, pgd, (m4u_domain)->pgd, imu_pgd_index(mva), pa, padscpt | pgprot);
-
- return 0;
-}
-
-int m4u_map_64K(m4u_domain_t* m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
-{
- int ret,i;
- imu_pgd_t* pgd;
- imu_pte_t* pte;
- unsigned int pte_new, pgprot;
- unsigned int padscpt;
-
- if( (mva&(~F_PTE_PA_LARGE_MSK)) != ((unsigned int)pa&(~F_PTE_PA_LARGE_MSK)))
- {
- m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "large page");
- return -EINVAL;
- }
-
- mva &= F_PTE_PA_LARGE_MSK;
- if( pa > 0xffffffffL)
- padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK|F_PTE_BIT32_BIT);
- else
- padscpt = (unsigned int)pa & F_PTE_PA_LARGE_MSK;
-
- pgprot = __m4u_get_pgd_attr_page(prot);
- pgd = imu_pgd_offset(m4u_domain, mva);
- if(!imu_pgd_val(*pgd))
- {
- ret = m4u_alloc_pte(m4u_domain, pgd, pgprot);
- if(ret<0)
- return ret;
- else if(ret>0)
- pte_new=0;
- else
- pte_new=1;
- }
- else
- {
- if(unlikely((imu_pgd_val(*pgd)&(~F_PGD_PA_PAGETABLE_MSK)) != pgprot))
- {
- m4u_aee_print("%s: mva=0x%x, pgd=0x%x, pgprot=0x%x\n", __FUNCTION__, mva, imu_pgd_val(*pgd), pgprot);
- return -1;
- }
- pte_new=0;
- }
-
- pgprot = __m4u_get_pte_attr_64K(prot);
- pte = imu_pte_offset_map(pgd, mva);
-
- M4ULOG_LOW("%s: mva: 0x%x, pte: 0x%p (0x%p + 0x%x), pa: 0x%lx, value: 0x%x\n", __FUNCTION__, mva, &imu_pte_val(*pte), imu_pte_map(pgd), imu_pte_index(mva), pa, padscpt | pgprot);
-
- for(i=0; i<16; i++)
- {
- if(unlikely(imu_pte_val(pte[i])))
- {
- m4u_aee_print("%s: pte=0x%x, i=%d\n", __FUNCTION__, imu_pte_val(pte[i]), i);
- goto err_out;
- }
- imu_pte_val(pte[i]) = padscpt | pgprot;
- }
- imu_pte_unmap(pte);
-
- return 0;
-
-err_out:
- for(i--; i>=0; i--)
- imu_pte_val(pte[i]) = 0;
- imu_pte_unmap(pte);
-
- if(pte_new)
- {
- m4u_free_pte(m4u_domain, pgd);
- }
- return -1;
-}
-
-int m4u_map_4K(m4u_domain_t* m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot)
-{
- int ret, pte_new;
- imu_pgd_t* pgd;
- imu_pte_t* pte;
- unsigned int pgprot;
- unsigned int padscpt;
-
- if((mva&(~F_PTE_PA_SMALL_MSK)) != ((unsigned int)pa&(~F_PTE_PA_SMALL_MSK)))
- {
- m4u_aee_print("error to mk_pte: mva=0x%x, pa=0x%lx, type=%s\n", mva, pa, "small page");
- return -EINVAL;
- }
-
- mva &= F_PTE_PA_SMALL_MSK;
- if( pa > 0xffffffffL)
- padscpt = (unsigned int)pa & (F_PTE_PA_SMALL_MSK|F_PTE_BIT32_BIT);
- else
- padscpt = (unsigned int)pa & F_PTE_PA_SMALL_MSK;
-
- pgprot = __m4u_get_pgd_attr_page(prot);
- pgd = imu_pgd_offset(m4u_domain, mva);
- if(!imu_pgd_val(*pgd))
- {
- ret = m4u_alloc_pte(m4u_domain, pgd, pgprot);
- if(ret<0)
- return ret;
- else if(ret>0)
- pte_new=0;
- else
- pte_new=1;
- }
- else
- {
- if(unlikely((imu_pgd_val(*pgd)&(~F_PGD_PA_PAGETABLE_MSK)) != pgprot))
- {
- m4u_aee_print("%s: mva=0x%x, pgd=0x%x, pgprot=0x%x\n", __FUNCTION__, mva, imu_pgd_val(*pgd), pgprot);
- return -1;
- }
- pte_new=0;
- }
-
- pgprot = __m4u_get_pte_attr_4K(prot);
- pte = imu_pte_offset_map(pgd, mva);
-
- if(unlikely(imu_pte_val(*pte)))
- {
- m4u_aee_print("%s: pte=0x%x\n", __FUNCTION__, imu_pte_val(*pte));
- goto err_out;
- }
-
- imu_pte_val(*pte) = padscpt | pgprot;
-
- M4ULOG_LOW("%s: mva: 0x%x, pte: 0x%p (0x%p + 0x%x), pa: 0x%lx, value: 0x%x\n", __FUNCTION__, mva, &imu_pte_val(*pte), imu_pte_map(pgd), imu_pte_index(mva), pa, padscpt | imu_pte_val(*pte));
-
- imu_pte_unmap(pte);
-
- return 0;
-
-err_out:
- imu_pte_unmap(pte);
- if(pte_new)
- {
- m4u_free_pte(m4u_domain, pgd);
- }
- return -1;
-}
-
-//notes: both iova & paddr should be aligned.
-static inline int m4u_map_phys_align(m4u_domain_t *m4u_domain, unsigned int iova,
- unsigned long paddr, unsigned int size, unsigned int prot)
-{
- int ret;
-
- if(size==SZ_16M)
- ret = m4u_map_16M(m4u_domain, iova, paddr, prot);
- else if(size==SZ_1M)
- ret = m4u_map_1M(m4u_domain, iova, paddr, prot);
- else if(size==SZ_64K)
- ret = m4u_map_64K(m4u_domain, iova, paddr, prot);
- else if(size==SZ_4K)
- ret = m4u_map_4K(m4u_domain, iova, paddr, prot);
- else
- {
- m4u_aee_print("%s: fail size=0x%x\n", __FUNCTION__, size);
- return -1;
- }
-
- return ret;
-}
-
-
-/***********************************************************/
-/** map a physical continous memory to iova (mva).
-* @param m4u_domain domain
-* @param iova -- iova (mva)
-* @param paddr -- physical address
-* @param size -- size
-* @param prot -- m4u_prot
-*
-* @return 0 on sucess, others on fail
-* @remark
-* @see refer to kernel/drivers/iommu/iommu.c iommu_map()
-* @author K Zhang @date 2013/11/19
-************************************************************/
-int m4u_map_phys_range(m4u_domain_t *m4u_domain, unsigned int iova,
- unsigned long paddr, unsigned int size, unsigned int prot)
-{
- unsigned int min_pagesz;
- int ret = 0;
-
- /* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(m4u_domain->pgsize_bitmap);
-
- /*
- * both the virtual address and the physical one, as well as
- * the size of the mapping, must be aligned (at least) to the
- * size of the smallest page supported by the hardware
- */
- if (!IS_ALIGNED(iova | (unsigned int)paddr | size, min_pagesz)) {
- M4UMSG("unaligned: iova 0x%d pa 0x%lx size 0x%x min_pagesz "
- "0x%x\n", iova, paddr,
- size, min_pagesz);
- return -EINVAL;
- }
-
- while (size) {
- unsigned long pgsize, addr_merge = (unsigned long)iova | paddr;
- unsigned int pgsize_idx;
-
- /* Max page size that still fits into 'size' */
- pgsize_idx = __fls(size);
-
- /* need to consider alignment requirements ? */
- if (likely(addr_merge)) {
- /* Max page size allowed by both iova and paddr */
- unsigned int align_pgsize_idx = __ffs(addr_merge);
-
- pgsize_idx = min(pgsize_idx, align_pgsize_idx);
- }
-
- /* build a mask of acceptable page sizes */
- pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
- /* throw away page sizes not supported by the hardware */
- pgsize &= m4u_domain->pgsize_bitmap;
-
- /* make sure we're still sane */
- BUG_ON(!pgsize);
-
- /* pick the biggest page */
- pgsize_idx = __fls(pgsize);
- pgsize = 1UL << pgsize_idx;
-
- M4ULOG_LOW("mapping: iova 0x%x pa 0x%lx pgsize %lu\n", iova,
- paddr, pgsize);
-
- ret = m4u_map_phys_align(m4u_domain, iova, paddr, pgsize, prot);
- if (ret)
- break;
-
- iova += pgsize;
- paddr += pgsize;
- size -= pgsize;
- }
-
- /* unroll mapping in case something went wrong */
- if (ret)
- m4u_unmap(m4u_domain, iova, size);
- return ret;
-}
-
-
-
-int m4u_map_sgtable(m4u_domain_t *m4u_domain, unsigned int mva,
- struct sg_table *sg_table, unsigned int size, unsigned int prot)
-{
- int i, ret;
- struct scatterlist *sg;
- unsigned int map_mva=mva, map_end=mva+size;
-
- prot = m4u_prot_fixup(prot);
-
- write_lock_domain(m4u_domain);
-
- for_each_sg(sg_table->sgl, sg, sg_table->nents, i)
- {
- dma_addr_t pa;
- unsigned int len;
-
- pa = get_sg_phys(sg);
- len = sg_dma_len(sg);
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
- if(0 == sg_dma_address(sg))
- {
- len = sg->length;
- }
-#endif
-
- M4ULOG_LOW("%s: for_each_sg i: %d, len: %d, mva: 0x%x\n", __FUNCTION__, i, len, map_mva);
-
-
- if(map_mva+len > map_end)
- {
- M4UMSG("%s: map_mva(0x%x)+len(0x%x)>end(0x%x)\n", __FUNCTION__, map_mva, len, map_end);
- break;
- }
- if(len == SZ_4K) //for most cases
- {
- ret = m4u_map_4K(m4u_domain, map_mva, pa, prot);
- }
- else
- {
- ret = m4u_map_phys_range(m4u_domain, map_mva, pa, len, prot);
- }
-
- if(ret)
- {
- M4UMSG("%s: ret: %d\n", __FUNCTION__, ret);
- goto err_out;
- }
- else
- {
- map_mva += len;
- }
- }
-
- if(map_mva < map_end)
- {
- M4UMSG("%s: map_mva(0x%x) < map_end(0x%x)\n", __FUNCTION__, map_mva, map_end);
- goto err_out;
- }
-
- m4u_clean_pte(m4u_domain, mva, size);
-
- m4u_invalid_tlb_by_range(m4u_domain, mva, mva+size-1);
-
- write_unlock_domain(m4u_domain);
-
- return 0;
-
-err_out:
- write_unlock_domain(m4u_domain);
-
- m4u_unmap(m4u_domain, mva, size);
- return -EINVAL;
-}
-
-
-
-int m4u_check_free_pte(m4u_domain_t *domain, imu_pgd_t *pgd)
-{
- imu_pte_t *pte;
- int i;
-
- pte = imu_pte_map(pgd);
- for(i=0; i<IMU_PTRS_PER_PTE; i++)
- {
- if(imu_pte_val(*pte)!=0)
- break;
- }
- if(i==IMU_PTRS_PER_PTE)
- {
- m4u_free_pte(domain, pgd);
- m4u_set_pgd_val(pgd, 0);
- return 0;
- }
- else
- {
- return 1;
- }
-}
-
-int m4u_unmap(m4u_domain_t *domain, unsigned int mva, unsigned int size)
-{
- imu_pgd_t *pgd;
- int i, ret;
- unsigned int start=mva;
- unsigned int end_plus_1=mva+size;
-
- write_lock_domain(domain);
- while(mva < end_plus_1)
- {
- pgd = imu_pgd_offset(domain, mva);
-
- if(F_PGD_TYPE_IS_PAGE(*pgd))
- {
- imu_pte_t *pte;
- unsigned int pte_offset;
- unsigned int num_to_clean;
-
- pte_offset = imu_pte_index(mva);
- num_to_clean = min((unsigned int)((end_plus_1-mva)/PAGE_SIZE), (unsigned int)(IMU_PTRS_PER_PTE-pte_offset));
-
- pte = imu_pte_offset_map(pgd, mva);
-
- memset(pte, 0, num_to_clean<<2);
-
- ret = m4u_check_free_pte(domain, pgd);
- if(ret==1)
- { //pte is not freed, need to flush pte
- m4u_clean_pte(domain, mva, num_to_clean<<PAGE_SHIFT);
- }
-
- mva += num_to_clean<<PAGE_SHIFT;
- }
- else if(F_PGD_TYPE_IS_SECTION(*pgd))
- {
- m4u_set_pgd_val(pgd, 0);
- mva += MMU_SECTION_SIZE;
- }
- else if(F_PGD_TYPE_IS_SUPERSECTION(*pgd))
- {
- imu_pgd_t *start = imu_supersection_start(pgd);
- if(unlikely(start != pgd))
- m4u_aee_print("%s: suppersec not align, mva=0x%x, pgd=0x%x\n", __FUNCTION__, mva, imu_pgd_val(*pgd));
-
- for(i=0; i<16; i++)
- imu_pgd_val(start[i]) = 0;
-
- mva = (mva+MMU_SUPERSECTION_SIZE)&(~(MMU_SUPERSECTION_SIZE-1)); //must align
- }
- else
- {
- mva += MMU_SECTION_SIZE;
- }
- }
-
- m4u_invalid_tlb_by_range(domain, start, end_plus_1-1);
-
- write_unlock_domain(domain);
- return 0;
-}
-
-
-
-
-
-int m4u_debug_pgtable_show(struct seq_file *s, void *unused)
-{
- m4u_dump_pgtable(s->private, s);
- return 0;
-}
-
-int m4u_debug_pgtable_open(struct inode *inode, struct file *file)
-{
- return single_open(file, m4u_debug_pgtable_show, inode->i_private);
-}
-
-
-struct file_operations m4u_debug_pgtable_fops = {
- .open = m4u_debug_pgtable_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-int m4u_pgtable_init(struct m4u_device *m4u_dev, m4u_domain_t *m4u_domain)
-{
-
- //======= alloc pagetable=======================
- m4u_domain->pgd= dma_alloc_coherent(m4u_dev->pDev[0], M4U_PGD_SIZE, &(m4u_domain->pgd_pa), GFP_KERNEL);
-
- if(!(m4u_domain->pgd))
- {
- M4UMSG("dma_alloc_coherent error! dma memory not available.\n");
- return -1;
- }
- if((unsigned int)(m4u_domain->pgd_pa) & (M4U_PGD_SIZE-1))
- {
- M4UMSG("dma_alloc_coherent memory not align. 0x%pad.\n", &m4u_domain->pgd_pa);
- return -1;
- }
-
- M4UINFO("dma_alloc_coherent success! pagetable_va=0x%p, pagetable_pa=0x%pad.\n", m4u_domain->pgd, &m4u_domain->pgd_pa);
-
- memset((void*)m4u_domain->pgd, 0, M4U_PGD_SIZE);
- //======= alloc pagetable done=======================
-
- if(0 != m4u_pte_allocator_init())
- return -1;
-
- debugfs_create_file("pgtable", 0644, m4u_dev->debug_root, m4u_domain, &m4u_debug_pgtable_fops);
-
- return 0;
-}
-
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.h b/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.h
deleted file mode 100644
index b2940ad25..000000000
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_pgtable.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef __M4U_PGTABLE_H__
-#define __M4U_PGTABLE_H__
-
-#include "m4u_reg.h"
-
-//=================================================================
-//2 level pagetable: pgd -> pte
-
-#define F_PTE_TYPE_MSK F_MSK(1,0)
-#define F_PTE_TYPE_SET(val) F_VAL(val,1,0)
-#define F_PTE_TYPE_GET(regval) F_MSK_SHIFT(regval,1,0)
-#define F_PTE_TYPE_LARGE (0x1)
-#define F_PTE_TYPE_SMALL (0x2)
-#define F_PTE_B_BIT F_BIT_SET(2)
-#define F_PTE_C_BIT F_BIT_SET(3)
-#define F_PTE_AP_MSK F_MSK(5,4)
-#define F_PTE_AP_SET(val) F_VAL(val,5,4)
-#define F_PTE_AP_GET(regval) F_MSK_SHIFT(regval,5,4)
-#define F_PTE_TEX_MSK F_MSK(8,6)
-#define F_PTE_TEX_SET(val) F_VAL(val,8,6)
-#define F_PTE_TEX_GET(regval) F_MSK_SHIFT(regval,8,6)
-#define F_PTE_BIT32_BIT F_BIT_SET(9)
-#define F_PTE_S_BIT F_BIT_SET(10)
-#define F_PTE_NG_BIT F_BIT_SET(11)
-#define F_PTE_PA_LARGE_MSK F_MSK(31,16)
-#define F_PTE_PA_LARGE_SET(val) F_VAL(val,31,16)
-#define F_PTE_PA_LARGE_GET(regval) F_MSK_SHIFT(regval,31,16)
-#define F_PTE_PA_SMALL_MSK F_MSK(31,12)
-#define F_PTE_PA_SMALL_SET(val) F_VAL(val,31,12)
-#define F_PTE_PA_SMALL_GET(regval) F_MSK_SHIFT(regval,31,12)
-#define F_PTE_TYPE_IS_LARGE_PAGE(pte) ((imu_pte_val(pte)&0x3)==F_PTE_TYPE_LARGE)
-#define F_PTE_TYPE_IS_SMALL_PAGE(pte) ((imu_pte_val(pte)&0x3)==F_PTE_TYPE_SMALL)
-
-
-#define F_PGD_TYPE_PAGE (0x1)
-#define F_PGD_TYPE_PAGE_MSK (0x3)
-#define F_PGD_TYPE_SECTION (0x2)
-#define F_PGD_TYPE_SUPERSECTION (0x2|(1<<18))
-#define F_PGD_TYPE_SECTION_MSK (0x3|(1<<18))
-#define F_PGD_TYPE_IS_PAGE(pgd) ((imu_pgd_val(pgd)&3)==1)
-#define F_PGD_TYPE_IS_SECTION(pgd) \
- (F_PGD_TYPE_IS_PAGE(pgd) ? 0 : ((imu_pgd_val(pgd)&F_PGD_TYPE_SECTION_MSK)==F_PGD_TYPE_SECTION))
-#define F_PGD_TYPE_IS_SUPERSECTION(pgd) \
- (F_PGD_TYPE_IS_PAGE(pgd) ? 0 : ((imu_pgd_val(pgd)&F_PGD_TYPE_SECTION_MSK)==F_PGD_TYPE_SUPERSECTION))
-
-
-#define F_PGD_B_BIT F_BIT_SET(2)
-#define F_PGD_C_BIT F_BIT_SET(3)
-#define F_PGD_AP_MSK F_MSK(11,10)
-#define F_PGD_AP_SET(val) F_VAL(val,11,10)
-#define F_PGD_AP_GET(regval) F_MSK_SHIFT(regval,11,10)
-#define F_PGD_TEX_MSK F_MSK(14,12)
-#define F_PGD_TEX_SET(val) F_VAL(val,14,12)
-#define F_PGD_TEX_GET(regval) F_MSK_SHIFT(regval,14,12)
-#define F_PGD_BIT32_BIT F_BIT_SET(9)
-#define F_PGD_S_BIT F_BIT_SET(16)
-#define F_PGD_NG_BIT F_BIT_SET(17)
-#define F_PGD_NS_BIT_PAGE(ns) F_BIT_VAL(ns, 3)
-#define F_PGD_NS_BIT_SECTION(ns) F_BIT_VAL(ns, 19)
-#define F_PGD_NS_BIT_SUPERSECTION(ns) F_BIT_VAL(ns, 19)
-
-
-
-#define F_PGD_PA_PAGETABLE_MSK F_MSK(31,10)
-#define F_PGD_PA_PAGETABLE_SET(val) F_VAL(val,31,10)
-#define F_PGD_PA_SECTION_MSK F_MSK(31,20)
-#define F_PGD_PA_SECTION_SET(val) F_VAL(val,31,20)
-#define F_PGD_PA_SUPERSECTION_MSK F_MSK(31,24)
-#define F_PGD_PA_SUPERSECTION_SET(val) F_VAL(val,31,24)
-
-//pagetable walk
-#define IMU_PGDIR_SHIFT 20
-#define IMU_PAGE_SHIFT 12
-#define IMU_PTRS_PER_PGD 4096
-#define IMU_PTRS_PER_PTE 256
-#define IMU_BYTES_PER_PTE (IMU_PTRS_PER_PTE*sizeof(imu_pteval_t))
-
-#define MMU_PT_TYPE_SUPERSECTION (1<<4)
-#define MMU_PT_TYPE_SECTION (1<<3)
-#define MMU_PT_TYPE_LARGE_PAGE (1<<2)
-#define MMU_PT_TYPE_SMALL_PAGE (1<<1)
-
-#define MMU_SMALL_PAGE_SIZE (SZ_4K)
-#define MMU_LARGE_PAGE_SIZE (SZ_64K)
-#define MMU_SECTION_SIZE (SZ_1M)
-#define MMU_SUPERSECTION_SIZE (SZ_16M)
-
-
-typedef unsigned int imu_pteval_t;
-typedef struct {imu_pteval_t imu_pte;} imu_pte_t;
-typedef struct {imu_pteval_t imu_pgd;} imu_pgd_t;
-
-#define imu_pte_val(x) ((x).imu_pte)
-#define imu_pgd_val(x) ((x).imu_pgd)
-
-#define __imu_pte(x) ((imu_pte_t){(x)})
-#define __imu_pgd(x) ((imu_pgd_t){(x)})
-
-#define imu_pte_none(pte) (!imu_pte_val(pte))
-#define imu_pte_type(pte) (imu_pte_val(pte)&0x3)
-
-#define imu_pgd_index(addr) ((addr) >> IMU_PGDIR_SHIFT)
-#define imu_pgd_offset(domain, addr) ((domain)->pgd + imu_pgd_index(addr))
-
-#define imu_pte_index(addr) (((addr)>>IMU_PAGE_SHIFT)&(IMU_PTRS_PER_PTE - 1))
-#define imu_pte_offset_map(pgd, addr) (imu_pte_map(pgd) + imu_pte_index(addr))
-
-static inline imu_pte_t *imu_pte_map(imu_pgd_t *pgd)
-{
- return (imu_pte_t*)__va(imu_pgd_val(*pgd)&F_PGD_PA_PAGETABLE_MSK);
-}
-
-static inline int imu_pte_unmap(imu_pte_t* pte)
-{
- return 0;
-}
-
-static inline unsigned int imu_pgd_entry_pa(imu_pgd_t pgd)
-{
- if(F_PGD_TYPE_IS_PAGE(pgd))
- return imu_pgd_val(pgd)&F_PGD_PA_PAGETABLE_MSK;
- else if(F_PGD_TYPE_IS_SECTION(pgd))
- return imu_pgd_val(pgd)&F_PGD_PA_SECTION_MSK;
- else if(F_PGD_TYPE_IS_SUPERSECTION(pgd))
- return imu_pgd_val(pgd)&F_PGD_PA_SUPERSECTION_MSK;
- else
- return 0;
-}
-
-static inline imu_pgd_t *imu_supersection_start(imu_pgd_t *pgd)
-{
- return (imu_pgd_t *)(round_down((unsigned long)pgd, (16*4)));
-}
-static inline imu_pte_t *imu_largepage_start(imu_pte_t *pte)
-{
- return (imu_pte_t *)(round_down((unsigned long)pte, (16*4)));
-}
-
-static inline unsigned int m4u_calc_next_mva(unsigned int addr, unsigned int end, unsigned int size)
-{
-/* addr + size may equal 0x100000000*/
- unsigned long long __boundary = ((unsigned long long)addr+(unsigned long long)size)&(~((unsigned long long)size-1));
- unsigned long long min = min(__boundary, end);
- return (unsigned int)min;
-}
-
-#endif
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_port.h b/drivers/misc/mediatek/m4u/mt6735/m4u_port.h
new file mode 100644
index 000000000..8b16e16ca
--- /dev/null
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u_port.h
@@ -0,0 +1,16 @@
+#ifndef __M4U_PORT_H__
+#define __M4U_PORT_H__
+
+#if defined(CONFIG_ARCH_MT6735)
+#include "mt6735/m4u_port.h"
+#endif
+
+#if defined(CONFIG_ARCH_MT6735M)
+#include "mt6735m/m4u_port.h"
+#endif
+
+#if defined(CONFIG_ARCH_MT6753)
+#include "mt6753/m4u_port.h"
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/m4u/mt6735/m4u_priv.h b/drivers/misc/mediatek/m4u/mt6735/m4u_priv.h
index cb0b7f759..cee5967ac 100644
--- a/drivers/misc/mediatek/m4u/mt6735/m4u_priv.h
+++ b/drivers/misc/mediatek/m4u/mt6735/m4u_priv.h
@@ -7,30 +7,36 @@
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
-#include <linux/mmprofile.h>
-#include <mach/m4u.h>
+#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT) && defined(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT)
+#define M4U_TEE_SERVICE_ENABLE
+#endif
+
+#include "m4u.h"
#include "m4u_reg.h"
-#include "m4u_pgtable.h"
+#include "../2.0/m4u_pgtable.h"
#include "m4u_platform.h"
-#define M4UMSG(string, args...) pr_err("M4U"string,##args)
-#define M4UINFO(string, args...) pr_info("M4U"string,##args)
-
+#define M4UMSG(string, args...) pr_err("M4U"string, ##args)
+#define M4UINFO(string, args...) pr_debug("M4U"string, ##args)
#include "m4u_hw.h"
-#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT) && defined(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT)
-#define M4U_TEE_SERVICE_ENABLE
-#endif
-
-
-//#define M4U_FPGAPORTING
+/* #define M4U_FPGAPORTING */
#define M4U_PROFILE
+#define M4U_DVT 0
+
#ifndef M4U_PROFILE
#define MMProfileLogEx(...)
#define MMProfileEnable(...)
#define MMProfileStart(...)
+#define MMP_Event unsigned int
+#else
+#include <linux/mmprofile.h>
+
+extern void MMProfileEnable(int enable);
+extern void MMProfileStart(int start);
+
#endif
@@ -38,7 +44,7 @@
#define dmac_map_area __dma_map_area
#endif
-#ifndef dmac_unmap_area
+#ifndef dmac_unmap_area
#define dmac_unmap_area __dma_unmap_area
#endif
@@ -61,156 +67,154 @@
#define register_larb_monitor(...)
#endif
-#if !defined(CONFIG_MTK_LEGACY)
+
+#ifdef CONFIG_MTK_LEGACY
+#include <mach/mt_clkmgr.h>
+#else
#include <linux/clk.h>
+#endif
+
+#if !defined(CONFIG_MTK_LEGACY)
enum {
- SMI_COMMON_CLK,
- DISP0_SMI_LARB0_CLK,
- VDEC0_VDEC_CLK,
- VDEC1_LARB_CLK,
- LARB2_SMI_CLK,
- VENC_VENC_CLK,
- VENC_LARB_CLK,
- SMI_CLK_NUM,
+ SMI_COMMON_CLK,
+ DISP0_SMI_LARB0_CLK,
+ VDEC0_VDEC_CLK,
+ VDEC1_LARB_CLK,
+ LARB2_SMI_CLK,
+ VENC_VENC_CLK,
+ VENC_LARB_CLK,
+ SMI_CLK_NUM,
};
#endif /* !defined(CONFIG_MTK_LEGACY) */
-struct m4u_device
-{
- struct miscdevice dev;
- struct proc_dir_entry *m4u_dev_proc_entry;
- struct device *pDev[TOTAL_M4U_NUM];
- struct dentry *debug_root;
- unsigned long m4u_base[TOTAL_M4U_NUM];
- unsigned int irq_num[TOTAL_M4U_NUM];
+struct m4u_device {
+ struct miscdevice dev;
+ struct proc_dir_entry *m4u_dev_proc_entry;
+ struct device *pDev[TOTAL_M4U_NUM];
+ struct dentry *debug_root;
+ unsigned long m4u_base[TOTAL_M4U_NUM];
+ unsigned int irq_num[TOTAL_M4U_NUM];
#if !defined(CONFIG_MTK_LEGACY)
- struct clk *infra_m4u;
- struct clk *smi_clk[SMI_CLK_NUM];
+ struct clk *infra_m4u;
+ struct clk *smi_clk[SMI_CLK_NUM];
#endif
};
-
-typedef struct
-{
- imu_pgd_t *pgd;
- dma_addr_t pgd_pa;
- struct mutex pgtable_mutex;
- unsigned int pgsize_bitmap;
-
-}m4u_domain_t;
-
-
-typedef struct
-{
- struct list_head link;
- unsigned long va;
- unsigned int mva;
- unsigned int size;
- M4U_PORT_ID port;
- unsigned int prot;
- unsigned int flags;
- struct sg_table *sg_table;
-
- unsigned int mva_align;
- unsigned int size_align;
- int seq_id;
- unsigned long mapped_kernel_va_for_debug;
+typedef struct {
+ imu_pgd_t *pgd;
+ dma_addr_t pgd_pa;
+ struct mutex pgtable_mutex;
+ unsigned int pgsize_bitmap;
+} m4u_domain_t;
+
+typedef struct {
+ struct list_head link;
+ unsigned long va;
+ unsigned int mva;
+ unsigned int size;
+ M4U_PORT_ID port;
+ unsigned int prot;
+ unsigned int flags;
+ struct sg_table *sg_table;
+
+ unsigned int mva_align;
+ unsigned int size_align;
+ int seq_id;
+ unsigned long mapped_kernel_va_for_debug;
} m4u_buf_info_t;
-typedef struct _M4U_MAU
-{
- M4U_PORT_ID port;
- bool write;
- unsigned int mva;
- unsigned int size;
- bool enable;
- bool force;
-}M4U_MAU_STRUCT;
-
-typedef struct _M4U_TF
-{
- M4U_PORT_ID port;
- bool fgEnable;
-}M4U_TF_STRUCT;
+typedef struct _M4U_MAU {
+ M4U_PORT_ID port;
+ bool write;
+ unsigned int mva;
+ unsigned int size;
+ bool enable;
+ bool force;
+} M4U_MAU_STRUCT;
+typedef struct _M4U_TF {
+ M4U_PORT_ID port;
+ bool fgEnable;
+} M4U_TF_STRUCT;
-//================================
-//=== define in m4u_mva.c=========
+/* ================================ */
+/* === define in m4u_mva.c========= */
-typedef int (mva_buf_fn_t)(void* priv, unsigned int mva_start, unsigned int mva_end, void *data);
+typedef int (mva_buf_fn_t)(void *priv, unsigned int mva_start, unsigned int mva_end, void *data);
-void m4u_mvaGraph_init(void* priv_reserve);
+void m4u_mvaGraph_init(void *priv_reserve);
void m4u_mvaGraph_dump_raw(void);
void m4u_mvaGraph_dump(void);
-void* mva_get_priv_ext(unsigned int mva);
-int mva_for_each_priv(mva_buf_fn_t *fn, void* data);
-void* mva_get_priv(unsigned int mva);
+void *mva_get_priv_ext(unsigned int mva);
+int mva_foreach_priv(mva_buf_fn_t *fn, void *data);
+void *mva_get_priv(unsigned int mva);
unsigned int m4u_do_mva_alloc(unsigned long va, unsigned int size, void *priv);
unsigned int m4u_do_mva_alloc_fix(unsigned int mva, unsigned int size, void *priv);
int m4u_do_mva_free(unsigned int mva, unsigned int size);
-//=================================
-//==== define in m4u_pgtable.c=====
+/* ================================= */
+/* ==== define in m4u_pgtable.c===== */
void m4u_dump_pgtable(m4u_domain_t *domain, struct seq_file *seq);
void m4u_dump_pte_nolock(m4u_domain_t *domain, unsigned int mva);
+int m4u_pte_invalid(m4u_domain_t *domain, unsigned int mva);
void m4u_dump_pte(m4u_domain_t *domain, unsigned int mva);
int m4u_pgtable_init(struct m4u_device *m4u_dev, m4u_domain_t *m4u_domain);
-int m4u_map_4K(m4u_domain_t* m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot);
+int m4u_map_4K(m4u_domain_t *m4u_domain, unsigned int mva, unsigned long pa, unsigned int prot);
int m4u_clean_pte(m4u_domain_t *domain, unsigned int mva, unsigned int size);
unsigned long m4u_get_pte(m4u_domain_t *domain, unsigned int mva);
-//=================================
-//==== define in m4u_hw.c =====
-void m4u_invalid_tlb_by_range(m4u_domain_t *m4u_domain,unsigned int mva_start,unsigned int mva_end);
-m4u_domain_t * m4u_get_domain_by_port(M4U_PORT_ID port);
-m4u_domain_t * m4u_get_domain_by_id(int id);
+/* ================================= */
+/* ==== define in m4u_hw.c ===== */
+void m4u_invalid_tlb_by_range(m4u_domain_t *m4u_domain, unsigned int mva_start, unsigned int mva_end);
+m4u_domain_t *m4u_get_domain_by_port(M4U_PORT_ID port);
+m4u_domain_t *m4u_get_domain_by_id(int id);
int m4u_get_domain_nr(void);
int m4u_reclaim_notify(int port, unsigned int mva, unsigned int size);
int m4u_hw_init(struct m4u_device *m4u_dev, int m4u_id);
int m4u_hw_deinit(struct m4u_device *m4u_dev, int m4u_id);
int m4u_reg_backup(void);
int m4u_reg_restore(void);
-int m4u_insert_seq_range(M4U_PORT_ID port,unsigned int MVAStart,unsigned int MVAEnd);
+int m4u_insert_seq_range(M4U_PORT_ID port, unsigned int MVAStart, unsigned int MVAEnd);
int m4u_invalid_seq_range_by_id(int port, int seq_id);
void m4u_print_port_status(struct seq_file *seq, int only_print_active);
int m4u_dump_main_tlb(int m4u_id, int m4u_slave_id);
int m4u_dump_pfh_tlb(int m4u_id);
-int m4u_domain_init(struct m4u_device *m4u_dev, void* priv_reserve);
+int m4u_domain_init(struct m4u_device *m4u_dev, void *priv_reserve);
int config_mau(M4U_MAU_STRUCT mau);
int m4u_enable_tf(int port, bool fgenable);
-//=================================
-//==== define in m4u.c =====
-int m4u_dump_buf_info(struct seq_file * seq);
+extern int gM4U_4G_DRAM_Mode;
+
+/* ================================= */
+/* ==== define in m4u.c ===== */
+int m4u_dump_buf_info(struct seq_file *seq);
int m4u_map_sgtable(m4u_domain_t *m4u_domain, unsigned int mva,
- struct sg_table *sg_table, unsigned int size, unsigned int prot);
+ struct sg_table *sg_table, unsigned int size, unsigned int prot);
int m4u_unmap(m4u_domain_t *domain, unsigned int mva, unsigned int size);
-void m4u_get_pgd(m4u_client_t* client, M4U_PORT_ID port, void** pgd_va, void** pgd_pa, unsigned int* size);
-unsigned long m4u_mva_to_pa(m4u_client_t* client, M4U_PORT_ID port, unsigned int mva);
+void m4u_get_pgd(m4u_client_t *client, M4U_PORT_ID port, void **pgd_va, void **pgd_pa, unsigned int *size);
+unsigned long m4u_mva_to_pa(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva);
int m4u_query_mva_info(unsigned int mva, unsigned int size, unsigned int *real_mva, unsigned int *real_size);
-//=================================
-//==== define in m4u_debug.c =====
+/* ================================= */
+/* ==== define in m4u_debug.c ===== */
int m4u_debug_init(struct m4u_device *m4u_dev);
-
-
-
static inline dma_addr_t get_sg_phys(struct scatterlist *sg)
{
- dma_addr_t pa;
- pa = sg_dma_address(sg);
- if(pa == 0)
- pa = sg_phys(sg);
- return pa;
+ dma_addr_t pa;
+
+ pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
}
#define M4U_PGD_SIZE (16*1024)
@@ -222,91 +226,89 @@ static inline dma_addr_t get_sg_phys(struct scatterlist *sg)
extern int gM4U_log_level;
extern int gM4U_log_to_uart;
#define _M4ULOG(level, string, args...) \
-do{\
- if(level > gM4U_log_level)\
- {\
- if(level > gM4U_log_to_uart)\
- pr_warn("M4U"string, ##args);\
- else\
- pr_debug("M4U"string, ##args);\
- }\
-}while(0)
+ do {\
+ if (level > gM4U_log_level) {\
+ if (level > gM4U_log_to_uart)\
+ pr_warn("M4U"string, ##args);\
+ else\
+ pr_err("M4U"string, ##args);\
+ } \
+ } while (0)
#define M4ULOG_LOW(string, args...) _M4ULOG(M4U_LOG_LEVEL_LOW, string, ##args)
#define M4ULOG_MID(string, args...) _M4ULOG(M4U_LOG_LEVEL_MID, string, ##args)
#define M4ULOG_HIGH(string, args...) _M4ULOG(M4U_LOG_LEVEL_HIGH, string, ##args)
+#define M4UERR(string, args...) pr_err("M4U error: "string, ##args)
-#define M4UERR(string, args...) do {\
- pr_err("M4U error: "string,##args); \
- aee_kernel_exception("M4U", "[M4U] error:"string,##args); \
-}while(0)
-
-#define m4u_aee_print(string, args...) do{\
- char m4u_name[100];\
- snprintf(m4u_name,100, "[M4U]"string, ##args); \
- aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_MMPROFILE_BUFFER, m4u_name, "[M4U] error"string, ##args); \
- pr_err("M4U error: "string,##args); \
-}while(0)
- /*aee_kernel_warning(m4u_name, "[M4U] error:"string,##args); */
+/*aee_kernel_exception("M4U", "[M4U] error:"string,##args); */
+#define m4u_aee_print(string, args...) do {\
+ char m4u_name[100];\
+ snprintf(m4u_name, 100, "[M4U]"string, ##args); \
+ aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_MMPROFILE_BUFFER | DB_OPT_DUMP_DISPLAY, \
+ m4u_name, "[M4U] error"string, ##args); \
+ pr_err("M4U error: "string, ##args); \
+} while (0)
+/*aee_kernel_warning(m4u_name, "[M4U] error:"string,##args); */
#define M4U_PRINT_LOG_OR_SEQ(seq_file, fmt, args...) \
- do{\
- if(seq_file)\
- seq_printf(seq_file, fmt, ##args);\
- else\
- printk(fmt, ##args);\
- }while(0)
-
-
-//=======================================
-//==== other macros ============
-#define M4U_GET_PAGE_NUM(va,size) ((((va)&(PAGE_SIZE-1))+(size)+(PAGE_SIZE-1))>>12)
+ do {\
+ if (seq_file)\
+ seq_printf(seq_file, fmt, ##args);\
+ else\
+ pr_warn(fmt, ##args);\
+ } while (0)
+
+/* ======================================= */
+/* ==== other macros ============ */
+#define M4U_GET_PAGE_NUM(va, size) ((((va)&(PAGE_SIZE-1))+(size)+(PAGE_SIZE-1))>>12)
#define M4U_PAGE_MASK 0xfffL
typedef enum {
- M4U_MMP_ALLOC_MVA=0,
- M4U_MMP_DEALLOC_MVA,
- M4U_MMP_CONFIG_PORT,
- M4U_MMP_M4U_ERROR,
- M4U_MMP_CACHE_SYNC,
- M4U_MMP_TOGGLE_CG,
- M4U_MMP_MAX,
-}M4U_MMP_TYPE;
-extern MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
-
-
-typedef struct
-{
- M4U_PORT_ID port;
- unsigned long BufAddr;
- unsigned int BufSize;
- unsigned int prot;
- unsigned int MVAStart;
- unsigned int MVAEnd;
- unsigned int flags;
-
-}M4U_MOUDLE_STRUCT;
-
-
-typedef struct
-{
- M4U_PORT_ID port;
- M4U_CACHE_SYNC_ENUM eCacheSync;
- unsigned long va;
- unsigned int size;
- unsigned int mva;
-}M4U_CACHE_STRUCT;
-
-
-//IOCTL commnad
+ M4U_MMP_ALLOC_MVA = 0,
+ M4U_MMP_DEALLOC_MVA,
+ M4U_MMP_CONFIG_PORT,
+ M4U_MMP_M4U_ERROR,
+ M4U_MMP_CACHE_SYNC,
+ M4U_MMP_TOGGLE_CG,
+ M4U_MMP_MAX,
+} M4U_MMP_TYPE;
+
+typedef struct {
+ M4U_PORT_ID port;
+ unsigned long BufAddr;
+ unsigned int BufSize;
+ unsigned int prot;
+ unsigned int MVAStart;
+ unsigned int MVAEnd;
+ unsigned int flags;
+} M4U_MOUDLE_STRUCT;
+
+typedef struct {
+ M4U_PORT_ID port;
+ M4U_CACHE_SYNC_ENUM eCacheSync;
+ unsigned long va;
+ unsigned int size;
+ unsigned int mva;
+} M4U_CACHE_STRUCT;
+
+typedef struct _M4U_DMA {
+ M4U_PORT_ID port;
+ M4U_DMA_TYPE eDMAType;
+ M4U_DMA_DIR eDMADir;
+ unsigned long va;
+ unsigned int size;
+ unsigned int mva;
+} M4U_DMA_STRUCT;
+
+/* IOCTL command */
#define MTK_M4U_MAGICNO 'g'
#define MTK_M4U_T_POWER_ON _IOW(MTK_M4U_MAGICNO, 0, int)
#define MTK_M4U_T_POWER_OFF _IOW(MTK_M4U_MAGICNO, 1, int)
#define MTK_M4U_T_DUMP_REG _IOW(MTK_M4U_MAGICNO, 2, int)
#define MTK_M4U_T_DUMP_INFO _IOW(MTK_M4U_MAGICNO, 3, int)
-#define MTK_M4U_T_ALLOC_MVA _IOWR(MTK_M4U_MAGICNO,4, int)
+#define MTK_M4U_T_ALLOC_MVA _IOWR(MTK_M4U_MAGICNO, 4, int)
#define MTK_M4U_T_DEALLOC_MVA _IOW(MTK_M4U_MAGICNO, 5, int)
#define MTK_M4U_T_INSERT_TLB_RANGE _IOW(MTK_M4U_MAGICNO, 6, int)
#define MTK_M4U_T_INVALID_TLB_RANGE _IOW(MTK_M4U_MAGICNO, 7, int)
@@ -329,18 +331,23 @@ typedef struct
#define MTK_M4U_T_CONFIG_PORT_ARRAY _IOW(MTK_M4U_MAGICNO, 26, int)
#define MTK_M4U_T_CONFIG_MAU _IOW(MTK_M4U_MAGICNO, 27, int)
#define MTK_M4U_T_CONFIG_TF _IOW(MTK_M4U_MAGICNO, 28, int)
+#define MTK_M4U_T_DMA_OP _IOW(MTK_M4U_MAGICNO, 29, int)
#define MTK_M4U_T_SEC_INIT _IOW(MTK_M4U_MAGICNO, 50, int)
#ifdef M4U_TEE_SERVICE_ENABLE
-int m4u_config_port_tee(M4U_PORT_STRUCT* pM4uPort);
+int m4u_config_port_tee(M4U_PORT_STRUCT *pM4uPort);
int m4u_larb_backup_sec(unsigned int larb_idx);
int m4u_larb_restore_sec(unsigned int larb_idx);
-int m4u_config_port_array_tee(unsigned char* port_array);
+int m4u_config_port_array_tee(unsigned char *port_array);
int m4u_sec_init(void);
-
#endif
+/* #include <mmprofile.h> */
+extern MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
+#if !defined(CONFIG_MTK_LEGACY)
+extern const char *smi_clk_name[];
#endif
+#endif
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735/Makefile b/drivers/misc/mediatek/m4u/mt6735/mt6735/Makefile
index dc4971ca1..cc8f8d03a 100755..100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735/Makefile
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735/Makefile
@@ -1,7 +1,5 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/mt6735
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/mmp/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/2.0
obj-y += m4u_platform.o
-
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.c b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.c
index c124607ac..4501ddb6c 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.c
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.c
@@ -4,67 +4,69 @@
unsigned int gM4UTagCount[] = {32};
-const char* gM4U_SMILARB[] = {
- "mediatek,SMI_LARB0", "mediatek,SMI_LARB1", "mediatek,SMI_LARB2", "mediatek,SMI_LARB3"};
+const char *gM4U_SMILARB[] = {
+ "mediatek,smi_larb0", "mediatek,smi_larb1",
+ "mediatek,smi_larb2", "mediatek,smi_larb3"};
-M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0}};
+M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0} };
M4U_RANGE_DES_T *gM4USeq[] = {gM4u0_seq};
-#define M4U0_PORT_INIT(slave, larb, port) 0,slave,larb,port,(((larb)<<7)|((port)<<2)),1
+#define M4U0_PORT_INIT(name, slave, larb, port) {\
+ name, 0, slave, larb, port, (((larb)<<7)|((port)<<2)), 1 \
+}
-m4u_port_t gM4uPort[] =
-{
- { "DISP_OVL0", M4U0_PORT_INIT( 0, 0, 0 ), },
- { "DISP_RDMA0", M4U0_PORT_INIT( 0, 0, 1 ), },
- { "DISP_WDMA0", M4U0_PORT_INIT( 0, 0, 2 ), },
- { "DISP_RDMA1", M4U0_PORT_INIT( 0, 0, 3 ), },
- { "MDP_RDMA", M4U0_PORT_INIT( 0, 0, 4 ), },
- { "MDP_WDMA", M4U0_PORT_INIT( 0, 0, 5 ), },
- { "MDP_WROT", M4U0_PORT_INIT( 0, 0, 6 ), },
+m4u_port_t gM4uPort[] = {
+ M4U0_PORT_INIT("DISP_OVL0", 0, 0, 0),
+ M4U0_PORT_INIT("DISP_RDMA0", 0, 0, 1),
+ M4U0_PORT_INIT("DISP_WDMA0", 0, 0, 2),
+ M4U0_PORT_INIT("DISP_RDMA1", 0, 0, 3),
+ M4U0_PORT_INIT("MDP_RDMA", 0, 0, 4),
+ M4U0_PORT_INIT("MDP_WDMA", 0, 0, 5),
+ M4U0_PORT_INIT("MDP_WROT", 0, 0, 6),
- { "VDEC_MC", M4U0_PORT_INIT( 0, 1, 0 ), },
- { "VDEC_PP", M4U0_PORT_INIT( 0, 1, 1 ), },
- { "VDEC_AVC_MV", M4U0_PORT_INIT( 0, 1, 2 ), },
- { "VDEC_PRED_RD", M4U0_PORT_INIT( 0, 1, 3 ), },
- { "VDEC_PRED_WR", M4U0_PORT_INIT( 0, 1, 4 ), },
- { "VDEC_VLD", M4U0_PORT_INIT( 0, 1, 5 ), },
- { "VDEC_PPWRAP", M4U0_PORT_INIT( 0, 1, 6 ), },
+ M4U0_PORT_INIT("VDEC_MC", 0, 1, 0),
+ M4U0_PORT_INIT("VDEC_PP", 0, 1, 1),
+ M4U0_PORT_INIT("VDEC_AVC_MV", 0, 1, 2),
+ M4U0_PORT_INIT("VDEC_PRED_RD", 0, 1, 3),
+ M4U0_PORT_INIT("VDEC_PRED_WR", 0, 1, 4),
+ M4U0_PORT_INIT("VDEC_VLD", 0, 1, 5),
+ M4U0_PORT_INIT("VDEC_PPWRAP", 0, 1, 6),
- { "CAM_IMGO", M4U0_PORT_INIT( 0, 2, 0 ), },
- { "CAM_RRZO", M4U0_PORT_INIT( 0, 2, 1 ), },
- { "CAM_AAO", M4U0_PORT_INIT( 0, 2, 2 ), },
- { "CAM_LCSO", M4U0_PORT_INIT( 0, 2, 3 ), },
- { "CAM_ESFKO", M4U0_PORT_INIT( 0, 2, 4 ), },
- { "CAM_IMGO_S", M4U0_PORT_INIT( 0, 2, 5 ), },
- { "CAM_LSCI", M4U0_PORT_INIT( 0, 2, 6 ), },
- { "CAM_LSCI_D", M4U0_PORT_INIT( 0, 2, 7 ), },
- { "CAM_BPCI", M4U0_PORT_INIT( 0, 2, 8 ), },
- { "CAM_BPCI_D", M4U0_PORT_INIT( 0, 2, 9 ), },
- { "CAM_UFDI", M4U0_PORT_INIT( 0, 2, 10 ), },
- { "CAM_IMGI", M4U0_PORT_INIT( 0, 2, 11 ), },
- { "CAM_IMG2O", M4U0_PORT_INIT( 0, 2, 12 ), },
- { "CAM_IMG3O", M4U0_PORT_INIT( 0, 2, 13 ), },
- { "CAM_VIPI", M4U0_PORT_INIT( 0, 2, 14 ), },
- { "CAM_VIP2I", M4U0_PORT_INIT( 0, 2, 15 ), },
- { "CAM_VIP3I", M4U0_PORT_INIT( 0, 2, 16 ), },
- { "CAM_LCEI", M4U0_PORT_INIT( 0, 2, 17 ), },
- { "CAM_RB", M4U0_PORT_INIT( 0, 2, 18 ), },
- { "CAM_RP", M4U0_PORT_INIT( 0, 2, 19 ), },
- { "CAM_WR", M4U0_PORT_INIT( 0, 2, 20 ), },
+ M4U0_PORT_INIT("CAM_IMGO", 0, 2, 0),
+ M4U0_PORT_INIT("CAM_RRZO", 0, 2, 1),
+ M4U0_PORT_INIT("CAM_AAO", 0, 2, 2),
+ M4U0_PORT_INIT("CAM_LCSO", 0, 2, 3),
+ M4U0_PORT_INIT("CAM_ESFKO", 0, 2, 4),
+ M4U0_PORT_INIT("CAM_IMGO_S", 0, 2, 5),
+ M4U0_PORT_INIT("CAM_LSCI", 0, 2, 6),
+ M4U0_PORT_INIT("CAM_LSCI_D", 0, 2, 7),
+ M4U0_PORT_INIT("CAM_BPCI", 0, 2, 8),
+ M4U0_PORT_INIT("CAM_BPCI_D", 0, 2, 9),
+ M4U0_PORT_INIT("CAM_UFDI", 0, 2, 10),
+ M4U0_PORT_INIT("CAM_IMGI", 0, 2, 11),
+ M4U0_PORT_INIT("CAM_IMG2O", 0, 2, 12),
+ M4U0_PORT_INIT("CAM_IMG3O", 0, 2, 13),
+ M4U0_PORT_INIT("CAM_VIPI", 0, 2, 14),
+ M4U0_PORT_INIT("CAM_VIP2I", 0, 2, 15),
+ M4U0_PORT_INIT("CAM_VIP3I", 0, 2, 16),
+ M4U0_PORT_INIT("CAM_LCEI", 0, 2, 17),
+ M4U0_PORT_INIT("CAM_RB", 0, 2, 18),
+ M4U0_PORT_INIT("CAM_RP", 0, 2, 19),
+ M4U0_PORT_INIT("CAM_WR", 0, 2, 20),
- { "VENC_RCPU", M4U0_PORT_INIT( 0, 3, 0 ), },
- { "VENC_REC", M4U0_PORT_INIT( 0, 3, 1 ), },
- { "VENC_BSDMA", M4U0_PORT_INIT( 0, 3, 2 ), },
- { "VENC_SV_COMV", M4U0_PORT_INIT( 0, 3, 3 ), },
- { "VENC_RD_COMV", M4U0_PORT_INIT( 0, 3, 4 ), },
- { "JPGENC_RDMA", M4U0_PORT_INIT( 0, 3, 5 ), },
- { "JPGENC_BSDMA", M4U0_PORT_INIT( 0, 3, 6 ), },
- { "JPGDEC_WDMA", M4U0_PORT_INIT( 0, 3, 7 ), },
- { "JPGDEC_BSDMA", M4U0_PORT_INIT( 0, 3, 8 ), },
- { "VENC_CUR_LUMA", M4U0_PORT_INIT( 0, 3, 9 ), },
- { "VENC_CUR_CHROMA", M4U0_PORT_INIT( 0, 3, 10 ), },
- { "VENC_REF_LUMA", M4U0_PORT_INIT( 0, 3, 11 ), },
- { "VENC_REF_CHROMA", M4U0_PORT_INIT( 0, 3, 12 ), },
+ M4U0_PORT_INIT("VENC_RCPU", 0, 3, 0),
+ M4U0_PORT_INIT("VENC_REC", 0, 3, 1),
+ M4U0_PORT_INIT("VENC_BSDMA", 0, 3, 2),
+ M4U0_PORT_INIT("VENC_SV_COMV", 0, 3, 3),
+ M4U0_PORT_INIT("VENC_RD_COMV", 0, 3, 4),
+ M4U0_PORT_INIT("JPGENC_RDMA", 0, 3, 5),
+ M4U0_PORT_INIT("JPGENC_BSDMA", 0, 3, 6),
+ M4U0_PORT_INIT("JPGDEC_WDMA", 0, 3, 7),
+ M4U0_PORT_INIT("JPGDEC_BSDMA", 0, 3, 8),
+ M4U0_PORT_INIT("VENC_CUR_LUMA", 0, 3, 9),
+ M4U0_PORT_INIT("VENC_CUR_CHROMA", 0, 3, 10),
+ M4U0_PORT_INIT("VENC_REF_LUMA", 0, 3, 11),
+ M4U0_PORT_INIT("VENC_REF_CHROMA", 0, 3, 12),
- { "UNKOWN", M4U0_PORT_INIT( 0, 4, 0 ), },
+ M4U0_PORT_INIT("UNKNOWN", 0, 4, 0),
};
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.h b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.h
index d3f3a6e04..942a02ea0 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_platform.h
@@ -5,18 +5,18 @@
#define M4U_BASE0 0xf0205000
-#define LARB0_BASE 0xf4015000
-#define LARB1_BASE 0xf6010000
-#define LARB2_BASE 0xf5001000
-#define LARB3_BASE 0xf7001000
+#define LARB0_BASE 0xf4015000
+#define LARB1_BASE 0xf6010000
+#define LARB2_BASE 0xf5001000
+#define LARB3_BASE 0xf7001000
-//mau related
+/* mau related */
#define MAU_NR_PER_M4U_SLAVE 4
-//smi
+/* smi */
#define SMI_LARB_NR 4
-//seq range related
+/* seq range related */
#define SEQ_NR_PER_MM_SLAVE 8
#define SEQ_NR_PER_PERI_SLAVE 0
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_port.h b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_port.h
index 32bc7c3cf..91eea83bd 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_port.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_port.h
@@ -1,69 +1,65 @@
#ifndef __M4U_PORT_D1_H__
#define __M4U_PORT_D1_H__
-//====================================
-// about portid
-//====================================
+/* ==================================== */
+/* about portid */
+/* ==================================== */
-enum
-{
- M4U_PORT_DISP_OVL0 ,
- M4U_PORT_DISP_RDMA0 ,
- M4U_PORT_DISP_WDMA0 ,
- M4U_PORT_DISP_RDMA1 ,
- M4U_PORT_MDP_RDMA ,
- M4U_PORT_MDP_WDMA ,
- M4U_PORT_MDP_WROT ,
+enum {
+ M4U_PORT_DISP_OVL0 ,
+ M4U_PORT_DISP_RDMA0 ,
+ M4U_PORT_DISP_WDMA0 ,
+ M4U_PORT_DISP_RDMA1 ,
+ M4U_PORT_MDP_RDMA ,
+ M4U_PORT_MDP_WDMA ,
+ M4U_PORT_MDP_WROT ,
-
- M4U_PORT_HW_VDEC_MC_EXT ,
- M4U_PORT_HW_VDEC_PP_EXT ,
- M4U_PORT_HW_VDEC_AVC_MV_EXT ,
- M4U_PORT_HW_VDEC_PRED_RD_EXT ,
- M4U_PORT_HW_VDEC_PRED_WR_EXT ,
- M4U_PORT_HW_VDEC_VLD_EXT ,
- M4U_PORT_HW_VDEC_PPWRAP_EXT ,
-
- M4U_PORT_IMGO ,
- M4U_PORT_RRZO ,
- M4U_PORT_AAO ,
- M4U_PORT_LCSO ,
- M4U_PORT_ESFKO ,
- M4U_PORT_IMGO_S ,
- M4U_PORT_LSCI ,
- M4U_PORT_LSCI_D ,
- M4U_PORT_BPCI ,
- M4U_PORT_BPCI_D ,
- M4U_PORT_UFDI ,
- M4U_PORT_IMGI ,
- M4U_PORT_IMG2O ,
- M4U_PORT_IMG3O ,
- M4U_PORT_VIPI ,
- M4U_PORT_VIP2I ,
- M4U_PORT_VIP3I ,
- M4U_PORT_LCEI ,
- M4U_PORT_RB ,
- M4U_PORT_RP ,
- M4U_PORT_WR ,
-
- M4U_PORT_VENC_RCPU ,
- M4U_PORT_VENC_REC ,
- M4U_PORT_VENC_BSDMA ,
- M4U_PORT_VENC_SV_COMV ,
- M4U_PORT_VENC_RD_COMV ,
- M4U_PORT_JPGENC_RDMA ,
- M4U_PORT_JPGENC_BSDMA ,
- M4U_PORT_JPGDEC_WDMA ,
- M4U_PORT_JPGDEC_BSDMA ,
- M4U_PORT_VENC_CUR_LUMA ,
- M4U_PORT_VENC_CUR_CHROMA ,
- M4U_PORT_VENC_REF_LUMA ,
- M4U_PORT_VENC_REF_CHROMA ,
-
- M4U_PORT_UNKNOWN ,
-
-};
+ M4U_PORT_HW_VDEC_MC_EXT ,
+ M4U_PORT_HW_VDEC_PP_EXT ,
+ M4U_PORT_HW_VDEC_AVC_MV_EXT ,
+ M4U_PORT_HW_VDEC_PRED_RD_EXT ,
+ M4U_PORT_HW_VDEC_PRED_WR_EXT ,
+ M4U_PORT_HW_VDEC_VLD_EXT ,
+ M4U_PORT_HW_VDEC_PPWRAP_EXT ,
+
+ M4U_PORT_IMGO ,
+ M4U_PORT_RRZO ,
+ M4U_PORT_AAO ,
+ M4U_PORT_LCSO ,
+ M4U_PORT_ESFKO ,
+ M4U_PORT_IMGO_S ,
+ M4U_PORT_LSCI ,
+ M4U_PORT_LSCI_D ,
+ M4U_PORT_BPCI ,
+ M4U_PORT_BPCI_D ,
+ M4U_PORT_UFDI ,
+ M4U_PORT_IMGI ,
+ M4U_PORT_IMG2O ,
+ M4U_PORT_IMG3O ,
+ M4U_PORT_VIPI ,
+ M4U_PORT_VIP2I ,
+ M4U_PORT_VIP3I ,
+ M4U_PORT_LCEI ,
+ M4U_PORT_RB ,
+ M4U_PORT_RP ,
+ M4U_PORT_WR ,
+
+ M4U_PORT_VENC_RCPU ,
+ M4U_PORT_VENC_REC ,
+ M4U_PORT_VENC_BSDMA ,
+ M4U_PORT_VENC_SV_COMV ,
+ M4U_PORT_VENC_RD_COMV ,
+ M4U_PORT_JPGENC_RDMA ,
+ M4U_PORT_JPGENC_BSDMA ,
+ M4U_PORT_JPGDEC_WDMA ,
+ M4U_PORT_JPGDEC_BSDMA ,
+ M4U_PORT_VENC_CUR_LUMA ,
+ M4U_PORT_VENC_CUR_CHROMA ,
+ M4U_PORT_VENC_REF_LUMA ,
+ M4U_PORT_VENC_REF_CHROMA ,
+
+ M4U_PORT_UNKNOWN ,
+};
#define M4U_PORT_NR M4U_PORT_UNKNOWN
#endif
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_reg.h b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_reg.h
index 1dabc831e..96578ee99 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_reg.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735/m4u_reg.h
@@ -1,19 +1,17 @@
#ifndef _MT6735_M4U_REG_D1_H__
#define _MT6735_M4U_REG_D1_H__
-#include "mach/mt_reg_base.h"
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
+/* ================================================= */
+/* common macro definitions */
+#define F_VAL(val, msb, lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
+#define F_BIT_VAL(val, bit) ((!!(val))<<(bit))
+#define F_MSK_SHIFT(regval, msb, lsb) (((regval)&F_MSK(msb, lsb))>>lsb)
-//=====================================================
-//M4U register definition
-//=====================================================
+/* ===================================================== */
+/* M4U register definition */
+/* ===================================================== */
#define REG_MMUg_PT_BASE (0x0)
#define F_MMUg_PT_VA_MSK 0xffff0000
@@ -32,27 +30,27 @@
#define REG_MMU_PROG_DSC 0x18
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
+#define REG_MMU_INVLD (0x20)
+ #define F_MMU_INV_ALL 0x2
+ #define F_MMU_INV_RANGE 0x1
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
+#define REG_MMU_INVLD_SA (0x24)
+#define REG_MMU_INVLD_EA (0x28)
#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
+ #define F_MMU_INV_SEC_ALL 0x2
+ #define F_MMU_INV_SEC_RANGE 0x1
+
+#define REG_MMU_INVLD_SA_SEC (0x30)
#define REG_MMU_INVLD_EA_SEC (0x34)
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
+#define REG_INVLID_SEL (0x38)
+ #define F_MMU_INV_EN_L1 (1<<0)
+ #define F_MMU_INV_EN_L2 (1<<1)
#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
+ #define F_MMU_INV_SEC_EN_L1 (1<<0)
+ #define F_MMU_INV_SEC_EN_L2 (1<<1)
#define F_MMU_INV_SEC_INV_DONE (1<<2)
#define F_MMU_INV_SEC_INV_INT_CLR (1<<3)
#define F_MMU_INV_SEC_INV_INT_EN (1<<4)
@@ -79,7 +77,7 @@
#define REG_MMU_WR_LEN (0x54)
#define F_MMU_MMU0_WRITE_THROTTLING_DIS F_BIT_SET(5)
- #define F_MMU_MMU0_WRITE_LEN F_MSK(4,0)
+ #define F_MMU_MMU0_WRITE_LEN F_MSK(4, 0)
#define REG_MMU_HW_DEBUG (0x58)
#define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
@@ -92,16 +90,16 @@
#define REG_MMU_LEGACY_4KB_MODE (0x60)
#define REG_MMU_DBG0 (0X64)
- #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13,0)
+ #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13, 0)
#define REG_MMU_DBG1 (0x68)
- #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12,0)
+ #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12, 0)
#define REG_MMU_DBG2 (0x6c)
- #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11,0)
+ #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11, 0)
#define REG_MMU_SMI_COMMON_DBG0 (0x78)
- #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23,0)
+ #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23, 0)
#define REG_MMU_MMU_COHERENCE_EN 0x80
#define REG_MMU_IN_ORDER_WR_EN 0x84
@@ -113,38 +111,38 @@
#define F_READ_ENTRY_MM0_MAIN F_BIT_SET(27)
#define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(27+id)
#define F_READ_ENTRY_PFH F_BIT_SET(26)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,16,12)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,10,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
+ #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx, 16, 12)
+ #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx, 10, 5)
+ /* #define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4) */
+ /* #define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2) */
#define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
+ #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1, 0)
#define REG_MMU_DES_RDATA 0x104
#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu==0)?F_MMU0_PFH_TAG_VA_GET(tag): F_MMU1_PFH_TAG_VA_GET(tag))
+ #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu == 0)?F_MMU0_PFH_TAG_VA_GET(tag) : F_MMU1_PFH_TAG_VA_GET(tag))
#define F_MMU0_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(0)+1))
- #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
- #define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu) ((mmu=0)?F_MSK(31, 29):F_MSK(31, 28))
+ #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
+	#define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu)	((mmu == 0)?F_MSK(31, 29):F_MSK(31, 28))
#define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
+ #define F_PFH_TAG_16X_BIT F_BIT_SET(2) /* this bit is always 0 -- cost down. */
#define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
#define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-// tag releated macro
+/* tag related macro */
#define MMU0_SET_ORDER 6
#define MMU1_SET_ORDER 5
- #define MMU_SET_ORDER(mmu) ((mmu==0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
+ #define MMU_SET_ORDER(mmu) ((mmu == 0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
#define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
#define MMU_SET_LSB_OFFSET 15
#define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
+ #define MMU_PFH_VA_TO_SET(mmu, va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
#define MMU_PAGE_PER_LINE 8
#define MMU_WAY_NR 4
#define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
+
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
#define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 1)
@@ -160,7 +158,7 @@
#define REG_MMU_IVRP_PADDR 0x114
#define F_MMU_IVRP_PA_SET(PA) (PA>>1)
#define F_MMU_IVRP_4G_DRAM_PA_SET(PA) ((PA>>1)|(1<<31))
-
+
#define REG_MMU_INT_L2_CONTROL 0x120
#define F_INT_L2_CLR_BIT (1<<12)
#define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
@@ -176,15 +174,15 @@
#define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
+ #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_MISS_FIFO_ERR(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
+ #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) /* (14+(set)+(mmu*4)) */
#define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
#define F_INT_MMU0_MAU_MSK F_MSK(10, 7)
-
+
#define REG_MMU_CPE_DONE_SEC 0x128
#define REG_MMU_CPE_DONE 0x12C
@@ -198,51 +196,53 @@
#define REG_MMU_TBWALK_FAULT_VA 0x138
#define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
+
#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
#define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
#define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
+
#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
- #define F_MMU0_INT_ID_TF_MSK (~0x3) //only for MM iommu.
+ #define F_MMU0_INT_ID_TF_MSK (~0x3) /* only for MM iommu. */
#define REG_MMU_PF_MSCNT 0x160
#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
+#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) /* (0x168+((mmu)*12) */
#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(mmu, set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu==0)?(MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3)))) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
+#define REG_MMU_PFH_VLD(mmu, set, way) \
+ (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu == 0) \
+ ? (MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3))))
+ #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) /* set%32 */
#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_SQ_START(mmu, x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define F_SQ_VA_MASK F_MSK(31, 20)
#define F_SQ_EN_BIT (1<<19)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
+ /* #define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13) */
#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_VA(mmu, x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
#define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
#define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+
+#define REG_MMU_RSx_PA(mmu, x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_2ND_BASE(mmu, x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_ST(mmu, x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 22, 20)
#define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
#define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_MAIN_TAG(mmu, x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
#define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
#define F_MAIN_TLB_LOCK_BIT (1<<11)
#define F_MAIN_TLB_VALID_BIT (1<<10)
@@ -251,23 +251,23 @@
#define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
#define F_MAIN_TLB_INV_DES_BIT (1<<6)
#define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
+ #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4, 1)
#define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START(mmu, mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START_BIT32(mmu, mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END(mmu, mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END_BIT32(mmu, mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_PORT_EN(mmu, mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ID(mmu, mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
#define F_MMU_MAU_ASSERT_ID_LARB(regval) F_MSK_SHIFT(regval, 7, 5)
#define F_MMU_MAU_ASSERT_ID_PORT(regval) F_MSK_SHIFT(regval, 4, 0)
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR(mmu, mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR_BIT32(mmu, mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
+ #define F_MAU_LARB_VAL(mau, larb) ((larb)<<(mau*8))
#define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
@@ -284,77 +284,77 @@
#define REG_MMU_PFH_DIST4 0xb10
#define REG_MMU_PFH_DIST5 0xb14
#define REG_MMU_PFH_DIST(port) (REG_MMU_PFH_DIST0+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
+ #define F_MMU_PFH_DIST_VAL(port, val) ((val&0xf)<<(((port)&0x7)<<2))
#define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
#define REG_MMU_PFH_DIST_NR(nr) (REG_MMU_PFH_DIST0 + ((nr)<<2))
#define REG_MMU_PFH_DIR0 0xd00
#define REG_MMU_PFH_DIR1 0xd04
-#define REG_MMU_PFH_DIR(port) (((port)<32) ? REG_MMU_PFH_DIR0: REG_MMU_PFH_DIR1)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
+#define REG_MMU_PFH_DIR(port) (((port) < 32) ? REG_MMU_PFH_DIR0 : REG_MMU_PFH_DIR1)
+#define F_MMU_PFH_DIR(port, val) ((!!(val))<<((port)&0x1f))
#define REG_MMU_PFH_DIR_NR(nr) (REG_MMU_PFH_DIR0 + ((nr)<<2))
-//================================================================
-// SMI larb
-//================================================================
+/* ================================================================ */
+/* SMI larb */
+/* ================================================================ */
-#define SMI_LARB_MMU_EN (0xfc0 )
+#define SMI_LARB_MMU_EN (0xfc0)
#define F_SMI_MMU_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_SEC_EN (0xfc4 )
+#define SMI_LARB_SEC_EN (0xfc4)
#define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_DOMN_0 (0xfd0 )
-#define SMI_LARB_DOMN_1 (0xfd4 )
-#define SMI_LARB_DOMN_2 (0xfd8 )
-#define SMI_LARB_DOMN_3 (0xfdc )
+#define SMI_LARB_DOMN_0 (0xfd0)
+#define SMI_LARB_DOMN_1 (0xfd4)
+#define SMI_LARB_DOMN_2 (0xfd8)
+#define SMI_LARB_DOMN_3 (0xfdc)
#define REG_SMI_LARB_DOMN_OF_PORT(port) (SMI_LARB_DOMN_0+(((port)>>3)<<2))
#define F_SMI_DOMN(port, domain) ((domain&0xf)<<(((port)&0x7)<<2))
-//=========================================================================
-// peripheral system
-//=========================================================================
+/* ========================================================================= */
+/* peripheral system */
+/* ========================================================================= */
#define REG_PERIAXI_BUS_CTL3 (0x208)
#define F_PERI_MMU_EN(port, en) ((en)<<((port)))
-#include <mach/sync_write.h>
+#include <mt-plat/sync_write.h>
-static inline unsigned int COM_ReadReg32(unsigned long addr)
+static inline unsigned int COM_ReadReg32(unsigned long addr)
{
- return ioread32((void*)addr);
+ return ioread32((void *)addr);
}
static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- mt_reg_sync_writel(Val, (void*)addr);
+{
+ mt_reg_sync_writel(Val, (void *)addr);
}
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
+static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
{
- unsigned int val;
- val = COM_ReadReg32((M4uBase+Offset));
- return val;
+ unsigned int val;
+
+ val = COM_ReadReg32((M4uBase+Offset));
+ return val;
}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
-{
- //printk("M4U_WriteReg32: M4uBase: 0x%lx, Offset:0x%x, val:0x%x\n", M4uBase, Offset, Val);
- COM_WriteReg32((M4uBase+Offset), Val);
+static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
+{
+ COM_WriteReg32((M4uBase+Offset), Val);
}
static inline void m4uHw_set_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned long mask, unsigned int val)
+ unsigned long mask, unsigned int val)
{
- unsigned int regval;
- regval = M4U_ReadReg32(M4UBase, reg);
- regval = (regval & (~mask))|val;
- M4U_WriteReg32(M4UBase, reg, regval);
+ unsigned int regval;
+
+ regval = M4U_ReadReg32(M4UBase, reg);
+ regval = (regval & (~mask))|val;
+ M4U_WriteReg32(M4UBase, reg, regval);
}
static inline unsigned int m4uHw_get_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned int mask)
+ unsigned int mask)
{
- return M4U_ReadReg32(M4UBase, reg)&mask;
+ return M4U_ReadReg32(M4UBase, reg)&mask;
}
-#endif //_MT6735_M4U_REG_D1_H__
-
+#endif /* _MT6735_M4U_REG_D1_H__ */
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735m/Makefile b/drivers/misc/mediatek/m4u/mt6735/mt6735m/Makefile
index ed241266e..cc8f8d03a 100755..100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735m/Makefile
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735m/Makefile
@@ -1,6 +1,5 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/mt6735
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/mmp/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/2.0
obj-y += m4u_platform.o
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.c b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.c
index 3cbc2c04e..aea944bf1 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.c
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.c
@@ -4,46 +4,55 @@
unsigned int gM4UTagCount[] = {32};
-const char* gM4U_SMILARB[] = {
- "mediatek,SMI_LARB0", "mediatek,SMI_LARB1", "mediatek,SMI_LARB2"};
+const char *gM4U_SMILARB[] = {
+ "mediatek,smi_larb0", "mediatek,smi_larb1", "mediatek,smi_larb2"};
-M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0}};
+M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0} };
M4U_RANGE_DES_T *gM4USeq[] = {gM4u0_seq};
-#define M4U0_PORT_INIT(slave, larb, port) 0,slave,larb,port,(((larb)<<7)|((port)<<2)),1
-
-m4u_port_t gM4uPort[] =
-{
- { "DISP_OVL0_PORT0", M4U0_PORT_INIT( 0, 0, 0 ), },
- { "DISP_OVL0_PORT1", M4U0_PORT_INIT( 0, 0, 1 ), },
- { "DISP_RDMA0", M4U0_PORT_INIT( 0, 0, 2 ), },
- { "DISP_WDMA0", M4U0_PORT_INIT( 0, 0, 3 ), },
- { "MDP_RDMA", M4U0_PORT_INIT( 0, 0, 4 ), },
- { "MDP_WDMA", M4U0_PORT_INIT( 0, 0, 5 ), },
- { "MDP_WROT", M4U0_PORT_INIT( 0, 0, 6 ), },
- { "DISP_FAKE", M4U0_PORT_INIT( 0, 0, 7 ), },
-
- { "VDEC_MC", M4U0_PORT_INIT( 0, 1, 0 ), },
- { "VDEC_PP", M4U0_PORT_INIT( 0, 1, 1 ), },
- { "VDEC_AVC_MV", M4U0_PORT_INIT( 0, 1, 2 ), },
- { "VDEC_PRED_RD", M4U0_PORT_INIT( 0, 1, 3 ), },
- { "VDEC_PRED_WR", M4U0_PORT_INIT( 0, 1, 4 ), },
- { "VDEC_VLD", M4U0_PORT_INIT( 0, 1, 5 ), },
- { "VDEC_PPWRAP", M4U0_PORT_INIT( 0, 1, 6 ), },
-
- { "IMGO", M4U0_PORT_INIT( 0, 2, 0 ), },
- { "IMGO2O", M4U0_PORT_INIT( 0, 2, 1 ), },
- { "LSCI", M4U0_PORT_INIT( 0, 2, 2 ), },
- { "VENC_BSDMA_VDEC_POST0", M4U0_PORT_INIT( 0, 2, 3 ), },
- { "JPGENC_RDMA", M4U0_PORT_INIT( 0, 2, 4 ), },
- { "CAM_IMGI", M4U0_PORT_INIT( 0, 2, 5 ), },
- { "CAM_ESFKO", M4U0_PORT_INIT( 0, 2, 6 ), },
- { "CAM_AAO", M4U0_PORT_INIT( 0, 2, 7 ), },
- { "JPGENC_BSDMA", M4U0_PORT_INIT( 0, 2, 8 ), },
- { "VENC_MVQP", M4U0_PORT_INIT( 0, 2, 9 ), },
- { "VENC_MC", M4U0_PORT_INIT( 0, 2, 10 ), },
- { "VENC_CDMA_VDEC_CDMA", M4U0_PORT_INIT( 0, 2, 11 ), },
- { "VENC_REC_VDEC_WDMA", M4U0_PORT_INIT( 0, 2, 12 ), },
-
- { "UNKOWN", M4U0_PORT_INIT( 0, 4, 0 ), },
+#define M4U0_PORT_INIT(name, slave, larb, port) {\
+ name, 0, slave, larb, port, (((larb)<<7)|((port)<<2)), 1\
+}
+
+m4u_port_t gM4uPort[] = {
+ M4U0_PORT_INIT("DISP_OVL0_PORT0", 0, 0, 0),
+ M4U0_PORT_INIT("DISP_OVL0_PORT1", 0, 0, 1),
+ M4U0_PORT_INIT("DISP_RDMA0", 0, 0, 2),
+ M4U0_PORT_INIT("DISP_WDMA0", 0, 0, 3),
+ M4U0_PORT_INIT("MDP_RDMA", 0, 0, 4),
+ M4U0_PORT_INIT("MDP_WDMA", 0, 0, 5),
+ M4U0_PORT_INIT("MDP_WROT", 0, 0, 6),
+ M4U0_PORT_INIT("DISP_FAKE", 0, 0, 7),
+
+ M4U0_PORT_INIT("VDEC_MC", 0, 1, 0),
+ M4U0_PORT_INIT("VDEC_PP", 0, 1, 1),
+ M4U0_PORT_INIT("VDEC_AVC_MV", 0, 1, 2),
+ M4U0_PORT_INIT("VDEC_PRED_RD", 0, 1, 3),
+ M4U0_PORT_INIT("VDEC_PRED_WR", 0, 1, 4),
+ M4U0_PORT_INIT("VDEC_VLD", 0, 1, 5),
+ M4U0_PORT_INIT("VDEC_PPWRAP", 0, 1, 6),
+
+ M4U0_PORT_INIT("IMGO", 0, 2, 0),
+ M4U0_PORT_INIT("IMGO2O", 0, 2, 1),
+ M4U0_PORT_INIT("LSCI", 0, 2, 2),
+ M4U0_PORT_INIT("VENC_BSDMA_VDEC_POST0", 0, 2, 3),
+ M4U0_PORT_INIT("JPGENC_RDMA", 0, 2, 4),
+ M4U0_PORT_INIT("CAM_IMGI", 0, 2, 5),
+ M4U0_PORT_INIT("CAM_ESFKO", 0, 2, 6),
+ M4U0_PORT_INIT("CAM_AAO", 0, 2, 7),
+ M4U0_PORT_INIT("JPGENC_BSDMA", 0, 2, 8),
+ M4U0_PORT_INIT("VENC_MVQP", 0, 2, 9),
+ M4U0_PORT_INIT("VENC_MC", 0, 2, 10),
+ M4U0_PORT_INIT("VENC_CDMA_VDEC_CDMA", 0, 2, 11),
+ M4U0_PORT_INIT("VENC_REC_VDEC_WDMA", 0, 2, 12),
+ M4U0_PORT_INIT("CAM_IMG3O", 0, 2, 13),
+ M4U0_PORT_INIT("CAM_VIPI", 0, 2, 14),
+ M4U0_PORT_INIT("CAM_VIP2I", 0, 2, 15),
+ M4U0_PORT_INIT("CAM_VIP3I", 0, 2, 16),
+ M4U0_PORT_INIT("CAM_LCEI", 0, 2, 17),
+ M4U0_PORT_INIT("CAM_RB", 0, 2, 18),
+ M4U0_PORT_INIT("CAM_RP", 0, 2, 19),
+ M4U0_PORT_INIT("CAM_WR", 0, 2, 20),
+
+ M4U0_PORT_INIT("UNKNOWN", 0, 4, 0),
};
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.h b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.h
index 777a6aad7..52379c39b 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_platform.h
@@ -5,17 +5,17 @@
#define M4U_BASE0 0xf0205000
-#define LARB0_BASE 0xf4016000
-#define LARB1_BASE 0xf6010000
-#define LARB2_BASE 0xf5001000
+#define LARB0_BASE 0xf4016000
+#define LARB1_BASE 0xf6010000
+#define LARB2_BASE 0xf5001000
-//mau related
+/* mau related */
#define MAU_NR_PER_M4U_SLAVE 4
-//smi
+/* smi */
#define SMI_LARB_NR 3
-//seq range related
+/* seq range related */
#define SEQ_NR_PER_MM_SLAVE 8
#define SEQ_NR_PER_PERI_SLAVE 0
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_port.h b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_port.h
index 0bd733d79..8f20139b4 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_port.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_port.h
@@ -1,47 +1,44 @@
#ifndef __M4U_PORT_D2_H__
#define __M4U_PORT_D2_H__
-//====================================
-// about portid
-//====================================
+/* ==================================== */
+/* about portid */
+/* ==================================== */
-enum
-{
- M4U_PORT_DISP_OVL0 ,
- M4U_PORT_DISP_OVL1 ,
- M4U_PORT_DISP_RDMA0 ,
- M4U_PORT_DISP_WDMA0 ,
- M4U_PORT_MDP_RDMA ,
- M4U_PORT_MDP_WDMA ,
- M4U_PORT_MDP_WROT ,
- M4u_PORT_DISP_FAKE ,
-
- M4U_PORT_HW_VDEC_MC_EXT ,
- M4U_PORT_HW_VDEC_PP_EXT ,
- M4U_PORT_HW_VDEC_AVC_MV_EXT ,
- M4U_PORT_HW_VDEC_PRED_RD_EXT ,
- M4U_PORT_HW_VDEC_PRED_WR_EXT ,
- M4U_PORT_HW_VDEC_VLD_EXT ,
- M4U_PORT_HW_VDEC_PPWRAP_EXT ,
-
- M4U_PORT_IMGO ,
- M4U_PORT_IMGO2O ,
- M4U_PORT_LSCI ,
- M4U_PORT_VENC_BSDMA_VDEC_POST0 ,
- M4U_PORT_JPGENC_RDMA ,
- M4U_PORT_CAM_IMGI ,
- M4U_PORT_CAM_ESFKO ,
- M4U_PORT_CAM_AAO ,
- M4U_PORT_JPGENC_BSDMA ,
- M4U_PORT_VENC_MVQP ,
- M4U_PORT_VENC_MC ,
- M4U_PORT_VENC_CDMA_VDEC_CDMA ,
- M4U_PORT_VENC_REC_VDEC_WDMA ,
-
- M4U_PORT_UNKNOWN ,
-
-};
+enum {
+ M4U_PORT_DISP_OVL0 ,
+ M4U_PORT_DISP_OVL1 ,
+ M4U_PORT_DISP_RDMA0 ,
+ M4U_PORT_DISP_WDMA0 ,
+ M4U_PORT_MDP_RDMA ,
+ M4U_PORT_MDP_WDMA ,
+ M4U_PORT_MDP_WROT ,
+ M4u_PORT_DISP_FAKE ,
+
+ M4U_PORT_HW_VDEC_MC_EXT ,
+ M4U_PORT_HW_VDEC_PP_EXT ,
+ M4U_PORT_HW_VDEC_AVC_MV_EXT ,
+ M4U_PORT_HW_VDEC_PRED_RD_EXT ,
+ M4U_PORT_HW_VDEC_PRED_WR_EXT ,
+ M4U_PORT_HW_VDEC_VLD_EXT ,
+ M4U_PORT_HW_VDEC_PPWRAP_EXT ,
+
+ M4U_PORT_IMGO ,
+ M4U_PORT_IMGO2O ,
+ M4U_PORT_LSCI ,
+ M4U_PORT_VENC_BSDMA_VDEC_POST0 ,
+ M4U_PORT_JPGENC_RDMA ,
+ M4U_PORT_CAM_IMGI ,
+ M4U_PORT_CAM_ESFKO ,
+ M4U_PORT_CAM_AAO ,
+ M4U_PORT_JPGENC_BSDMA ,
+ M4U_PORT_VENC_MVQP ,
+ M4U_PORT_VENC_MC ,
+ M4U_PORT_VENC_CDMA_VDEC_CDMA ,
+ M4U_PORT_VENC_REC_VDEC_WDMA ,
+
+ M4U_PORT_UNKNOWN ,
+};
#define M4U_PORT_NR M4U_PORT_UNKNOWN
#endif
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_reg.h b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_reg.h
index 80a493b62..2153d732a 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_reg.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6735m/m4u_reg.h
@@ -1,19 +1,17 @@
#ifndef _MT6735_M4U_REG_D2_H__
#define _MT6735_M4U_REG_D2_H__
-#include "mach/mt_reg_base.h"
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
+/* ================================================= */
+/* common macro definitions */
+#define F_VAL(val, msb, lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
+#define F_BIT_VAL(val, bit) ((!!(val))<<(bit))
+#define F_MSK_SHIFT(regval, msb, lsb) (((regval)&F_MSK(msb, lsb))>>lsb)
-//=====================================================
-//M4U register definition
-//=====================================================
+/* ===================================================== */
+/* M4U register definition */
+/* ===================================================== */
#define REG_MMUg_PT_BASE (0x0)
#define F_MMUg_PT_VA_MSK 0xffff0000
@@ -32,27 +30,27 @@
#define REG_MMU_PROG_DSC 0x18
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
+#define REG_MMU_INVLD (0x20)
+ #define F_MMU_INV_ALL 0x2
+ #define F_MMU_INV_RANGE 0x1
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
+#define REG_MMU_INVLD_SA (0x24)
+#define REG_MMU_INVLD_EA (0x28)
#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
+ #define F_MMU_INV_SEC_ALL 0x2
+ #define F_MMU_INV_SEC_RANGE 0x1
+
+#define REG_MMU_INVLD_SA_SEC (0x30)
#define REG_MMU_INVLD_EA_SEC (0x34)
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
+#define REG_INVLID_SEL (0x38)
+ #define F_MMU_INV_EN_L1 (1<<0)
+ #define F_MMU_INV_EN_L2 (1<<1)
#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
+ #define F_MMU_INV_SEC_EN_L1 (1<<0)
+ #define F_MMU_INV_SEC_EN_L2 (1<<1)
#define F_MMU_INV_SEC_INV_DONE (1<<2)
#define F_MMU_INV_SEC_INV_INT_CLR (1<<3)
#define F_MMU_INV_SEC_INV_INT_EN (1<<4)
@@ -79,7 +77,7 @@
#define REG_MMU_WR_LEN (0x54)
#define F_MMU_MMU0_WRITE_THROTTLING_DIS F_BIT_SET(5)
- #define F_MMU_MMU0_WRITE_LEN F_MSK(4,0)
+ #define F_MMU_MMU0_WRITE_LEN F_MSK(4, 0)
#define REG_MMU_HW_DEBUG (0x58)
#define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
@@ -92,16 +90,16 @@
#define REG_MMU_LEGACY_4KB_MODE (0x60)
#define REG_MMU_DBG0 (0X64)
- #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13,0)
+ #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13, 0)
#define REG_MMU_DBG1 (0x68)
- #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12,0)
+ #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12, 0)
#define REG_MMU_DBG2 (0x6c)
- #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11,0)
+ #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11, 0)
#define REG_MMU_SMI_COMMON_DBG0 (0x78)
- #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23,0)
+ #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23, 0)
#define REG_MMU_MMU_COHERENCE_EN 0x80
#define REG_MMU_IN_ORDER_WR_EN 0x84
@@ -113,38 +111,38 @@
#define F_READ_ENTRY_MM0_MAIN F_BIT_SET(27)
#define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(27+id)
#define F_READ_ENTRY_PFH F_BIT_SET(26)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,16,12)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,9,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
+ #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx, 16, 12)
+ #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx, 9, 5)
+ /* #define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4) */
+ /* #define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2) */
#define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
+ #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1, 0)
#define REG_MMU_DES_RDATA 0x104
#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu==0)?F_MMU0_PFH_TAG_VA_GET(tag): F_MMU1_PFH_TAG_VA_GET(tag))
+ #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu == 0)?F_MMU0_PFH_TAG_VA_GET(tag) : F_MMU1_PFH_TAG_VA_GET(tag))
#define F_MMU0_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(0)+1))
- #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
- #define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu) ((mmu=0)?F_MSK(31, 28):F_MSK(31, 28))
+ #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
+ #define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu) ((mmu = 0)?F_MSK(31, 28):F_MSK(31, 28))
#define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
+ #define F_PFH_TAG_16X_BIT F_BIT_SET(2) /* this bit is always 0 -- cost down. */
#define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
#define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-// tag releated macro
+/* tag related macro */
#define MMU0_SET_ORDER 5
#define MMU1_SET_ORDER 5
- #define MMU_SET_ORDER(mmu) ((mmu==0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
+ #define MMU_SET_ORDER(mmu) ((mmu == 0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
#define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
#define MMU_SET_LSB_OFFSET 15
#define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
+ #define MMU_PFH_VA_TO_SET(mmu, va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
#define MMU_PAGE_PER_LINE 8
#define MMU_WAY_NR 4
#define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
+
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
#define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 1)
@@ -160,7 +158,7 @@
#define REG_MMU_IVRP_PADDR 0x114
#define F_MMU_IVRP_PA_SET(PA) (PA>>1)
#define F_MMU_IVRP_4G_DRAM_PA_SET(PA) ((PA>>1)|(1<<31))
-
+
#define REG_MMU_INT_L2_CONTROL 0x120
#define F_INT_L2_CLR_BIT (1<<12)
#define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
@@ -176,15 +174,15 @@
#define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
+ #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_MISS_FIFO_ERR(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
+ #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) /* (14+(set)+(mmu*4)) */
#define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
#define F_INT_MMU0_MAU_MSK F_MSK(10, 7)
-
+
#define REG_MMU_CPE_DONE_SEC 0x128
#define REG_MMU_CPE_DONE 0x12C
@@ -198,51 +196,53 @@
#define REG_MMU_TBWALK_FAULT_VA 0x138
#define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
+
#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
#define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
#define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
+
#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
- #define F_MMU0_INT_ID_TF_MSK (~0x3) //only for MM iommu.
+ #define F_MMU0_INT_ID_TF_MSK (~0x3) /* only for MM iommu. */
#define REG_MMU_PF_MSCNT 0x160
#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
+#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) /* (0x168+((mmu)*12) */
#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(mmu, set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu==0)?(MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3)))) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
+#define REG_MMU_PFH_VLD(mmu, set, way) \
+ (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu == 0) \
+ ? (MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3))))
+ #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) /* set%32 */
#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_SQ_START(mmu, x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define F_SQ_VA_MASK F_MSK(31, 20)
#define F_SQ_EN_BIT (1<<19)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
+ /* #define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13) */
#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_VA(mmu, x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
#define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
#define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+
+#define REG_MMU_RSx_PA(mmu, x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_2ND_BASE(mmu, x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_ST(mmu, x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 22, 20)
#define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
#define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_MAIN_TAG(mmu, x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
#define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
#define F_MAIN_TLB_LOCK_BIT (1<<11)
#define F_MAIN_TLB_VALID_BIT (1<<10)
@@ -251,23 +251,23 @@
#define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
#define F_MAIN_TLB_INV_DES_BIT (1<<6)
#define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
+ #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4, 1)
#define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START(mmu, mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START_BIT32(mmu, mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END(mmu, mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END_BIT32(mmu, mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_PORT_EN(mmu, mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ID(mmu, mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
#define F_MMU_MAU_ASSERT_ID_LARB(regval) F_MSK_SHIFT(regval, 7, 5)
#define F_MMU_MAU_ASSERT_ID_PORT(regval) F_MSK_SHIFT(regval, 4, 0)
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR(mmu, mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR_BIT32(mmu, mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
+ #define F_MAU_LARB_VAL(mau, larb) ((larb)<<(mau*8))
#define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
@@ -282,76 +282,76 @@
#define REG_MMU_PFH_DIST2 0xb08
#define REG_MMU_PFH_DIST3 0xb0c
#define REG_MMU_PFH_DIST(port) (REG_MMU_PFH_DIST0+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
+ #define F_MMU_PFH_DIST_VAL(port, val) ((val&0xf)<<(((port)&0x7)<<2))
#define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
#define REG_MMU_PFH_DIST_NR(nr) (REG_MMU_PFH_DIST0 + ((nr)<<2))
#define REG_MMU_PFH_DIR0 0xd00
#define REG_MMU_PFH_DIR(port) (REG_MMU_PFH_DIR0)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
+#define F_MMU_PFH_DIR(port, val) ((!!(val))<<((port)&0x1f))
#define REG_MMU_PFH_DIR_NR(nr) (REG_MMU_PFH_DIR0 + ((nr)<<2))
-//================================================================
-// SMI larb
-//================================================================
+/* ================================================================ */
+/* SMI larb */
+/* ================================================================ */
-#define SMI_LARB_MMU_EN (0xfc0 )
+#define SMI_LARB_MMU_EN (0xfc0)
#define F_SMI_MMU_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_SEC_EN (0xfc4 )
+#define SMI_LARB_SEC_EN (0xfc4)
#define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_DOMN_0 (0xfd0 )
-#define SMI_LARB_DOMN_1 (0xfd4 )
-#define SMI_LARB_DOMN_2 (0xfd8 )
-#define SMI_LARB_DOMN_3 (0xfdc )
+#define SMI_LARB_DOMN_0 (0xfd0)
+#define SMI_LARB_DOMN_1 (0xfd4)
+#define SMI_LARB_DOMN_2 (0xfd8)
+#define SMI_LARB_DOMN_3 (0xfdc)
#define REG_SMI_LARB_DOMN_OF_PORT(port) (SMI_LARB_DOMN_0+(((port)>>3)<<2))
#define F_SMI_DOMN(port, domain) ((domain&0xf)<<(((port)&0x7)<<2))
-//=========================================================================
-// peripheral system
-//=========================================================================
+/* ========================================================================= */
+/* peripheral system */
+/* ========================================================================= */
#define REG_PERIAXI_BUS_CTL3 (0x208)
#define F_PERI_MMU_EN(port, en) ((en)<<((port)))
-#include <mach/sync_write.h>
+#include <mt-plat/sync_write.h>
-static inline unsigned int COM_ReadReg32(unsigned long addr)
+static inline unsigned int COM_ReadReg32(unsigned long addr)
{
- return ioread32((void*)addr);
+ return ioread32((void *)addr);
}
static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- mt_reg_sync_writel(Val, (void*)addr);
+{
+ mt_reg_sync_writel(Val, (void *)addr);
}
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
+static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
{
- unsigned int val;
- val = COM_ReadReg32((M4uBase+Offset));
- return val;
+ unsigned int val;
+
+ val = COM_ReadReg32((M4uBase+Offset));
+ return val;
}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
-{
- //printk("M4U_WriteReg32: M4uBase: 0x%lx, Offset:0x%x, val:0x%x\n", M4uBase, Offset, Val);
- COM_WriteReg32((M4uBase+Offset), Val);
+static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
+{
+ COM_WriteReg32((M4uBase+Offset), Val);
}
static inline void m4uHw_set_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned long mask, unsigned int val)
+ unsigned long mask, unsigned int val)
{
- unsigned int regval;
- regval = M4U_ReadReg32(M4UBase, reg);
- regval = (regval & (~mask))|val;
- M4U_WriteReg32(M4UBase, reg, regval);
+ unsigned int regval;
+
+ regval = M4U_ReadReg32(M4UBase, reg);
+ regval = (regval & (~mask))|val;
+ M4U_WriteReg32(M4UBase, reg, regval);
}
static inline unsigned int m4uHw_get_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned int mask)
+ unsigned int mask)
{
- return M4U_ReadReg32(M4UBase, reg)&mask;
+ return M4U_ReadReg32(M4UBase, reg)&mask;
}
-#endif //_MT6735_M4U_REG_D2_H__
-
+#endif /* _MT6735_M4U_REG_D2_H__ */
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6753/Makefile b/drivers/misc/mediatek/m4u/mt6735/mt6753/Makefile
index ed241266e..cc8f8d03a 100755..100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6753/Makefile
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6753/Makefile
@@ -1,6 +1,5 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/mt6735
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/mmp/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/2.0
obj-y += m4u_platform.o
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.c b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.c
index 509c484e3..7bbd5579c 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.c
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.c
@@ -4,70 +4,71 @@
unsigned int gM4UTagCount[] = {32};
-const char* gM4U_SMILARB[] = {
- "mediatek,SMI_LARB0", "mediatek,SMI_LARB1", "mediatek,SMI_LARB2", "mediatek,SMI_LARB3"};
+const char *gM4U_SMILARB[] = {
+ "mediatek,smi_larb0", "mediatek,smi_larb1", "mediatek,smi_larb2", "mediatek,smi_larb3"};
-M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0}};
+M4U_RANGE_DES_T gM4u0_seq[M4U0_SEQ_NR] = {{0} };
M4U_RANGE_DES_T *gM4USeq[] = {gM4u0_seq};
-#define M4U0_PORT_INIT(slave, larb, port) 0,slave,larb,port,(((larb)<<7)|((port)<<2)),1
+#define M4U0_PORT_INIT(name, slave, larb, port) {\
+ name, 0, slave, larb, port, (((larb)<<7)|((port)<<2)), 1 \
+}
-m4u_port_t gM4uPort[] =
-{
- { "DISP_OVL0", M4U0_PORT_INIT( 0, 0, 0 ), },
- { "DISP_RDMA0", M4U0_PORT_INIT( 0, 0, 1 ), },
- { "DISP_WDMA0", M4U0_PORT_INIT( 0, 0, 2 ), },
- { "DISP_OVL1", M4U0_PORT_INIT( 0, 0, 3 ), },
- { "DISP_RDMA1", M4U0_PORT_INIT( 0, 0, 4 ), },
- { "DISP_OD_R", M4U0_PORT_INIT( 0, 0, 5 ), },
- { "DISP_OD_W", M4U0_PORT_INIT( 0, 0, 6 ), },
- { "MDP_RDMA", M4U0_PORT_INIT( 0, 0, 7 ), },
- { "MDP_WDMA", M4U0_PORT_INIT( 0, 0, 8 ), },
- { "MDP_WROT", M4U0_PORT_INIT( 0, 0, 9 ), },
+m4u_port_t gM4uPort[] = {
+ M4U0_PORT_INIT("DISP_OVL0", 0, 0, 0),
+ M4U0_PORT_INIT("DISP_RDMA0", 0, 0, 1),
+ M4U0_PORT_INIT("DISP_WDMA0", 0, 0, 2),
+ M4U0_PORT_INIT("DISP_OVL1", 0, 0, 3),
+ M4U0_PORT_INIT("DISP_RDMA1", 0, 0, 4),
+ M4U0_PORT_INIT("DISP_OD_R", 0, 0, 5),
+ M4U0_PORT_INIT("DISP_OD_W", 0, 0, 6),
+ M4U0_PORT_INIT("MDP_RDMA", 0, 0, 7),
+ M4U0_PORT_INIT("MDP_WDMA", 0, 0, 8),
+ M4U0_PORT_INIT("MDP_WROT", 0, 0, 9),
- { "VDEC_MC", M4U0_PORT_INIT( 0, 1, 0 ), },
- { "VDEC_PP", M4U0_PORT_INIT( 0, 1, 1 ), },
- { "VDEC_AVC_MV", M4U0_PORT_INIT( 0, 1, 2 ), },
- { "VDEC_PRED_RD", M4U0_PORT_INIT( 0, 1, 3 ), },
- { "VDEC_PRED_WR", M4U0_PORT_INIT( 0, 1, 4 ), },
- { "VDEC_VLD", M4U0_PORT_INIT( 0, 1, 5 ), },
- { "VDEC_PPWRAP", M4U0_PORT_INIT( 0, 1, 6 ), },
+ M4U0_PORT_INIT("VDEC_MC", 0, 1, 0),
+ M4U0_PORT_INIT("VDEC_PP", 0, 1, 1),
+ M4U0_PORT_INIT("VDEC_AVC_MV", 0, 1, 2),
+ M4U0_PORT_INIT("VDEC_PRED_RD", 0, 1, 3),
+ M4U0_PORT_INIT("VDEC_PRED_WR", 0, 1, 4),
+ M4U0_PORT_INIT("VDEC_VLD", 0, 1, 5),
+ M4U0_PORT_INIT("VDEC_PPWRAP", 0, 1, 6),
- { "CAM_IMGO", M4U0_PORT_INIT( 0, 2, 0 ), },
- { "CAM_RRZO", M4U0_PORT_INIT( 0, 2, 1 ), },
- { "CAM_AAO", M4U0_PORT_INIT( 0, 2, 2 ), },
- { "CAM_LCSO", M4U0_PORT_INIT( 0, 2, 3 ), },
- { "CAM_ESFKO", M4U0_PORT_INIT( 0, 2, 4 ), },
- { "CAM_IMGO_S", M4U0_PORT_INIT( 0, 2, 5 ), },
- { "CAM_LSCI", M4U0_PORT_INIT( 0, 2, 6 ), },
- { "CAM_LSCI_D", M4U0_PORT_INIT( 0, 2, 7 ), },
- { "CAM_BPCI", M4U0_PORT_INIT( 0, 2, 8 ), },
- { "CAM_BPCI_D", M4U0_PORT_INIT( 0, 2, 9 ), },
- { "CAM_UFDI", M4U0_PORT_INIT( 0, 2, 10 ), },
- { "CAM_IMGI", M4U0_PORT_INIT( 0, 2, 11 ), },
- { "CAM_IMG2O", M4U0_PORT_INIT( 0, 2, 12 ), },
- { "CAM_IMG3O", M4U0_PORT_INIT( 0, 2, 13 ), },
- { "CAM_VIPI", M4U0_PORT_INIT( 0, 2, 14 ), },
- { "CAM_VIP2I", M4U0_PORT_INIT( 0, 2, 15 ), },
- { "CAM_VIP3I", M4U0_PORT_INIT( 0, 2, 16 ), },
- { "CAM_LCEI", M4U0_PORT_INIT( 0, 2, 17 ), },
- { "CAM_RB", M4U0_PORT_INIT( 0, 2, 18 ), },
- { "CAM_RP", M4U0_PORT_INIT( 0, 2, 19 ), },
- { "CAM_WR", M4U0_PORT_INIT( 0, 2, 20 ), },
+ M4U0_PORT_INIT("CAM_IMGO", 0, 2, 0),
+ M4U0_PORT_INIT("CAM_RRZO", 0, 2, 1),
+ M4U0_PORT_INIT("CAM_AAO", 0, 2, 2),
+ M4U0_PORT_INIT("CAM_LCSO", 0, 2, 3),
+ M4U0_PORT_INIT("CAM_ESFKO", 0, 2, 4),
+ M4U0_PORT_INIT("CAM_IMGO_S", 0, 2, 5),
+ M4U0_PORT_INIT("CAM_LSCI", 0, 2, 6),
+ M4U0_PORT_INIT("CAM_LSCI_D", 0, 2, 7),
+ M4U0_PORT_INIT("CAM_BPCI", 0, 2, 8),
+ M4U0_PORT_INIT("CAM_BPCI_D", 0, 2, 9),
+ M4U0_PORT_INIT("CAM_UFDI", 0, 2, 10),
+ M4U0_PORT_INIT("CAM_IMGI", 0, 2, 11),
+ M4U0_PORT_INIT("CAM_IMG2O", 0, 2, 12),
+ M4U0_PORT_INIT("CAM_IMG3O", 0, 2, 13),
+ M4U0_PORT_INIT("CAM_VIPI", 0, 2, 14),
+ M4U0_PORT_INIT("CAM_VIP2I", 0, 2, 15),
+ M4U0_PORT_INIT("CAM_VIP3I", 0, 2, 16),
+ M4U0_PORT_INIT("CAM_LCEI", 0, 2, 17),
+ M4U0_PORT_INIT("CAM_RB", 0, 2, 18),
+ M4U0_PORT_INIT("CAM_RP", 0, 2, 19),
+ M4U0_PORT_INIT("CAM_WR", 0, 2, 20),
- { "VENC_RCPU", M4U0_PORT_INIT( 0, 3, 0 ), },
- { "VENC_REC", M4U0_PORT_INIT( 0, 3, 1 ), },
- { "VENC_BSDMA", M4U0_PORT_INIT( 0, 3, 2 ), },
- { "VENC_SV_COMV", M4U0_PORT_INIT( 0, 3, 3 ), },
- { "VENC_RD_COMV", M4U0_PORT_INIT( 0, 3, 4 ), },
- { "JPGENC_RDMA", M4U0_PORT_INIT( 0, 3, 5 ), },
- { "JPGENC_BSDMA", M4U0_PORT_INIT( 0, 3, 6 ), },
- { "JPGDEC_WDMA", M4U0_PORT_INIT( 0, 3, 7 ), },
- { "JPGDEC_BSDMA", M4U0_PORT_INIT( 0, 3, 8 ), },
- { "VENC_CUR_LUMA", M4U0_PORT_INIT( 0, 3, 9 ), },
- { "VENC_CUR_CHROMA", M4U0_PORT_INIT( 0, 3, 10 ), },
- { "VENC_REF_LUMA", M4U0_PORT_INIT( 0, 3, 11 ), },
- { "VENC_REF_CHROMA", M4U0_PORT_INIT( 0, 3, 12 ), },
+ M4U0_PORT_INIT("VENC_RCPU", 0, 3, 0),
+ M4U0_PORT_INIT("VENC_REC", 0, 3, 1),
+ M4U0_PORT_INIT("VENC_BSDMA", 0, 3, 2),
+ M4U0_PORT_INIT("VENC_SV_COMV", 0, 3, 3),
+ M4U0_PORT_INIT("VENC_RD_COMV", 0, 3, 4),
+ M4U0_PORT_INIT("JPGENC_RDMA", 0, 3, 5),
+ M4U0_PORT_INIT("JPGENC_BSDMA", 0, 3, 6),
+ M4U0_PORT_INIT("JPGDEC_WDMA", 0, 3, 7),
+ M4U0_PORT_INIT("JPGDEC_BSDMA", 0, 3, 8),
+ M4U0_PORT_INIT("VENC_CUR_LUMA", 0, 3, 9),
+ M4U0_PORT_INIT("VENC_CUR_CHROMA", 0, 3, 10),
+ M4U0_PORT_INIT("VENC_REF_LUMA", 0, 3, 11),
+ M4U0_PORT_INIT("VENC_REF_CHROMA", 0, 3, 12),
- { "UNKOWN", M4U0_PORT_INIT( 0, 4, 0 ), },
+ M4U0_PORT_INIT("UNKNOWN", 0, 4, 0),
};
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.h b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.h
index 1e151f1b1..8a014d8cb 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_platform.h
@@ -5,18 +5,18 @@
#define M4U_BASE0 0xf0205000
-#define LARB0_BASE 0xf4015000
-#define LARB1_BASE 0xf6010000
-#define LARB2_BASE 0xf5001000
-#define LARB3_BASE 0xf7002000
+#define LARB0_BASE 0xf4015000
+#define LARB1_BASE 0xf6010000
+#define LARB2_BASE 0xf5001000
+#define LARB3_BASE 0xf7002000
-//mau related
+/* mau related */
#define MAU_NR_PER_M4U_SLAVE 4
-//smi
+/* smi */
#define SMI_LARB_NR 4
-//seq range related
+/* seq range related */
#define SEQ_NR_PER_MM_SLAVE 8
#define SEQ_NR_PER_PERI_SLAVE 0
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_port.h b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_port.h
index 51d623990..299749c60 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_port.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_port.h
@@ -1,71 +1,68 @@
#ifndef __M4U_PORT_D3_H__
#define __M4U_PORT_D3_H__
-//====================================
-// about portid
-//====================================
+/* ==================================== */
+/* about portid */
+/* ==================================== */
-enum
-{
- M4U_PORT_DISP_OVL0 ,
- M4U_PORT_DISP_RDMA0 ,
- M4U_PORT_DISP_WDMA0 ,
- M4U_PORT_DISP_OVL1 ,
- M4U_PORT_DISP_RDMA1 ,
- M4U_PORT_DISP_OD_R ,
- M4U_PORT_DISP_OD_W ,
- M4U_PORT_MDP_RDMA ,
- M4U_PORT_MDP_WDMA ,
- M4U_PORT_MDP_WROT ,
-
- M4U_PORT_HW_VDEC_MC_EXT ,
- M4U_PORT_HW_VDEC_PP_EXT ,
- M4U_PORT_HW_VDEC_AVC_MV_EXT ,
- M4U_PORT_HW_VDEC_PRED_RD_EXT ,
- M4U_PORT_HW_VDEC_PRED_WR_EXT ,
- M4U_PORT_HW_VDEC_VLD_EXT ,
- M4U_PORT_HW_VDEC_PPWRAP_EXT ,
-
- M4U_PORT_IMGO ,
- M4U_PORT_RRZO ,
- M4U_PORT_AAO ,
- M4U_PORT_LCSO ,
- M4U_PORT_ESFKO ,
- M4U_PORT_IMGO_S ,
- M4U_PORT_LSCI ,
- M4U_PORT_LSCI_D ,
- M4U_PORT_BPCI ,
- M4U_PORT_BPCI_D ,
- M4U_PORT_UFDI ,
- M4U_PORT_IMGI ,
- M4U_PORT_IMG2O ,
- M4U_PORT_IMG3O ,
- M4U_PORT_VIPI ,
- M4U_PORT_VIP2I ,
- M4U_PORT_VIP3I ,
- M4U_PORT_LCEI ,
- M4U_PORT_RB ,
- M4U_PORT_RP ,
- M4U_PORT_WR ,
+enum {
+ M4U_PORT_DISP_OVL0 ,
+ M4U_PORT_DISP_RDMA0 ,
+ M4U_PORT_DISP_WDMA0 ,
+ M4U_PORT_DISP_OVL1 ,
+ M4U_PORT_DISP_RDMA1 ,
+ M4U_PORT_DISP_OD_R ,
+ M4U_PORT_DISP_OD_W ,
+ M4U_PORT_MDP_RDMA ,
+ M4U_PORT_MDP_WDMA ,
+ M4U_PORT_MDP_WROT ,
- M4U_PORT_VENC_RCPU ,
- M4U_PORT_VENC_REC ,
- M4U_PORT_VENC_BSDMA ,
- M4U_PORT_VENC_SV_COMV ,
- M4U_PORT_VENC_RD_COMV ,
- M4U_PORT_JPGENC_RDMA ,
- M4U_PORT_JPGENC_BSDMA ,
- M4U_PORT_JPGDEC_WDMA ,
- M4U_PORT_JPGDEC_BSDMA ,
- M4U_PORT_VENC_CUR_LUMA ,
- M4U_PORT_VENC_CUR_CHROMA ,
- M4U_PORT_VENC_REF_LUMA ,
- M4U_PORT_VENC_REF_CHROMA ,
-
- M4U_PORT_UNKNOWN ,
-
-};
+ M4U_PORT_HW_VDEC_MC_EXT ,
+ M4U_PORT_HW_VDEC_PP_EXT ,
+ M4U_PORT_HW_VDEC_AVC_MV_EXT ,
+ M4U_PORT_HW_VDEC_PRED_RD_EXT ,
+ M4U_PORT_HW_VDEC_PRED_WR_EXT ,
+ M4U_PORT_HW_VDEC_VLD_EXT ,
+ M4U_PORT_HW_VDEC_PPWRAP_EXT ,
+
+ M4U_PORT_IMGO ,
+ M4U_PORT_RRZO ,
+ M4U_PORT_AAO ,
+ M4U_PORT_LCSO ,
+ M4U_PORT_ESFKO ,
+ M4U_PORT_IMGO_S ,
+ M4U_PORT_LSCI ,
+ M4U_PORT_LSCI_D ,
+ M4U_PORT_BPCI ,
+ M4U_PORT_BPCI_D ,
+ M4U_PORT_UFDI ,
+ M4U_PORT_IMGI ,
+ M4U_PORT_IMG2O ,
+ M4U_PORT_IMG3O ,
+ M4U_PORT_VIPI ,
+ M4U_PORT_VIP2I ,
+ M4U_PORT_VIP3I ,
+ M4U_PORT_LCEI ,
+ M4U_PORT_RB ,
+ M4U_PORT_RP ,
+ M4U_PORT_WR ,
+
+ M4U_PORT_VENC_RCPU ,
+ M4U_PORT_VENC_REC ,
+ M4U_PORT_VENC_BSDMA ,
+ M4U_PORT_VENC_SV_COMV ,
+ M4U_PORT_VENC_RD_COMV ,
+ M4U_PORT_JPGENC_RDMA ,
+ M4U_PORT_JPGENC_BSDMA ,
+ M4U_PORT_JPGDEC_WDMA ,
+ M4U_PORT_JPGDEC_BSDMA ,
+ M4U_PORT_VENC_CUR_LUMA ,
+ M4U_PORT_VENC_CUR_CHROMA ,
+ M4U_PORT_VENC_REF_LUMA ,
+ M4U_PORT_VENC_REF_CHROMA ,
+
+ M4U_PORT_UNKNOWN ,
+};
#define M4U_PORT_NR M4U_PORT_UNKNOWN
#endif
-
diff --git a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_reg.h b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_reg.h
index 596c21500..8c4710ec7 100644
--- a/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_reg.h
+++ b/drivers/misc/mediatek/m4u/mt6735/mt6753/m4u_reg.h
@@ -1,19 +1,17 @@
#ifndef _MT6735_M4U_REG_D3_H__
#define _MT6735_M4U_REG_D3_H__
-#include "mach/mt_reg_base.h"
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
+/* ================================================= */
+/* common macro definitions */
+#define F_VAL(val, msb, lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
+#define F_BIT_VAL(val, bit) ((!!(val))<<(bit))
+#define F_MSK_SHIFT(regval, msb, lsb) (((regval)&F_MSK(msb, lsb))>>lsb)
-//=====================================================
-//M4U register definition
-//=====================================================
+/* ===================================================== */
+/* M4U register definition */
+/* ===================================================== */
#define REG_MMUg_PT_BASE (0x0)
#define F_MMUg_PT_VA_MSK 0xffff0000
@@ -32,27 +30,27 @@
#define REG_MMU_PROG_DSC 0x18
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
+#define REG_MMU_INVLD (0x20)
+ #define F_MMU_INV_ALL 0x2
+ #define F_MMU_INV_RANGE 0x1
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
+#define REG_MMU_INVLD_SA (0x24)
+#define REG_MMU_INVLD_EA (0x28)
#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
+ #define F_MMU_INV_SEC_ALL 0x2
+ #define F_MMU_INV_SEC_RANGE 0x1
+
+#define REG_MMU_INVLD_SA_SEC (0x30)
#define REG_MMU_INVLD_EA_SEC (0x34)
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
+#define REG_INVLID_SEL (0x38)
+ #define F_MMU_INV_EN_L1 (1<<0)
+ #define F_MMU_INV_EN_L2 (1<<1)
#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
+ #define F_MMU_INV_SEC_EN_L1 (1<<0)
+ #define F_MMU_INV_SEC_EN_L2 (1<<1)
#define F_MMU_INV_SEC_INV_DONE (1<<2)
#define F_MMU_INV_SEC_INV_INT_CLR (1<<3)
#define F_MMU_INV_SEC_INV_INT_EN (1<<4)
@@ -79,7 +77,7 @@
#define REG_MMU_WR_LEN (0x54)
#define F_MMU_MMU0_WRITE_THROTTLING_DIS F_BIT_SET(5)
- #define F_MMU_MMU0_WRITE_LEN F_MSK(4,0)
+ #define F_MMU_MMU0_WRITE_LEN F_MSK(4, 0)
#define REG_MMU_HW_DEBUG (0x58)
#define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
@@ -92,16 +90,16 @@
#define REG_MMU_LEGACY_4KB_MODE (0x60)
#define REG_MMU_DBG0 (0X64)
- #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13,0)
+ #define F_MMU_L2_TLB_DBG_SIGNALS F_MSK(13, 0)
#define REG_MMU_DBG1 (0x68)
- #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12,0)
+ #define F_MMU_MMU0_AXI_INTERFACE_DBG_SIGNALS F_MSK(12, 0)
#define REG_MMU_DBG2 (0x6c)
- #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11,0)
+ #define F_MMU_MMU0_GLOBAL_DATA_COUNT_DBG_COUNTER F_MSK(11, 0)
#define REG_MMU_SMI_COMMON_DBG0 (0x78)
- #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23,0)
+ #define F_MMU_SMI_COMMON_DGB_SISGNALS F_MSK(23, 0)
#define REG_MMU_MMU_COHERENCE_EN 0x80
#define REG_MMU_IN_ORDER_WR_EN 0x84
@@ -113,38 +111,38 @@
#define F_READ_ENTRY_MM0_MAIN F_BIT_SET(27)
#define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(27+id)
#define F_READ_ENTRY_PFH F_BIT_SET(26)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,16,12)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,10,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
+ #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx, 16, 12)
+ #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx, 10, 5)
+ /* #define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4) */
+ /* #define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2) */
#define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
+ #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1, 0)
#define REG_MMU_DES_RDATA 0x104
#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu==0)?F_MMU0_PFH_TAG_VA_GET(tag): F_MMU1_PFH_TAG_VA_GET(tag))
+ #define F_PFH_TAG_VA_GET(mmu, tag) ((mmu == 0)?F_MMU0_PFH_TAG_VA_GET(tag) : F_MMU1_PFH_TAG_VA_GET(tag))
#define F_MMU0_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(0)+1))
- #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
- #define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu) ((mmu=0)?F_MSK(31, 29):F_MSK(31, 28))
+ #define F_MMU1_PFH_TAG_VA_GET(tag) (F_MSK_SHIFT(tag, 15, 4)<<(MMU_SET_MSB_OFFSET(1)+1))
+        #define F_MMU_PFH_TAG_VA_LAYER0_MSK(mmu)    ((mmu == 0) ? F_MSK(31, 29) : F_MSK(31, 28))
#define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
+ #define F_PFH_TAG_16X_BIT F_BIT_SET(2) /* this bit is always 0 -- cost down. */
#define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
#define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-// tag releated macro
+/* tag related macro */
#define MMU0_SET_ORDER 6
#define MMU1_SET_ORDER 5
- #define MMU_SET_ORDER(mmu) ((mmu==0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
+ #define MMU_SET_ORDER(mmu) ((mmu == 0) ? MMU0_SET_ORDER : MMU1_SET_ORDER)
#define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
#define MMU_SET_LSB_OFFSET 15
#define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
+ #define MMU_PFH_VA_TO_SET(mmu, va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
#define MMU_PAGE_PER_LINE 8
#define MMU_WAY_NR 4
#define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
+
#define REG_MMU_CTRL_REG 0x110
#define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
#define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 1)
@@ -160,7 +158,7 @@
#define REG_MMU_IVRP_PADDR 0x114
#define F_MMU_IVRP_PA_SET(PA) (PA>>1)
#define F_MMU_IVRP_4G_DRAM_PA_SET(PA) ((PA>>1)|(1<<31))
-
+
#define REG_MMU_INT_L2_CONTROL 0x120
#define F_INT_L2_CLR_BIT (1<<12)
#define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
@@ -176,15 +174,15 @@
#define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
+ #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(4+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_MISS_FIFO_ERR(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
#define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
+ #define F_INT_MAU(mmu, set) F_BIT_SET(7+(set)+(mmu<<2)) /* (14+(set)+(mmu*4)) */
#define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
#define F_INT_MMU0_MAU_MSK F_MSK(10, 7)
-
+
#define REG_MMU_CPE_DONE_SEC 0x128
#define REG_MMU_CPE_DONE 0x12C
@@ -198,51 +196,53 @@
#define REG_MMU_TBWALK_FAULT_VA 0x138
#define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
+
#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
#define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
#define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
#define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
+
#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
- #define F_MMU0_INT_ID_TF_MSK (~0x3) //only for MM iommu.
+ #define F_MMU0_INT_ID_TF_MSK (~0x3) /* only for MM iommu. */
#define REG_MMU_PF_MSCNT 0x160
#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
+#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) /* (0x168+((mmu)*12) */
#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(mmu, set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu==0)?(MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3)))) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
+#define REG_MMU_PFH_VLD(mmu, set, way) \
+ (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<((mmu == 0) \
+ ? (MMU0_SET_ORDER - 3):(MMU1_SET_ORDER - 3))))
+ #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) /* set%32 */
#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_SQ_START(mmu, x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define F_SQ_VA_MASK F_MSK(31, 20)
#define F_SQ_EN_BIT (1<<19)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
+ /* #define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13) */
#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_VA(mmu, x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
#define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
#define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+
+#define REG_MMU_RSx_PA(mmu, x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_2ND_BASE(mmu, x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_RSx_ST(mmu, x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
#define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 22, 20)
#define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
#define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
+#define REG_MMU_MAIN_TAG(mmu, x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
#define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
#define F_MAIN_TLB_LOCK_BIT (1<<11)
#define F_MAIN_TLB_VALID_BIT (1<<10)
@@ -251,23 +251,23 @@
#define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
#define F_MAIN_TLB_INV_DES_BIT (1<<6)
#define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
+ #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4, 1)
#define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START(mmu, mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START_BIT32(mmu, mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END(mmu, mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END_BIT32(mmu, mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_PORT_EN(mmu, mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ID(mmu, mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
#define F_MMU_MAU_ASSERT_ID_LARB(regval) F_MSK_SHIFT(regval, 7, 5)
#define F_MMU_MAU_ASSERT_ID_PORT(regval) F_MSK_SHIFT(regval, 4, 0)
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR(mmu, mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR_BIT32(mmu, mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
+ #define F_MAU_LARB_VAL(mau, larb) ((larb)<<(mau*8))
#define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
@@ -285,77 +285,77 @@
#define REG_MMU_PFH_DIST5 0xb14
#define REG_MMU_PFH_DIST6 0xb18
#define REG_MMU_PFH_DIST(port) (0xb00+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
+ #define F_MMU_PFH_DIST_VAL(port, val) ((val&0xf)<<(((port)&0x7)<<2))
#define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
#define REG_MMU_PFH_DIST_NR(nr) (REG_MMU_PFH_DIST0 + ((nr)<<2))
#define REG_MMU_PFH_DIR0 0xd00
#define REG_MMU_PFH_DIR1 0xd04
-#define REG_MMU_PFH_DIR(port) (((port)<32) ? REG_MMU_PFH_DIR0: REG_MMU_PFH_DIR1)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
+#define REG_MMU_PFH_DIR(port) (((port) < 32) ? REG_MMU_PFH_DIR0 : REG_MMU_PFH_DIR1)
+#define F_MMU_PFH_DIR(port, val) ((!!(val))<<((port)&0x1f))
#define REG_MMU_PFH_DIR_NR(nr) (REG_MMU_PFH_DIR0 + ((nr)<<2))
-//================================================================
-// SMI larb
-//================================================================
+/* ================================================================ */
+/* SMI larb */
+/* ================================================================ */
-#define SMI_LARB_MMU_EN (0xfc0 )
+#define SMI_LARB_MMU_EN (0xfc0)
#define F_SMI_MMU_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_SEC_EN (0xfc4 )
+#define SMI_LARB_SEC_EN (0xfc4)
#define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define SMI_LARB_DOMN_0 (0xfd0 )
-#define SMI_LARB_DOMN_1 (0xfd4 )
-#define SMI_LARB_DOMN_2 (0xfd8 )
-#define SMI_LARB_DOMN_3 (0xfdc )
+#define SMI_LARB_DOMN_0 (0xfd0)
+#define SMI_LARB_DOMN_1 (0xfd4)
+#define SMI_LARB_DOMN_2 (0xfd8)
+#define SMI_LARB_DOMN_3 (0xfdc)
#define REG_SMI_LARB_DOMN_OF_PORT(port) (SMI_LARB_DOMN_0+(((port)>>3)<<2))
#define F_SMI_DOMN(port, domain) ((domain&0xf)<<(((port)&0x7)<<2))
-//=========================================================================
-// peripheral system
-//=========================================================================
+/* ========================================================================= */
+/* peripheral system */
+/* ========================================================================= */
#define REG_PERIAXI_BUS_CTL3 (0x208)
#define F_PERI_MMU_EN(port, en) ((en)<<((port)))
#include <mach/sync_write.h>
-static inline unsigned int COM_ReadReg32(unsigned long addr)
+static inline unsigned int COM_ReadReg32(unsigned long addr)
{
- return ioread32((void*)addr);
+ return ioread32((void *)addr);
}
static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- mt_reg_sync_writel(Val, (void*)addr);
+{
+ mt_reg_sync_writel(Val, (void *)addr);
}
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
+static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned int Offset)
{
- unsigned int val;
- val = COM_ReadReg32((M4uBase+Offset));
- return val;
+ unsigned int val;
+
+ val = COM_ReadReg32((M4uBase+Offset));
+ return val;
}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
-{
- //printk("M4U_WriteReg32: M4uBase: 0x%lx, Offset:0x%x, val:0x%x\n", M4uBase, Offset, Val);
- COM_WriteReg32((M4uBase+Offset), Val);
+static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned int Offset, unsigned int Val)
+{
+ COM_WriteReg32((M4uBase+Offset), Val);
}
static inline void m4uHw_set_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned long mask, unsigned int val)
+ unsigned long mask, unsigned int val)
{
- unsigned int regval;
- regval = M4U_ReadReg32(M4UBase, reg);
- regval = (regval & (~mask))|val;
- M4U_WriteReg32(M4UBase, reg, regval);
+ unsigned int regval;
+
+ regval = M4U_ReadReg32(M4UBase, reg);
+ regval = (regval & (~mask))|val;
+ M4U_WriteReg32(M4UBase, reg, regval);
}
static inline unsigned int m4uHw_get_field_by_mask(unsigned long M4UBase, unsigned int reg,
- unsigned int mask)
+ unsigned int mask)
{
- return M4U_ReadReg32(M4UBase, reg)&mask;
+ return M4U_ReadReg32(M4UBase, reg)&mask;
}
-#endif //_MT6735_M4U_REG_D3_H__
-
+#endif /* _MT6735_M4U_REG_D3_H__ */
diff --git a/drivers/misc/mediatek/mach/mt6735/include/mach/m4u.h b/drivers/misc/mediatek/mach/mt6735/include/mach/m4u.h
index c674dcb52..6063d8c84 100644
--- a/drivers/misc/mediatek/mach/mt6735/include/mach/m4u.h
+++ b/drivers/misc/mediatek/mach/mt6735/include/mach/m4u.h
@@ -1,5 +1,5 @@
-#ifndef __M4U_H__
-#define __M4U_H__
+#ifndef __M4U_V2_H__
+#define __M4U_V2_H__
#include <linux/ioctl.h>
#include <linux/fs.h>
#include "m4u_port.h"
@@ -7,133 +7,155 @@
typedef int M4U_PORT_ID;
-#define M4U_PROT_READ (1<<0) //buffer can be read by engine
-#define M4U_PROT_WRITE (1<<1) //buffer can be write by engine
-#define M4U_PROT_CACHE (1<<2) //buffer access will goto CCI to do cache snoop
-#define M4U_PROT_SHARE (1<<3) /*buffer access will goto CCI, but don't do cache snoop
- (just for engines who wants to use CCI bandwith) */
-#define M4U_PROT_SEC (1<<4) //buffer can only be accessed by secure engine.
+#define M4U_PROT_READ (1<<0) /* buffer can be read by engine */
+#define M4U_PROT_WRITE (1<<1) /* buffer can be write by engine */
+#define M4U_PROT_CACHE (1<<2) /* buffer access will goto CCI to do cache snoop */
+#define M4U_PROT_SHARE (1<<3) /* buffer access will goto CCI, but don't do cache snoop
+ (just for engines who wants to use CCI bandwidth) */
+#define M4U_PROT_SEC (1<<4) /* buffer can only be accessed by secure engine. */
-//public flags
-#define M4U_FLAGS_SEQ_ACCESS (1<<0) //engine access this buffer in sequncial way.
-#define M4U_FLAGS_FIX_MVA (1<<1) //fix allocation, we will use mva user specified.
-#define M4U_FLAGS_SEC_SHAREABLE (1<<2) //the mva will share in SWd
+/* public flags */
+#define M4U_FLAGS_SEQ_ACCESS (1<<0) /* engine access this buffer in sequncial way. */
+#define M4U_FLAGS_FIX_MVA (1<<1) /* fix allocation, we will use mva user specified. */
+#define M4U_FLAGS_SEC_SHAREABLE (1<<2) /* the mva will share in SWd */
-//m4u internal flags (DO NOT use them for other purpers)
-#define M4U_FLAGS_MVA_IN_FREE (1<<8) //this mva is in deallocating.
+/* m4u internal flags (DO NOT use them for other purpers) */
+#define M4U_FLAGS_MVA_IN_FREE (1<<8) /* this mva is in deallocating. */
-typedef enum
-{
- RT_RANGE_HIGH_PRIORITY=0,
- SEQ_RANGE_LOW_PRIORITY=1
+typedef enum {
+ RT_RANGE_HIGH_PRIORITY = 0,
+ SEQ_RANGE_LOW_PRIORITY = 1
} M4U_RANGE_PRIORITY_ENUM;
-// port related: virtuality, security, distance
-typedef struct _M4U_PORT
-{
- M4U_PORT_ID ePortID; //hardware port ID, defined in M4U_PORT_ID
- unsigned int Virtuality;
+/* port related: virtuality, security, distance */
+typedef struct _M4U_PORT {
+ M4U_PORT_ID ePortID; /* hardware port ID, defined in M4U_PORT_ID */
+ unsigned int Virtuality;
unsigned int Security;
- unsigned int domain; //domain : 0 1 2 3
+ unsigned int domain; /* domain : 0 1 2 3 */
unsigned int Distance;
- unsigned int Direction; //0:- 1:+
-}M4U_PORT_STRUCT;
-
-struct m4u_port_array
-{
- #define M4U_PORT_ATTR_EN (1<<0)
- #define M4U_PORT_ATTR_VIRTUAL (1<<1)
- #define M4U_PORT_ATTR_SEC (1<<2)
- unsigned char ports[M4U_PORT_NR];
+ unsigned int Direction; /* 0:- 1:+ */
+} M4U_PORT_STRUCT;
+
+struct m4u_port_array {
+ #define M4U_PORT_ATTR_EN (1<<0)
+ #define M4U_PORT_ATTR_VIRTUAL (1<<1)
+ #define M4U_PORT_ATTR_SEC (1<<2)
+ unsigned char ports[M4U_PORT_NR];
};
-typedef enum
-{
- M4U_CACHE_CLEAN_BY_RANGE,
- M4U_CACHE_INVALID_BY_RANGE,
- M4U_CACHE_FLUSH_BY_RANGE,
+typedef enum {
+ M4U_CACHE_CLEAN_BY_RANGE,
+ M4U_CACHE_INVALID_BY_RANGE,
+ M4U_CACHE_FLUSH_BY_RANGE,
- M4U_CACHE_CLEAN_ALL,
- M4U_CACHE_INVALID_ALL,
- M4U_CACHE_FLUSH_ALL,
+ M4U_CACHE_CLEAN_ALL,
+ M4U_CACHE_INVALID_ALL,
+ M4U_CACHE_FLUSH_ALL,
} M4U_CACHE_SYNC_ENUM;
-typedef struct
-{
- //mutex to protect mvaList
- //should get this mutex whenever add/delete/interate mvaList
- struct mutex dataMutex;
- pid_t open_pid;
- pid_t open_tgid;
- struct list_head mvaList;
+typedef enum {
+ M4U_DMA_MAP_AREA,
+ M4U_DMA_UNMAP_AREA,
+} M4U_DMA_TYPE;
+
+typedef enum {
+ M4U_DMA_FROM_DEVICE,
+ M4U_DMA_TO_DEVICE,
+ M4U_DMA_BIDIRECTIONAL,
+} M4U_DMA_DIR;
+
+typedef struct {
+ /* mutex to protect mvaList */
+ /* should get this mutex whenever add/delete/interate mvaList */
+ struct mutex dataMutex;
+ pid_t open_pid;
+ pid_t open_tgid;
+ struct list_head mvaList;
} m4u_client_t;
-
-
-
int m4u_dump_info(int m4u_index);
int m4u_power_on(int m4u_index);
int m4u_power_off(int m4u_index);
-int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
- unsigned long va, struct sg_table *sg_table,
- unsigned int size, unsigned int prot, unsigned int flags,
- unsigned int *pMva);
+int m4u_alloc_mva(m4u_client_t *client, M4U_PORT_ID port,
+ unsigned long va, struct sg_table *sg_table,
+ unsigned int size, unsigned int prot, unsigned int flags,
+ unsigned int *pMva);
int m4u_dealloc_mva(m4u_client_t *client, M4U_PORT_ID port, unsigned int mva);
-int m4u_alloc_mva_sg(int eModuleID,
- struct sg_table *sg_table,
- const unsigned int BufSize,
- int security,
- int cache_coherent,
- unsigned int *pRetMVABuf);
+int m4u_alloc_mva_sg(int eModuleID,
+ struct sg_table *sg_table,
+ const unsigned int BufSize,
+ int security,
+ int cache_coherent,
+ unsigned int *pRetMVABuf);
-int m4u_dealloc_mva_sg(int eModuleID,
- struct sg_table* sg_table,
- const unsigned int BufSize,
- const unsigned int MVA);
+int m4u_dealloc_mva_sg(int eModuleID,
+ struct sg_table *sg_table,
+ const unsigned int BufSize,
+ const unsigned int MVA);
-
-int m4u_config_port(M4U_PORT_STRUCT* pM4uPort);
+int m4u_config_port(M4U_PORT_STRUCT *pM4uPort);
int m4u_config_port_array(struct m4u_port_array *port_array);
int m4u_monitor_start(int m4u_id);
int m4u_monitor_stop(int m4u_id);
+int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
+ unsigned long va, unsigned int size, unsigned int mva,
+ M4U_CACHE_SYNC_ENUM sync_type);
-int m4u_cache_sync(m4u_client_t *client, M4U_PORT_ID port,
- unsigned long va, unsigned int size, unsigned int mva,
- M4U_CACHE_SYNC_ENUM sync_type);
-
-int m4u_mva_map_kernel(unsigned int mva, unsigned int size,
- unsigned long *map_va, unsigned int *map_size);
+int m4u_mva_map_kernel(unsigned int mva, unsigned int size,
+ unsigned long *map_va, unsigned int *map_size);
int m4u_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned long va);
-m4u_client_t * m4u_create_client(void);
+m4u_client_t *m4u_create_client(void);
int m4u_destroy_client(m4u_client_t *client);
int m4u_dump_reg_for_smi_hang_issue(void);
+int m4u_display_fake_engine_test(unsigned long ulFakeReadAddr, unsigned long ulFakeWriteAddr);
+void m4u_larb_backup(int larb_idx);
+void m4u_larb_restore(int larb_idx);
-typedef enum m4u_callback_ret
-{
- M4U_CALLBACK_HANDLED,
- M4U_CALLBACK_NOT_HANDLED,
-}m4u_callback_ret_t;
+typedef enum m4u_callback_ret {
+ M4U_CALLBACK_HANDLED,
+ M4U_CALLBACK_NOT_HANDLED,
+} m4u_callback_ret_t;
-typedef m4u_callback_ret_t (m4u_reclaim_mva_callback_t)(int alloc_port, unsigned int mva,
- unsigned int size, void* data);
-int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void* data);
+typedef m4u_callback_ret_t (m4u_reclaim_mva_callback_t)(int alloc_port, unsigned int mva,
+ unsigned int size, void *data);
+int m4u_register_reclaim_callback(int port, m4u_reclaim_mva_callback_t *fn, void *data);
int m4u_unregister_reclaim_callback(int port);
-typedef m4u_callback_ret_t (m4u_fault_callback_t)(int port, unsigned int mva, void* data);
-int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void* data);
+typedef m4u_callback_ret_t (m4u_fault_callback_t)(int port, unsigned int mva, void *data);
+int m4u_register_fault_callback(int port, m4u_fault_callback_t *fn, void *data);
int m4u_unregister_fault_callback(int port);
-// m4u driver internal use ---------------------------------------------------
-//
+#ifdef CONFIG_PM
+extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
+extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity);
+#endif
+#ifdef M4U_TEE_SERVICE_ENABLE
+extern int gM4U_L2_enable;
#endif
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+
+#ifdef M4U_PROFILE
+extern void MMProfileEnable(int enable);
+extern void MMProfileStart(int start);
+extern MMP_Event M4U_MMP_Events[M4U_MMP_MAX];
+#endif
+
+#ifndef M4U_FPGAPORTING
+extern void smp_inner_dcache_flush_all(void);
+#endif
+/* m4u driver internal use --------------------------------------------------- */
+/* */
+
+#endif
diff --git a/drivers/misc/mediatek/mach/mt6735/include/mach/mt_smi.h b/drivers/misc/mediatek/mach/mt6735/include/mach/mt_smi.h
index 067fe15f8..9ab941167 100644
--- a/drivers/misc/mediatek/mach/mt6735/include/mach/mt_smi.h
+++ b/drivers/misc/mediatek/mach/mt6735/include/mach/mt_smi.h
@@ -1,5 +1,5 @@
-#ifndef _MTK_MAU_H_
-#define _MTK_MAU_H_
+#ifndef _MTK_SMI_H_
+#define _MTK_SMI_H_
#define MTK_SMI_MAJOR_NUMBER 190
@@ -8,80 +8,84 @@
#define MTK_IOWR(num, dtype) _IOWR('O', num, dtype)
#define MTK_IO(num) _IO('O', num)
-// --------------------------------------------------------------------------
+/* -------------------------------------------------------------------------- */
#define MTK_CONFIG_MM_MAU MTK_IOW(10, unsigned long)
-typedef struct
-{
- int larb; //0~4: the larb you want to monitor
- int entry; //0~2: the mau entry to use
- unsigned int port_msk; //port mask to be monitored
- int virt; // 1: monitor va (this port is using m4u); 0: monitor pa (this port is not using m4u)
- int monitor_read; // monitor read transaction 1-enable, 0-disable
- int monitor_write; //monitor write transaction 1-enable, 0-disable
- unsigned int start; //start address to monitor
- unsigned int end; //end address to monitor
+typedef struct {
+ int larb; /* 0~4: the larb you want to monitor */
+ int entry; /* 0~2: the mau entry to use */
+ unsigned int port_msk; /* port mask to be monitored */
+ int virt; /* 1: monitor va (this port is using m4u); */
+ /* 0: monitor pa (this port is not using m4u) */
+ int monitor_read; /* monitor read transaction 1-enable, 0-disable */
+ int monitor_write; /* monitor write transaction 1-enable, 0-disable */
+ unsigned int start; /* start address to monitor */
+ unsigned int end; /* end address to monitor */
} MTK_MAU_CONFIG;
-int mau_config(MTK_MAU_CONFIG* pMauConf);
+int mau_config(MTK_MAU_CONFIG *pMauConf);
+int mau_dump_status(int larb);
-//---------------------------------------------------------------------------
-typedef enum
-{
- SMI_BWC_SCEN_NORMAL,
- SMI_BWC_SCEN_VR,
- SMI_BWC_SCEN_SWDEC_VP,
- SMI_BWC_SCEN_VP,
- SMI_BWC_SCEN_VR_SLOW,
- SMI_BWC_SCEN_MM_GPU,
- SMI_BWC_SCEN_WFD,
- SMI_BWC_SCEN_VENC,
- SMI_BWC_SCEN_ICFP,
- SMI_BWC_SCEN_VSS,
- SMI_BWC_SCEN_FORCE_MMDVFS,
- SMI_BWC_SCEN_CNT
+/* --------------------------------------------------------------------------- */
+typedef enum {
+ SMI_BWC_SCEN_NORMAL,
+ SMI_BWC_SCEN_VR,
+ SMI_BWC_SCEN_SWDEC_VP,
+ SMI_BWC_SCEN_VP,
+ SMI_BWC_SCEN_VR_SLOW,
+ SMI_BWC_SCEN_MM_GPU,
+ SMI_BWC_SCEN_WFD,
+ SMI_BWC_SCEN_VENC,
+ SMI_BWC_SCEN_ICFP,
+ SMI_BWC_SCEN_UI_IDLE,
+ SMI_BWC_SCEN_VSS,
+ SMI_BWC_SCEN_FORCE_MMDVFS,
+ SMI_BWC_SCEN_HDMI,
+ SMI_BWC_SCEN_HDMI4K,
+ SMI_BWC_SCEN_CNT
} MTK_SMI_BWC_SCEN;
/* MMDVFS */
-typedef enum
-{
+typedef enum {
MMDVFS_VOLTAGE_DEFAULT,
MMDVFS_VOLTAGE_0 = MMDVFS_VOLTAGE_DEFAULT,
MMDVFS_VOLTAGE_LOW = MMDVFS_VOLTAGE_0,
MMDVFS_VOLTAGE_1,
MMDVFS_VOLTAGE_HIGH = MMDVFS_VOLTAGE_1,
+ MMDVFS_VOLTAGE_DEFAULT_STEP,
MMDVFS_VOLTAGE_COUNT
} mmdvfs_voltage_enum;
-typedef struct
-{
- int scenario;
- int b_on_off; //0 : exit this scenario , 1 : enter this scenario
+typedef struct {
+ int scenario;
+ int b_on_off; /* 0 : exit this scenario , 1 : enter this scenario */
} MTK_SMI_BWC_CONFIG;
-typedef struct
-{
- unsigned int address;
- unsigned int value;
+typedef struct {
+	unsigned int *hwc_max_pixel; /* output: maximum HWC pixel count -- TODO(review): confirm semantics; original comment was copy-pasted from b_on_off */
+} MTK_SMI_BWC_STATE;
+
+typedef struct {
+ unsigned int address;
+ unsigned int value;
} MTK_SMI_BWC_REGISTER_SET;
-typedef struct
-{
- unsigned int address;
- unsigned int* return_address; //0 : exit this scenario , 1 : enter this scenario
+typedef struct {
+ unsigned int address;
+ unsigned int *return_address;
} MTK_SMI_BWC_REGISTER_GET;
#define MMDVFS_CAMERA_MODE_FLAG_DEFAULT 1
-#define MMDVFS_CAMERA_MODE_FLAG_PIP (1 << 1)
-#define MMDVFS_CAMERA_MODE_FLAG_VFB (1 << 2)
+#define MMDVFS_CAMERA_MODE_FLAG_PIP (1 << 1)
+#define MMDVFS_CAMERA_MODE_FLAG_VFB (1 << 2)
#define MMDVFS_CAMERA_MODE_FLAG_EIS_2_0 (1 << 3)
-#define MMDVFS_CAMERA_MODE_FLAG_IVHDR (1 << 4)
+#define MMDVFS_CAMERA_MODE_FLAG_IVHDR (1 << 4)
+#define MMDVFS_CAMERA_MODE_FLAG_STEREO (1 << 5)
-typedef struct
-{
+typedef struct {
unsigned int type;
MTK_SMI_BWC_SCEN scen;
@@ -96,42 +100,39 @@ typedef struct
#define MTK_MMDVFS_CMD_TYPE_SET 0
#define MTK_MMDVFS_CMD_TYPE_QUERY 1
+#define MTK_MMDVFS_CMD_TYPE_MMSYS_SET 2
-// GMP start
-typedef enum
-{
- SMI_BWC_INFO_CON_PROFILE = 0,
- SMI_BWC_INFO_SENSOR_SIZE,
- SMI_BWC_INFO_VIDEO_RECORD_SIZE,
- SMI_BWC_INFO_DISP_SIZE,
- SMI_BWC_INFO_TV_OUT_SIZE,
- SMI_BWC_INFO_FPS,
- SMI_BWC_INFO_VIDEO_ENCODE_CODEC,
- SMI_BWC_INFO_VIDEO_DECODE_CODEC,
- SMI_BWC_INFO_HW_OVL_LIMIT,
- SMI_BWC_INFO_CNT
+typedef enum {
+ SMI_BWC_INFO_CON_PROFILE = 0,
+ SMI_BWC_INFO_SENSOR_SIZE,
+ SMI_BWC_INFO_VIDEO_RECORD_SIZE,
+ SMI_BWC_INFO_DISP_SIZE,
+ SMI_BWC_INFO_TV_OUT_SIZE,
+ SMI_BWC_INFO_FPS,
+ SMI_BWC_INFO_VIDEO_ENCODE_CODEC,
+ SMI_BWC_INFO_VIDEO_DECODE_CODEC,
+ SMI_BWC_INFO_HW_OVL_LIMIT,
+ SMI_BWC_INFO_CNT
} MTK_SMI_BWC_INFO_ID;
-typedef struct
-{
- int property;
- int value1;
- int value2;
+typedef struct {
+ int property;
+ int value1;
+ int value2;
} MTK_SMI_BWC_INFO_SET;
-typedef struct
-{
- unsigned int flag; // Reserved
- int concurrent_profile;
- int sensor_size[2];
- int video_record_size[2];
- int display_size[2];
- int tv_out_size[2];
- int fps;
- int video_encode_codec;
- int video_decode_codec;
- int hw_ovl_limit;
+typedef struct {
+ unsigned int flag; /* Reserved */
+ int concurrent_profile;
+ int sensor_size[2];
+ int video_record_size[2];
+ int display_size[2];
+ int tv_out_size[2];
+ int fps;
+ int video_encode_codec;
+ int video_decode_codec;
+ int hw_ovl_limit;
} MTK_SMI_BWC_MM_INFO;
@@ -140,15 +141,16 @@ typedef struct
#define MTK_IOC_SPC_DUMP_STA MTK_IOW(22, unsigned long)
#define MTK_IOC_SPC_CMD MTK_IOW(23, unsigned long)
#define MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_BWC_CONFIG)
+#define MTK_IOC_SMI_BWC_STATE MTK_IOWR(25, MTK_SMI_BWC_STATE)
#define MTK_IOC_SMI_BWC_REGISTER_SET MTK_IOWR(26, MTK_SMI_BWC_REGISTER_SET)
#define MTK_IOC_SMI_BWC_REGISTER_GET MTK_IOWR(27, MTK_SMI_BWC_REGISTER_GET)
-// For BWC.MM property setting
+/* For BWC.MM property setting */
#define MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_BWC_INFO_SET)
-// For BWC.MM property get
+/* For BWC.MM property get */
#define MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_BWC_MM_INFO)
-// GMP end
+/* GMP end */
#define MTK_IOC_SMI_DUMP_LARB MTK_IOWR(66, unsigned int)
#define MTK_IOC_SMI_DUMP_COMMON MTK_IOWR(67, unsigned int)
@@ -156,34 +158,65 @@ typedef struct
typedef enum {
- SPC_PROT_NO_PROT = 0,
- SPC_PROT_SEC_RW_ONLY,
- SPC_PROT_SEC_RW_NONSEC_R,
- SPC_PROT_NO_ACCESS,
+ SPC_PROT_NO_PROT = 0,
+ SPC_PROT_SEC_RW_ONLY,
+ SPC_PROT_SEC_RW_NONSEC_R,
+ SPC_PROT_NO_ACCESS,
-}SPC_PROT_T;
+} SPC_PROT_T;
-typedef struct
-{
- SPC_PROT_T domain_0_prot;
- SPC_PROT_T domain_1_prot;
- SPC_PROT_T domain_2_prot;
- SPC_PROT_T domain_3_prot;
- unsigned int start; //start address to monitor
- unsigned int end; //end address to monitor
+typedef struct {
+ SPC_PROT_T domain_0_prot;
+ SPC_PROT_T domain_1_prot;
+ SPC_PROT_T domain_2_prot;
+ SPC_PROT_T domain_3_prot;
+ unsigned int start; /* start address to monitor */
+ unsigned int end; /* end address to monitor */
} MTK_SPC_CONFIG;
-void spc_config(MTK_SPC_CONFIG* pCfg);
+void spc_config(MTK_SPC_CONFIG *pCfg);
unsigned int spc_status_check(void);
unsigned int spc_dump_reg(void);
-unsigned int spc_register_isr(void* dev);
+unsigned int spc_register_isr(void *dev);
unsigned int spc_clear_irq(void);
int spc_test(int code);
-int MTK_SPC_Init(void* dev);
+int MTK_SPC_Init(void *dev);
+#define MMDVFS_ENABLE_DEFAULT_STEP_QUERY
+#define MMDVFS_MMCLOCK_NOTIFICATION
/* MMDVFS kernel API */
extern int mmdvfs_set_step(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step);
-
+extern int mmdvfs_is_default_step_need_perf(void);
+extern void mmdvfs_mm_clock_switch_notify(int is_before, int is_to_high);
+
+#ifdef CONFIG_MTK_SMI_VARIANT
+/* Enable the power-domain and the clocks of the larb.
+ *
+ * larb: larb id
+ * pm: if true, this function will help enable larb's power-domain.
+ * if false, please make sure the larb's power-domain has been enabled.
+ * some h/w may reset if the sequence is not good while
+ * smi-larb enable power-domain.
+ * please call them in non-atomic context.
+ * Return : 0 is successful, Others is failed.
+ */
+int mtk_smi_larb_clock_on(int larb, bool pm);
+void mtk_smi_larb_clock_off(int larb, bool pm);
+
+/* Return 0 is failed */
+unsigned long mtk_smi_larb_get_base(int larbid);
+#else
+
+static inline int mtk_smi_larb_clock_on(int larb, bool pm)
+{
+ return 0;
+}
+static inline void mtk_smi_larb_clock_off(int larb, bool pm) {}
+static inline unsigned long mtk_smi_larb_get_base(int larbid)
+{
+ return 0;
+}
#endif
+#endif
diff --git a/drivers/misc/mediatek/smi/Kconfig b/drivers/misc/mediatek/smi/Kconfig
index 56d1e4f7c..7e8d39fc1 100644
--- a/drivers/misc/mediatek/smi/Kconfig
+++ b/drivers/misc/mediatek/smi/Kconfig
@@ -1,5 +1,11 @@
-config MTK_SMI
- bool CONFIG_MTK_SMI
- default n
- help
- CONFIG_MTK_SMI
+config MTK_SMI_EXT
+ bool "SMI Driver"
+ default n
+ help
+ SMI Driver is used to arbitrate memory bandwidth of multimedia
+
+config MTK_SMI_VARIANT
+ bool "MTK SMI Tablet Code"
+ select MTK_SMI_EXT
+ help
+ It's for smi common tablet framework. And it will enable power-domain.
diff --git a/drivers/misc/mediatek/smi/Makefile b/drivers/misc/mediatek/smi/Makefile
index 804347f3e..2ff41abc7 100644
--- a/drivers/misc/mediatek/smi/Makefile
+++ b/drivers/misc/mediatek/smi/Makefile
@@ -1,3 +1,44 @@
+ifneq ($(CONFIG_MTK_SMI_VARIANT),y)
-obj-y += $(subst ",,$(CONFIG_MTK_PLATFORM))/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/base/power/$(MTK_PLATFORM)/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/$(MTK_PLATFORM)/
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/$(MTK_PLATFORM)/mt6753/
+obj-y += smi_common.o
+obj-y += smi_debug.o
+obj-y += smi_info_util.o
+obj-y += smi_config_util.o
+obj-y += smi_configuration.o
+obj-y += smi_internal.o
+ifeq ($(CONFIG_ARCH_MT6735),y)
+ccflags-y += -I$(srctree)/drivers/clk/mediatek
+ccflags-y += -DSMI_D1
+obj-y += mmdvfs_mgr.o
+endif
+
+ifeq ($(CONFIG_ARCH_MT6735M),y)
+ccflags-y += -DSMI_D2
+obj-y += mmdvfs_mgr.o
+endif
+
+ifeq ($(CONFIG_ARCH_MT6753),y)
+ccflags-y += -DSMI_D3
+obj-y += mmdvfs_mgr.o
+endif
+
+ifeq ($(CONFIG_ARCH_MT6580),y)
+ccflags-y += -DSMI_R
+endif
+
+ifeq ($(CONFIG_ARCH_MT6755),y)
+ccflags-y += -DSMI_J
+ccflags-y += -I$(srctree)/drivers/clk/mediatek
+obj-y += mmdvfs_mgr_v2.o
+endif
+
+else
+obj-y += $(subst ",,variant)/
+endif
diff --git a/drivers/misc/mediatek/smi/mmdvfs_mgr.c b/drivers/misc/mediatek/smi/mmdvfs_mgr.c
new file mode 100644
index 000000000..b428c1ba9
--- /dev/null
+++ b/drivers/misc/mediatek/smi/mmdvfs_mgr.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if ((defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)) && !IS_ENABLED(CONFIG_FPGA_EARLY_PORTING))
+#define MMDVFS_ENABLE 1
+#endif
+
+#include <linux/uaccess.h>
+#include <linux/aee.h>
+
+#include <mach/mt_smi.h>
+
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include <mach/mt_freqhopping.h>
+#include <mach/mt_clkmgr.h>
+#include <mach/mt_vcore_dvfs.h>
+#include <mach/mt_freqhopping_drv.h>
+
+
+#include "mmdvfs_mgr.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "[" MMDVFS_LOG_TAG "]" fmt
+
+#define MMDVFS_ENABLE_FLIPER_CONTROL 0
+/* #define MMDVFS_USE_APMCU_CLK_MUX_SWITCH */
+
+#if MMDVFS_ENABLE_FLIPER_CONTROL
+#include <mach/fliper.h>
+#endif
+
+/* mmdvfs MM sizes */
+#define MMDVFS_PIXEL_NUM_720P (1280 * 720)
+#define MMDVFS_PIXEL_NUM_2160P (3840 * 2160)
+#define MMDVFS_PIXEL_NUM_1080P (2100 * 1300)
+#define MMDVFS_PIXEL_NUM_2M (2100 * 1300)
+/* 13M sensor */
+#define MMDVFS_PIXEL_NUM_SENSOR_FULL (13000000)
+#define MMDVFS_PIXEL_NUM_SENSOR_6M (5800000)
+#define MMDVFS_PIXEL_NUM_SENSOR_8M (7800000)
+
+/* mmdvfs display sizes */
+#define MMDVFS_DISPLAY_SIZE_HD (1280 * 832)
+#define MMDVFS_DISPLAY_SIZE_FHD (1920 * 1216)
+
+/* + 1 for MMDVFS_CAM_MON_SCEN */
+static mmdvfs_voltage_enum g_mmdvfs_scenario_voltage[MMDVFS_SCEN_COUNT] = {
+MMDVFS_VOLTAGE_DEFAULT};
+static mmdvfs_voltage_enum g_mmdvfs_current_step;
+static unsigned int g_mmdvfs_concurrency;
+static MTK_SMI_BWC_MM_INFO *g_mmdvfs_info;
+static int g_mmdvfs_profile_id = MMDVFS_PROFILE_UNKNOWN;
+static MTK_MMDVFS_CMD g_mmdvfs_cmd;
+
+struct mmdvfs_context_struct {
+ spinlock_t scen_lock;
+ int is_mhl_enable;
+ int is_mjc_enable;
+};
+
+/* mmdvfs_query() return value, remember to sync with user space */
+enum mmdvfs_step_enum {
+ MMDVFS_STEP_LOW = 0, MMDVFS_STEP_HIGH,
+
+ MMDVFS_STEP_LOW2LOW, /* LOW */
+ MMDVFS_STEP_HIGH2LOW, /* LOW */
+ MMDVFS_STEP_LOW2HIGH, /* HIGH */
+ MMDVFS_STEP_HIGH2HIGH,
+/* HIGH */
+};
+
+/* lcd size */
+enum mmdvfs_lcd_size_enum {
+ MMDVFS_LCD_SIZE_HD, MMDVFS_LCD_SIZE_FHD, MMDVFS_LCD_SIZE_WQHD, MMDVFS_LCD_SIZE_END_OF_ENUM
+};
+
+static struct mmdvfs_context_struct g_mmdvfs_mgr_cntx;
+static struct mmdvfs_context_struct * const g_mmdvfs_mgr = &g_mmdvfs_mgr_cntx;
+
+static enum mmdvfs_lcd_size_enum mmdvfs_get_lcd_resolution(void)
+{
+ if (DISP_GetScreenWidth() * DISP_GetScreenHeight()
+ <= MMDVFS_DISPLAY_SIZE_HD)
+ return MMDVFS_LCD_SIZE_HD;
+
+ return MMDVFS_LCD_SIZE_FHD;
+}
+
+static int vdec_ctrl_func_checked(vdec_ctrl_cb func, char *msg);
+static int notify_cb_func_checked(clk_switch_cb func, int ori_mmsys_clk_mode, int update_mmsys_clk_mode, char *msg);
+static int mmdfvs_adjust_mmsys_clk_by_hopping(int clk_mode);
+static int default_clk_switch_cb(int ori_mmsys_clk_mode, int update_mmsys_clk_mode);
+static int current_mmsys_clk = MMSYS_CLK_LOW;
+
+static mmdvfs_voltage_enum mmdvfs_get_default_step(void)
+{
+ mmdvfs_voltage_enum result = MMDVFS_VOLTAGE_LOW;
+
+ if (g_mmdvfs_profile_id == MMDVFS_PROFILE_D3)
+ result = MMDVFS_VOLTAGE_LOW;
+ else if (g_mmdvfs_profile_id == MMDVFS_PROFILE_D1_PLUS)
+ result = MMDVFS_VOLTAGE_LOW;
+ else
+ if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_HD)
+ result = MMDVFS_VOLTAGE_LOW;
+ else
+ /* D1 FHD always HPM. do not have to trigger vcore dvfs. */
+ result = MMDVFS_VOLTAGE_HIGH;
+
+ return result;
+}
+
+static mmdvfs_voltage_enum mmdvfs_get_current_step(void)
+{
+ return g_mmdvfs_current_step;
+}
+
+static mmdvfs_voltage_enum mmdvfs_query(MTK_SMI_BWC_SCEN scenario,
+MTK_MMDVFS_CMD *cmd)
+{
+ mmdvfs_voltage_enum step = mmdvfs_get_default_step();
+ unsigned int venc_size;
+ MTK_MMDVFS_CMD cmd_default;
+
+ venc_size = g_mmdvfs_info->video_record_size[0]
+ * g_mmdvfs_info->video_record_size[1];
+
+ /* use default info */
+ if (cmd == NULL) {
+ memset(&cmd_default, 0, sizeof(MTK_MMDVFS_CMD));
+ cmd_default.camera_mode = MMDVFS_CAMERA_MODE_FLAG_DEFAULT;
+ cmd = &cmd_default;
+ }
+
+ /* collect the final information */
+ if (cmd->sensor_size == 0)
+ cmd->sensor_size = g_mmdvfs_cmd.sensor_size;
+
+ if (cmd->sensor_fps == 0)
+ cmd->sensor_fps = g_mmdvfs_cmd.sensor_fps;
+
+ if (cmd->camera_mode == MMDVFS_CAMERA_MODE_FLAG_DEFAULT)
+ cmd->camera_mode = g_mmdvfs_cmd.camera_mode;
+
+ /* HIGH level scenarios */
+ switch (scenario) {
+#if defined(SMI_D2) /* D2 ISP >= 6M HIGH */
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_VR:
+ if (cmd->sensor_size >= MMDVFS_PIXEL_NUM_SENSOR_6M)
+ step = MMDVFS_VOLTAGE_HIGH;
+
+ break;
+#endif
+
+ case SMI_BWC_SCEN_ICFP:
+ step = MMDVFS_VOLTAGE_HIGH;
+ break;
+ /* force HPM for engineering mode */
+ case SMI_BWC_SCEN_FORCE_MMDVFS:
+ step = MMDVFS_VOLTAGE_HIGH;
+ break;
+ default:
+ break;
+ }
+
+ return step;
+}
+
+static void mmdvfs_update_cmd(MTK_MMDVFS_CMD *cmd)
+{
+ if (cmd == NULL)
+ return;
+
+ if (cmd->sensor_size)
+ g_mmdvfs_cmd.sensor_size = cmd->sensor_size;
+
+ if (cmd->sensor_fps)
+ g_mmdvfs_cmd.sensor_fps = cmd->sensor_fps;
+
+ MMDVFSMSG("update cm %d %d\n", cmd->camera_mode, cmd->sensor_size);
+ g_mmdvfs_cmd.camera_mode = cmd->camera_mode;
+}
+
+static void mmdvfs_dump_info(void)
+{
+ MMDVFSMSG("CMD %d %d %d\n", g_mmdvfs_cmd.sensor_size,
+ g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode);
+ MMDVFSMSG("INFO VR %d %d\n", g_mmdvfs_info->video_record_size[0],
+ g_mmdvfs_info->video_record_size[1]);
+}
+
+/* delay 4 seconds to go LPM to workaround camera ZSD + PIP issue */
+#if !defined(SMI_D3)
+static void mmdvfs_cam_work_handler(struct work_struct *work)
+{
+ MMDVFSMSG("CAM handler %d\n", jiffies_to_msecs(jiffies));
+ mmdvfs_set_step(MMDVFS_CAM_MON_SCEN, mmdvfs_get_default_step());
+}
+
+static DECLARE_DELAYED_WORK(g_mmdvfs_cam_work, mmdvfs_cam_work_handler);
+
+static void mmdvfs_stop_cam_monitor(void)
+{
+ cancel_delayed_work_sync(&g_mmdvfs_cam_work);
+}
+
+#define MMDVFS_CAM_MON_DELAY (4 * HZ)
+static void mmdvfs_start_cam_monitor(void)
+{
+ mmdvfs_stop_cam_monitor();
+ MMDVFSMSG("CAM start %d\n", jiffies_to_msecs(jiffies));
+ mmdvfs_set_step(MMDVFS_CAM_MON_SCEN, MMDVFS_VOLTAGE_HIGH);
+ /* 4 seconds for PIP switch preview aspect delays... */
+ schedule_delayed_work(&g_mmdvfs_cam_work, MMDVFS_CAM_MON_DELAY);
+}
+
+#endif /* !defined(SMI_D3) */
+
+int mmdvfs_set_step(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step)
+{
+ int i, scen_index;
+ unsigned int concurrency = 0;
+ mmdvfs_voltage_enum final_step = mmdvfs_get_default_step();
+
+#if !MMDVFS_ENABLE
+ return 0;
+#endif
+
+ if (!is_vcorefs_can_work())
+ return 0;
+
+ /* D1 FHD always HPM. do not have to trigger vcore dvfs. */
+ if (g_mmdvfs_profile_id == MMDVFS_PROFILE_D1
+ && mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD)
+ return 0;
+
+ /* D1 plus FHD only allowed DISP as the client */
+ if (g_mmdvfs_profile_id == MMDVFS_PROFILE_D1_PLUS)
+ if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD
+ && scenario != (MTK_SMI_BWC_SCEN) MMDVFS_SCEN_DISP)
+ return 0;
+
+
+ if ((scenario >= (MTK_SMI_BWC_SCEN) MMDVFS_SCEN_COUNT) || (scenario < SMI_BWC_SCEN_NORMAL)) {
+ MMDVFSERR("invalid scenario\n");
+ return -1;
+ }
+
+ /* dump information */
+ mmdvfs_dump_info();
+
+ /* go through all scenarios to decide the final step */
+ scen_index = (int)scenario;
+
+ spin_lock(&g_mmdvfs_mgr->scen_lock);
+
+ g_mmdvfs_scenario_voltage[scen_index] = step;
+
+ concurrency = 0;
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH)
+ concurrency |= 1 << i;
+ }
+
+ /* one high = final high */
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH) {
+ final_step = MMDVFS_VOLTAGE_HIGH;
+ break;
+ }
+ }
+
+ g_mmdvfs_current_step = final_step;
+
+ spin_unlock(&g_mmdvfs_mgr->scen_lock);
+
+ MMDVFSMSG("Set vol scen:%d,step:%d,final:%d(0x%x),CMD(%d,%d,0x%x),INFO(%d,%d)\n",
+ scenario, step, final_step, concurrency,
+ g_mmdvfs_cmd.sensor_size, g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode,
+ g_mmdvfs_info->video_record_size[0], g_mmdvfs_info->video_record_size[1]);
+
+#if MMDVFS_ENABLE
+ /* call vcore dvfs API */
+ if (final_step == MMDVFS_VOLTAGE_HIGH)
+ vcorefs_request_dvfs_opp(KIR_MM, OPPI_PERF);
+ else
+ vcorefs_request_dvfs_opp(KIR_MM, OPPI_UNREQ);
+
+#endif
+
+ return 0;
+}
+
+void mmdvfs_handle_cmd(MTK_MMDVFS_CMD *cmd)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ MMDVFSMSG("MMDVFS cmd %u %d\n", cmd->type, cmd->scen);
+
+ switch (cmd->type) {
+ case MTK_MMDVFS_CMD_TYPE_MMSYS_SET:
+ if (cmd->scen == SMI_BWC_SCEN_NORMAL) {
+ mmdvfs_set_mmsys_clk(cmd->scen, MMSYS_CLK_LOW);
+ vcorefs_request_dvfs_opp(KIR_MM, OPPI_UNREQ);
+ } else {
+ vcorefs_request_dvfs_opp(KIR_MM, OPPI_PERF);
+ mmdvfs_set_mmsys_clk(cmd->scen, MMSYS_CLK_HIGH);
+ }
+ break;
+ case MTK_MMDVFS_CMD_TYPE_SET:
+ /* save cmd */
+ mmdvfs_update_cmd(cmd);
+ if (!(g_mmdvfs_concurrency & (1 << cmd->scen)))
+ MMDVFSMSG("invalid set scen %d\n", cmd->scen);
+ cmd->ret = mmdvfs_set_step(cmd->scen,
+ mmdvfs_query(cmd->scen, cmd));
+ break;
+ case MTK_MMDVFS_CMD_TYPE_QUERY: { /* query with some parameters */
+ if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD) {
+ /* QUERY ALWAYS HIGH for FHD */
+ cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2HIGH;
+
+ } else { /* FHD */
+ mmdvfs_voltage_enum query_voltage = mmdvfs_query(cmd->scen, cmd);
+
+ mmdvfs_voltage_enum current_voltage = mmdvfs_get_current_step();
+
+ if (current_voltage < query_voltage) {
+ cmd->ret = (unsigned int)MMDVFS_STEP_LOW2HIGH;
+ } else if (current_voltage > query_voltage) {
+ cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2LOW;
+ } else {
+ cmd->ret
+ = (unsigned int)(query_voltage
+ == MMDVFS_VOLTAGE_HIGH
+ ? MMDVFS_STEP_HIGH2HIGH
+ : MMDVFS_STEP_LOW2LOW);
+ }
+ }
+
+ MMDVFSMSG("query %d\n", cmd->ret);
+ /* cmd->ret = (unsigned int)query_voltage; */
+ break;
+ }
+
+ default:
+ MMDVFSMSG("invalid mmdvfs cmd\n");
+ BUG();
+ break;
+ }
+}
+
+void mmdvfs_notify_scenario_exit(MTK_SMI_BWC_SCEN scen)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ MMDVFSMSG("leave %d\n", scen);
+
+#if !defined(SMI_D3) /* d3 does not need this workaround because the MMCLK is always the highest */
+ /*
+ * keep HPM for 4 seconds after exiting camera scenarios to get rid of
+ * cam framework will let us go to normal scenario for a short time
+ * (ex: STOP PREVIEW --> NORMAL --> START PREVIEW)
+ * where the LPM mode (low MMCLK) may cause ISP failures
+ */
+ if ((scen == SMI_BWC_SCEN_VR) || (scen == SMI_BWC_SCEN_VR_SLOW)
+ || (scen == SMI_BWC_SCEN_ICFP)) {
+ mmdvfs_start_cam_monitor();
+ }
+#endif /* !defined(SMI_D3) */
+
+ /* reset scenario voltage to default when it exits */
+ mmdvfs_set_step(scen, mmdvfs_get_default_step());
+}
+
+void mmdvfs_notify_scenario_enter(MTK_SMI_BWC_SCEN scen)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ MMDVFSMSG("enter %d\n", scen);
+
+ /* ISP ON = high */
+ switch (scen) {
+#if defined(SMI_D2) /* d2 sensor > 6M */
+ case SMI_BWC_SCEN_VR:
+ mmdvfs_set_step(scen, mmdvfs_query(scen, NULL));
+ break;
+#else /* default VR high */
+ case SMI_BWC_SCEN_VR:
+#endif
+ case SMI_BWC_SCEN_WFD:
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_VSS:
+ /* Fall through */
+ case SMI_BWC_SCEN_ICFP:
+ /* Fall through */
+ case SMI_BWC_SCEN_FORCE_MMDVFS:
+ mmdvfs_set_step(scen, MMDVFS_VOLTAGE_HIGH);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void mmdvfs_init(MTK_SMI_BWC_MM_INFO *info)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ spin_lock_init(&g_mmdvfs_mgr->scen_lock);
+
+ /* set current step as the default step */
+ g_mmdvfs_profile_id = mmdvfs_get_mmdvfs_profile();
+
+ g_mmdvfs_current_step = mmdvfs_get_default_step();
+
+ g_mmdvfs_info = info;
+}
+
+void mmdvfs_mhl_enable(int enable)
+{
+ g_mmdvfs_mgr->is_mhl_enable = enable;
+}
+
+void mmdvfs_mjc_enable(int enable)
+{
+ g_mmdvfs_mgr->is_mjc_enable = enable;
+}
+
+void mmdvfs_notify_scenario_concurrency(unsigned int u4Concurrency)
+{
+ /* raise EMI monitor BW threshold in VP, VR, VR SLOW motion cases */
+ /* to make sure vcore stay MMDVFS level as long as possible */
+ if (u4Concurrency & ((1 << SMI_BWC_SCEN_VP) | (1 << SMI_BWC_SCEN_VR)
+ | (1 << SMI_BWC_SCEN_VR_SLOW))) {
+#if MMDVFS_ENABLE_FLIPER_CONTROL
+ MMDVFSMSG("fliper high\n");
+ fliper_set_bw(BW_THRESHOLD_HIGH);
+#endif
+ } else {
+#if MMDVFS_ENABLE_FLIPER_CONTROL
+ MMDVFSMSG("fliper normal\n");
+ fliper_restore_bw();
+#endif
+ }
+ g_mmdvfs_concurrency = u4Concurrency;
+}
+
+/* switch MM CLK callback from VCORE DVFS driver */
+void mmdvfs_mm_clock_switch_notify(int is_before, int is_to_high)
+{
+ /* for WQHD 1.0v, we have to dynamically switch DL/DC */
+#ifdef MMDVFS_WQHD_1_0V
+ int session_id;
+
+ if (mmdvfs_get_lcd_resolution() != MMDVFS_LCD_SIZE_WQHD)
+ return;
+
+ session_id = MAKE_DISP_SESSION(DISP_SESSION_PRIMARY, 0);
+
+ if (!is_before && is_to_high) {
+ MMDVFSMSG("DL\n");
+ /* nonblocking switch to direct link after HPM */
+ primary_display_switch_mode_for_mmdvfs(DISP_SESSION_DIRECT_LINK_MODE, session_id,
+ 0);
+ } else if (is_before && !is_to_high) {
+ /* BLOCKING switch to decouple before switching to LPM */
+ MMDVFSMSG("DC\n");
+ primary_display_switch_mode_for_mmdvfs(DISP_SESSION_DECOUPLE_MODE, session_id, 1);
+ }
+#endif /* MMDVFS_WQHD_1_0V */
+}
+
+
+int mmdvfs_get_mmdvfs_profile(void)
+{
+
+ int mmdvfs_profile_id = MMDVFS_PROFILE_UNKNOWN;
+ unsigned int segment_code = 0;
+
+ segment_code = _GET_BITS_VAL_(31 : 25, get_devinfo_with_index(47));
+
+#if defined(SMI_D1)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D1;
+ if (segment_code == 0x41 || segment_code == 0x42 ||
+ segment_code == 0x43 || segment_code == 0x49 ||
+ segment_code == 0x51)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D1_PLUS;
+ else
+ mmdvfs_profile_id = MMDVFS_PROFILE_D1;
+#elif defined(SMI_D2)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D2;
+ if (segment_code == 0x4A || segment_code == 0x4B)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D2_M_PLUS;
+ else if (segment_code == 0x52 || segment_code == 0x53)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D2_P_PLUS;
+ else
+ mmdvfs_profile_id = MMDVFS_PROFILE_D2;
+#elif defined(SMI_D3)
+ mmdvfs_profile_id = MMDVFS_PROFILE_D3;
+#elif defined(SMI_J)
+ mmdvfs_profile_id = MMDVFS_PROFILE_J1;
+#elif defined(SMI_EV)
+ mmdvfs_profile_id = MMDVFS_PROFILE_E1;
+#endif
+
+ return mmdvfs_profile_id;
+
+}
+
+int is_mmdvfs_supported(void)
+{
+ int mmdvfs_profile_id = mmdvfs_get_mmdvfs_profile();
+
+ if (mmdvfs_profile_id == MMDVFS_PROFILE_D1 && mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD)
+ return 0;
+ else if (mmdvfs_profile_id == MMDVFS_PROFILE_UNKNOWN)
+ return 0;
+ else
+ return 1;
+}
+
+static clk_switch_cb notify_cb_func = default_clk_switch_cb;
+static clk_switch_cb notify_cb_func_nolock;
+static vdec_ctrl_cb vdec_suspend_cb_func;
+static vdec_ctrl_cb vdec_resume_cb_func;
+
+int register_mmclk_switch_vdec_ctrl_cb(vdec_ctrl_cb vdec_suspend_cb,
+vdec_ctrl_cb vdec_resume_cb)
+{
+ vdec_suspend_cb_func = vdec_suspend_cb;
+ vdec_resume_cb_func = vdec_resume_cb;
+
+ return 1;
+}
+
+int register_mmclk_switch_cb(clk_switch_cb notify_cb,
+clk_switch_cb notify_cb_nolock)
+{
+ notify_cb_func = notify_cb;
+ notify_cb_func_nolock = notify_cb_nolock;
+
+ return 1;
+}
+
+
+
+/* This design is only for CLK Mux switch related flows */
+int mmdvfs_notify_mmclk_switch_request(int event)
+{
+ /* This API should only be used in J1 MMDVFS profile */
+ return 0;
+}
+
+
+
+static int mmdfvs_adjust_mmsys_clk_by_hopping(int clk_mode)
+{
+ int result = 1;
+
+ if (g_mmdvfs_profile_id != MMDVFS_PROFILE_D2_M_PLUS &&
+ g_mmdvfs_profile_id != MMDVFS_PROFILE_D2_P_PLUS) {
+ result = 0;
+ return result;
+ }
+
+ if (!is_vcorefs_can_work()) {
+ result = 0;
+ return result;
+ }
+
+ if (clk_mode == MMSYS_CLK_HIGH) {
+ if (current_mmsys_clk == MMSYS_CLK_MEDIUM)
+ mt_dfs_vencpll(0xE0000);
+
+ vdec_ctrl_func_checked(vdec_suspend_cb_func, "VDEC suspend");
+ freqhopping_config(FH_VENC_PLLID , 0, false);
+ notify_cb_func_checked(notify_cb_func, MMSYS_CLK_LOW, MMSYS_CLK_HIGH,
+ "notify_cb_func");
+ freqhopping_config(FH_VENC_PLLID , 0, true);
+ vdec_ctrl_func_checked(vdec_resume_cb_func, "VDEC resume");
+
+ current_mmsys_clk = MMSYS_CLK_HIGH;
+
+ } else if (clk_mode == MMSYS_CLK_MEDIUM) {
+ if (current_mmsys_clk == MMSYS_CLK_HIGH) {
+ vdec_ctrl_func_checked(vdec_suspend_cb_func, "VDEC suspend");
+ freqhopping_config(FH_VENC_PLLID , 0, false);
+ notify_cb_func_checked(notify_cb_func, MMSYS_CLK_HIGH, MMSYS_CLK_LOW, "notify_cb_func");
+ freqhopping_config(FH_VENC_PLLID , 0, true);
+ vdec_ctrl_func_checked(vdec_resume_cb_func, "VDEC resume");
+ }
+ mt_dfs_vencpll(0x1713B1);
+ notify_cb_func_checked(notify_cb_func, current_mmsys_clk, MMSYS_CLK_MEDIUM,
+ "notify_cb_func");
+ current_mmsys_clk = MMSYS_CLK_MEDIUM;
+ } else if (clk_mode == MMSYS_CLK_LOW) {
+ if (current_mmsys_clk == MMSYS_CLK_HIGH) {
+ vdec_ctrl_func_checked(vdec_suspend_cb_func, "VDEC suspend");
+ freqhopping_config(FH_VENC_PLLID , 0, false);
+ notify_cb_func_checked(notify_cb_func, MMSYS_CLK_HIGH, MMSYS_CLK_LOW, "notify_cb_func");
+ freqhopping_config(FH_VENC_PLLID , 0, true);
+ vdec_ctrl_func_checked(vdec_resume_cb_func, "VDEC resume");
+ }
+ mt_dfs_vencpll(0xE0000);
+ current_mmsys_clk = MMSYS_CLK_LOW;
+
+ } else {
+ MMDVFSMSG("Don't change CLK: mode=%d\n", clk_mode);
+ result = 0;
+ }
+
+ return result;
+}
+
+int mmdvfs_set_mmsys_clk(MTK_SMI_BWC_SCEN scenario, int mmsys_clk_mode)
+{
+ return mmdfvs_adjust_mmsys_clk_by_hopping(mmsys_clk_mode);
+}
+
+static int vdec_ctrl_func_checked(vdec_ctrl_cb func, char *msg)
+{
+ if (func == NULL) {
+ MMDVFSMSG("vdec_ctrl_func is NULL, not invoked: %s\n", msg);
+ } else {
+ func();
+ return 1;
+ }
+ return 0;
+}
+
+static int notify_cb_func_checked(clk_switch_cb func, int ori_mmsys_clk_mode, int update_mmsys_clk_mode, char *msg)
+{
+ if (func == NULL) {
+ MMDVFSMSG("notify_cb_func is NULL, not invoked: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode,
+ update_mmsys_clk_mode);
+ } else {
+ if (ori_mmsys_clk_mode != update_mmsys_clk_mode)
+ MMDVFSMSG("notify_cb_func: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode, update_mmsys_clk_mode);
+
+ func(ori_mmsys_clk_mode, update_mmsys_clk_mode);
+ return 1;
+ }
+ return 0;
+}
+
+static int mmsys_clk_switch_impl(unsigned int venc_pll_con1_val)
+{
+ if (g_mmdvfs_profile_id != MMDVFS_PROFILE_D2_M_PLUS
+ && g_mmdvfs_profile_id != MMDVFS_PROFILE_D2_P_PLUS) {
+ MMDVFSMSG("mmsys_clk_switch_impl is not support in profile:%d", g_mmdvfs_profile_id);
+ return 0;
+ }
+
+#if defined(SMI_D2)
+ clkmux_sel(MT_MUX_MM, 6, "SMI common");
+ mt_set_vencpll_con1(venc_pll_con1_val);
+ udelay(20);
+ clkmux_sel(MT_MUX_MM, 1, "SMI common");
+#endif
+
+ return 1;
+}
+
+static int default_clk_switch_cb(int ori_mmsys_clk_mode, int update_mmsys_clk_mode)
+{
+ unsigned int venc_pll_con1_val = 0;
+
+ if (ori_mmsys_clk_mode == MMSYS_CLK_LOW && update_mmsys_clk_mode == MMSYS_CLK_HIGH) {
+ if (g_mmdvfs_profile_id == MMDVFS_PROFILE_D2_M_PLUS)
+ venc_pll_con1_val = 0x820F0000; /* 380MHz (35M+) */
+ else
+ venc_pll_con1_val = 0x82110000; /* 442MHz (35P+) */
+ } else if (ori_mmsys_clk_mode == MMSYS_CLK_HIGH && update_mmsys_clk_mode == MMSYS_CLK_LOW) {
+ venc_pll_con1_val = 0x830E0000;
+ } else {
+ MMDVFSMSG("default_clk_switch_cb: by-pass (%d,%d)\n", ori_mmsys_clk_mode, update_mmsys_clk_mode);
+ return 1;
+ }
+
+ if (venc_pll_con1_val != 0)
+ mmsys_clk_switch_impl(venc_pll_con1_val);
+
+ return 1;
+}
diff --git a/drivers/misc/mediatek/smi/mmdvfs_mgr.h b/drivers/misc/mediatek/smi/mmdvfs_mgr.h
new file mode 100644
index 000000000..f22f2a251
--- /dev/null
+++ b/drivers/misc/mediatek/smi/mmdvfs_mgr.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MMDVFS_MGR_H__
+#define __MMDVFS_MGR_H__
+
+#include <linux/aee.h>
+#include <mach/mt_smi.h>
+
+#define MMDVFS_LOG_TAG "MMDVFS"
+
+#define MMDVFSMSG(string, args...) pr_debug("[pid=%d]"string, current->tgid, ##args)
+#define MMDVFSMSG2(string, args...) pr_debug(string, ##args)
+#define MMDVFSTMP(string, args...) pr_debug("[pid=%d]"string, current->tgid, ##args)
+#define MMDVFSERR(string, args...) \
+ do {\
+ pr_debug("error: "string, ##args); \
+ aee_kernel_warning(MMDVFS_LOG_TAG, "error: "string, ##args); \
+ } while (0)
+
+#define _BIT_(_bit_) (unsigned)(1 << (_bit_))
+#define _BITS_(_bits_, _val_) ((((unsigned) -1 >> (31 - ((1) ? _bits_))) \
+ & ~((1U << ((0) ? _bits_)) - 1)) & ((_val_)<<((0) ? _bits_)))
+#define _BITMASK_(_bits_) (((unsigned) -1 >> (31 - ((1) ? _bits_))) & ~((1U << ((0) ? _bits_)) - 1))
+#define _GET_BITS_VAL_(_bits_, _val_) (((_val_) & (_BITMASK_(_bits_))) >> ((0) ? _bits_))
+
+/* MMDVFS extern APIs */
+extern void mmdvfs_init(MTK_SMI_BWC_MM_INFO *info);
+extern void mmdvfs_handle_cmd(MTK_MMDVFS_CMD *cmd);
+extern void mmdvfs_notify_scenario_enter(MTK_SMI_BWC_SCEN scen);
+extern void mmdvfs_notify_scenario_exit(MTK_SMI_BWC_SCEN scen);
+extern void mmdvfs_notify_scenario_concurrency(unsigned int u4Concurrency);
+extern void mmdvfs_mhl_enable(int enable);
+extern void mmdvfs_mjc_enable(int enable);
+
+/* screen size */
+extern unsigned int DISP_GetScreenWidth(void);
+extern unsigned int DISP_GetScreenHeight(void);
+
+
+#define MMSYS_CLK_LOW (0)
+#define MMSYS_CLK_HIGH (1)
+#define MMSYS_CLK_MEDIUM (2)
+
+#define MMDVFS_EVENT_OVL_SINGLE_LAYER_ENTER 0
+#define MMDVFS_EVENT_OVL_SINGLE_LAYER_EXIT 1
+#define MMDVFS_EVENT_UI_IDLE_ENTER 2
+#define MMDVFS_EVENT_UI_IDLE_EXIT 3
+
+#define MMDVFS_CLIENT_ID_ISP 0
+
+typedef int (*clk_switch_cb)(int ori_mmsys_clk_mode, int update_mmsys_clk_mode);
+typedef int (*vdec_ctrl_cb)(void);
+
+/* MMDVFS V2 only APIs */
+extern int mmdvfs_notify_mmclk_switch_request(int event);
+extern int mmdvfs_raise_mmsys_by_mux(void);
+extern int mmdvfs_lower_mmsys_by_mux(void);
+extern int register_mmclk_switch_cb(clk_switch_cb notify_cb,
+clk_switch_cb notify_cb_nolock);
+extern int mmdvfs_register_mmclk_switch_cb(clk_switch_cb notify_cb, int mmdvfs_client_id);
+extern void dump_mmdvfs_info(void);
+
+
+/* Extern from other module */
+extern MTK_SMI_BWC_SCEN smi_get_current_profile(void);
+extern int is_mmdvfs_freq_hopping_disabled(void);
+extern int is_mmdvfs_freq_mux_disabled(void);
+extern int is_force_max_mmsys_clk(void);
+extern int is_force_camera_hpm(void);
+extern int is_mmdvfs_disabled(void);
+
+
+#ifdef MMDVFS_STANDALONE
+#define vcorefs_request_dvfs_opp(scen, mode) do { \
+ MMDVFSMSG("vcorefs_request_dvfs_opp"); \
+ MMDVFSMSG("MMDVFS_STANDALONE mode enabled\n"); \
+} while (0)
+
+#define fliper_set_bw(BW_THRESHOLD_HIGH) do { \
+ MMDVFSMSG("MMDVFS_STANDALONE mode enabled\n"); \
+ MMDVFSMSG("fliper_set_bw");\
+} while (0)
+
+#define fliper_restore_bw() do {\
+ MMDVFSMSG("MMDVFS_STANDALONE mode enabled\n"); \
+ MMDVFSMSG("fliper_restore_bw(): fliper normal\n"); \
+} while (0)
+
+#endif /* MMDVFS_STANDALONE */
+
+#ifdef MMDVFS_WQHD_1_0V
+#include "disp_session.h"
+extern int primary_display_switch_mode_for_mmdvfs(int sess_mode, unsigned int session, int blocking);
+#endif
+
+/* D2 plus only */
+#if defined(SMI_D2)
+extern void mt_set_vencpll_con1(int val);
+extern int clkmux_sel(int id, unsigned int clksrc, char *name);
+#endif
+
+/* D1 plus implementation only */
+extern u32 get_devinfo_with_index(u32 index);
+
+#define MMDVFS_PROFILE_UNKNOWN (0)
+#define MMDVFS_PROFILE_R1 (1)
+#define MMDVFS_PROFILE_J1 (2)
+#define MMDVFS_PROFILE_D1 (3)
+#define MMDVFS_PROFILE_D1_PLUS (4)
+#define MMDVFS_PROFILE_D2 (5)
+#define MMDVFS_PROFILE_D2_M_PLUS (6)
+#define MMDVFS_PROFILE_D2_P_PLUS (7)
+#define MMDVFS_PROFILE_D3 (8)
+#define MMDVFS_PROFILE_E1 (9)
+
+
+enum {
+ MMDVFS_CAM_MON_SCEN = SMI_BWC_SCEN_CNT, MMDVFS_SCEN_MHL, MMDVFS_SCEN_MJC, MMDVFS_SCEN_DISP,
+ MMDVFS_SCEN_ISP, MMDVFS_SCEN_VP_HIGH_RESOLUTION , MMDVFS_SCEN_COUNT
+};
+
+/* Backward compatible */
+#define SMI_BWC_SCEN_120HZ MMDVFS_SCEN_DISP
+
+
+#ifndef CONFIG_MTK_SMI_EXT
+#define mmdvfs_set_step(scenario, step)
+#else
+int mmdvfs_set_step(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step);
+#endif /* CONFIG_MTK_SMI_EXT */
+
+extern int mmdvfs_get_mmdvfs_profile(void);
+extern int is_mmdvfs_supported(void);
+extern int mmdvfs_set_mmsys_clk(MTK_SMI_BWC_SCEN scenario, int mmsys_clk_mode);
+
+#endif /* __MMDVFS_MGR_H__ */
diff --git a/drivers/misc/mediatek/smi/mmdvfs_mgr_v2.c b/drivers/misc/mediatek/smi/mmdvfs_mgr_v2.c
new file mode 100644
index 000000000..fd9dc9c2d
--- /dev/null
+++ b/drivers/misc/mediatek/smi/mmdvfs_mgr_v2.c
@@ -0,0 +1,1019 @@
+#include <linux/uaccess.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/mtk_gpu_utility.h>
+
+#include <aee.h>
+#include <mt_smi.h>
+
+
+
+
+#ifndef MMDVFS_STANDALONE
+#include <mt_vcorefs_manager.h>
+#endif
+#include <mach/mt_freqhopping.h>
+
+
+#include "mmdvfs_mgr.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "[" MMDVFS_LOG_TAG "]" fmt
+
+/* MMDVFS SWITCH. NO MMDVFS for 6595 */
+#if IS_ENABLED(CONFIG_ARM64)
+/* 6795 */
+#define MMDVFS_ENABLE 1
+#else
+/* 6595 */
+#define MMDVFS_ENABLE 0
+#endif
+
+#if MMDVFS_ENABLE
+#ifndef MMDVFS_STANDALONE
+#include <mach/fliper.h>
+#endif
+#endif
+
+/* WQHD MMDVFS SWITCH */
+#define MMDVFS_ENABLE_WQHD 0
+
+#define MMDVFS_GPU_LOADING_NUM 30
+#define MMDVFS_GPU_LOADING_START_INDEX 10
+#define MMDVFS_GPU_LOADING_SAMPLE_DURATION_IN_MS 100
+#define MMDVFS_GPU_LOADING_THRESHOLD 18
+
+/* enable WQHD default 1.0v */
+/* #define MMDVFS_WQHD_1_0V */
+
+#if (MMDVFS_GPU_LOADING_START_INDEX >= MMDVFS_GPU_LOADING_NUM)
+#error "start index too large"
+#endif
+
+/* mmdvfs MM sizes */
+#define MMDVFS_PIXEL_NUM_720P (1280 * 720)
+#define MMDVFS_PIXEL_NUM_2160P (3840 * 2160)
+#define MMDVFS_PIXEL_NUM_1080P (2100 * 1300)
+#define MMDVFS_PIXEL_NUM_2M (2100 * 1300)
+#define MMDVFS_PIXEL_NUM_13M (13000000)
+
+/* 13M sensor */
+#define MMDVFS_PIXEL_NUM_SENSOR_FULL (13000000)
+
+/* mmdvfs display sizes */
+#define MMDVFS_DISPLAY_SIZE_FHD (1920 * 1216)
+
+#define MMDVFS_CLK_SWITCH_CB_MAX 16
+#define MMDVFS_CLK_SWITCH_CLIENT_MSG_MAX 20
+
+static int notify_cb_func_checked(clk_switch_cb func, int ori_mmsys_clk_mode,
+int update_mmsys_clk_mode, char *msg);
+static int mmdfvs_adjust_mmsys_clk_by_hopping(int clk_mode);
+static int mmdvfs_set_step_with_mmsys_clk(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step,
+int mmsys_clk_mode);
+static void notify_mmsys_clk_change(int ori_mmsys_clk_mode, int update_mmsys_clk_mode);
+static int mmsys_clk_change_notify_checked(clk_switch_cb func, int ori_mmsys_clk_mode,
+int update_mmsys_clk_mode, char *msg);
+static mmdvfs_voltage_enum determine_current_mmsys_clk(void);
+static int is_cam_monior_work;
+
+
+enum {
+ MMDVFS_CAM_MON_SCEN = SMI_BWC_SCEN_CNT, MMDVFS_SCEN_MHL, MMDVFS_SCEN_COUNT
+};
+
+static clk_switch_cb quick_mmclk_cbs[MMDVFS_CLK_SWITCH_CB_MAX];
+static clk_switch_cb notify_cb_func;
+static clk_switch_cb notify_cb_func_nolock;
+static int current_mmsys_clk = MMSYS_CLK_MEDIUM;
+
+/* + 1 for MMDVFS_CAM_MON_SCEN */
+static mmdvfs_voltage_enum g_mmdvfs_scenario_voltage[MMDVFS_SCEN_COUNT] = {
+MMDVFS_VOLTAGE_DEFAULT};
+static mmdvfs_voltage_enum g_mmdvfs_current_step;
+static unsigned int g_mmdvfs_concurrency;
+static MTK_SMI_BWC_MM_INFO *g_mmdvfs_info;
+static MTK_MMDVFS_CMD g_mmdvfs_cmd;
+
+/* mmdvfs timer for monitor gpu loading */
+typedef struct {
+ /* linux timer */
+ struct timer_list timer;
+
+ /* work q */
+ struct workqueue_struct *work_queue;
+ struct work_struct work;
+
+ /* data payload */
+ unsigned int gpu_loadings[MMDVFS_GPU_LOADING_NUM];
+ int gpu_loading_index;
+} mmdvfs_gpu_monitor_struct;
+
+typedef struct {
+ spinlock_t scen_lock;
+ int is_mhl_enable;
+ mmdvfs_gpu_monitor_struct gpu_monitor;
+
+} mmdvfs_context_struct;
+
+/* mmdvfs_query() return value, remember to sync with user space */
+typedef enum {
+ MMDVFS_STEP_LOW = 0, MMDVFS_STEP_HIGH,
+
+ MMDVFS_STEP_LOW2LOW, /* LOW */
+ MMDVFS_STEP_HIGH2LOW, /* LOW */
+ MMDVFS_STEP_LOW2HIGH, /* HIGH */
+ MMDVFS_STEP_HIGH2HIGH,
+/* HIGH */
+} mmdvfs_step_enum;
+
+/* lcd size */
+typedef enum {
+ MMDVFS_LCD_SIZE_FHD, MMDVFS_LCD_SIZE_WQHD, MMDVFS_LCD_SIZE_END_OF_ENUM
+} mmdvfs_lcd_size_enum;
+
+static mmdvfs_context_struct g_mmdvfs_mgr_cntx;
+static mmdvfs_context_struct * const g_mmdvfs_mgr = &g_mmdvfs_mgr_cntx;
+
+static mmdvfs_lcd_size_enum mmdvfs_get_lcd_resolution(void)
+{
+ if (DISP_GetScreenWidth() * DISP_GetScreenHeight()
+ <= MMDVFS_DISPLAY_SIZE_FHD) {
+ return MMDVFS_LCD_SIZE_FHD;
+ }
+
+ return MMDVFS_LCD_SIZE_WQHD;
+}
+
+static mmdvfs_voltage_enum mmdvfs_get_default_step(void)
+{
+#ifdef MMDVFS_WQHD_1_0V
+ return MMDVFS_VOLTAGE_LOW;
+#else
+ if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD)
+ return MMDVFS_VOLTAGE_LOW;
+ else
+ return MMDVFS_VOLTAGE_HIGH;
+#endif
+}
+
+static mmdvfs_voltage_enum mmdvfs_get_current_step(void)
+{
+ return g_mmdvfs_current_step;
+}
+
+static int mmsys_clk_query(MTK_SMI_BWC_SCEN scenario,
+MTK_MMDVFS_CMD *cmd)
+{
+ int step = MMSYS_CLK_MEDIUM;
+
+ unsigned int venc_size;
+ MTK_MMDVFS_CMD cmd_default;
+
+ venc_size = g_mmdvfs_info->video_record_size[0]
+ * g_mmdvfs_info->video_record_size[1];
+
+ /* use default info */
+ if (cmd == NULL) {
+ memset(&cmd_default, 0, sizeof(MTK_MMDVFS_CMD));
+ cmd_default.camera_mode = MMDVFS_CAMERA_MODE_FLAG_DEFAULT;
+ cmd = &cmd_default;
+ }
+
+ /* collect the final information */
+ if (cmd->sensor_size == 0)
+ cmd->sensor_size = g_mmdvfs_cmd.sensor_size;
+
+ if (cmd->sensor_fps == 0)
+ cmd->sensor_fps = g_mmdvfs_cmd.sensor_fps;
+
+ if (cmd->camera_mode == MMDVFS_CAMERA_MODE_FLAG_DEFAULT)
+ cmd->camera_mode = g_mmdvfs_cmd.camera_mode;
+
+ /* HIGH level scenarios */
+ switch (scenario) {
+ case SMI_BWC_SCEN_VR:
+ if (is_force_max_mmsys_clk())
+ step = MMSYS_CLK_HIGH;
+
+ if (cmd->sensor_size >= MMDVFS_PIXEL_NUM_13M)
+ /* 13M high */
+ step = MMSYS_CLK_HIGH;
+ else if (cmd->camera_mode & (MMDVFS_CAMERA_MODE_FLAG_PIP | MMDVFS_CAMERA_MODE_FLAG_STEREO))
+ /* PIP for ISP clock */
+ step = MMSYS_CLK_HIGH;
+ break;
+
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_ICFP:
+ step = MMSYS_CLK_HIGH;
+ break;
+
+ default:
+ break;
+ }
+
+ return step;
+}
+
+static mmdvfs_voltage_enum mmdvfs_query(MTK_SMI_BWC_SCEN scenario,
+MTK_MMDVFS_CMD *cmd)
+{
+ mmdvfs_voltage_enum step = mmdvfs_get_default_step();
+ unsigned int venc_size;
+ MTK_MMDVFS_CMD cmd_default;
+
+ venc_size = g_mmdvfs_info->video_record_size[0]
+ * g_mmdvfs_info->video_record_size[1];
+
+ /* use default info */
+ if (cmd == NULL) {
+ memset(&cmd_default, 0, sizeof(MTK_MMDVFS_CMD));
+ cmd_default.camera_mode = MMDVFS_CAMERA_MODE_FLAG_DEFAULT;
+ cmd = &cmd_default;
+ }
+
+ /* collect the final information */
+ if (cmd->sensor_size == 0)
+ cmd->sensor_size = g_mmdvfs_cmd.sensor_size;
+
+ if (cmd->sensor_fps == 0)
+ cmd->sensor_fps = g_mmdvfs_cmd.sensor_fps;
+
+ if (cmd->camera_mode == MMDVFS_CAMERA_MODE_FLAG_DEFAULT)
+ cmd->camera_mode = g_mmdvfs_cmd.camera_mode;
+
+ /* HIGH level scenarios */
+ switch (scenario) {
+
+ case SMI_BWC_SCEN_VR:
+ if (is_force_camera_hpm())
+ step = MMDVFS_VOLTAGE_HIGH;
+
+ if (cmd->sensor_size >= MMDVFS_PIXEL_NUM_13M)
+ /* 13M high */
+ step = MMDVFS_VOLTAGE_HIGH;
+ else if (cmd->camera_mode & (MMDVFS_CAMERA_MODE_FLAG_PIP | MMDVFS_CAMERA_MODE_FLAG_STEREO |
+ MMDVFS_CAMERA_MODE_FLAG_VFB | MMDVFS_CAMERA_MODE_FLAG_EIS_2_0))
+ /* PIP for ISP clock */
+ step = MMDVFS_VOLTAGE_HIGH;
+
+ break;
+
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_ICFP:
+ step = MMDVFS_VOLTAGE_HIGH;
+ break;
+
+ default:
+ break;
+ }
+
+ return step;
+}
+
+static mmdvfs_voltage_enum determine_current_mmsys_clk(void)
+{
+ int i = 0;
+ int final_clk = MMSYS_CLK_MEDIUM;
+
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH) {
+ /* Check the mmsys clk */
+ switch (i) {
+ case SMI_BWC_SCEN_VR:
+ case MMDVFS_CAM_MON_SCEN:
+ if (is_force_max_mmsys_clk())
+ final_clk = MMSYS_CLK_HIGH;
+ else if (g_mmdvfs_cmd.sensor_size >= MMDVFS_PIXEL_NUM_13M)
+ /* 13M high */
+ final_clk = MMSYS_CLK_HIGH;
+ else if (g_mmdvfs_cmd.camera_mode & (MMDVFS_CAMERA_MODE_FLAG_PIP |
+ MMDVFS_CAMERA_MODE_FLAG_STEREO))
+ /* PIP for ISP clock */
+ final_clk = MMSYS_CLK_HIGH;
+ break;
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_ICFP:
+ final_clk = MMSYS_CLK_HIGH;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return final_clk;
+}
+
+
+static void mmdvfs_update_cmd(MTK_MMDVFS_CMD *cmd)
+{
+ if (cmd == NULL)
+ return;
+
+ if (cmd->sensor_size)
+ g_mmdvfs_cmd.sensor_size = cmd->sensor_size;
+
+ if (cmd->sensor_fps)
+ g_mmdvfs_cmd.sensor_fps = cmd->sensor_fps;
+
+ /* MMDVFSMSG("update cm %d\n", cmd->camera_mode); */
+
+ /* if (cmd->camera_mode != MMDVFS_CAMERA_MODE_FLAG_DEFAULT) { */
+ g_mmdvfs_cmd.camera_mode = cmd->camera_mode;
+ /* } */
+}
+
+/* static void mmdvfs_dump_info(void)
+{
+ MMDVFSMSG("CMD %d %d %d\n", g_mmdvfs_cmd.sensor_size,
+ g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode);
+ MMDVFSMSG("INFO VR %d %d\n", g_mmdvfs_info->video_record_size[0],
+ g_mmdvfs_info->video_record_size[1]);
+}
+*/
+
+#ifdef MMDVFS_GPU_MONITOR_ENABLE
+static void mmdvfs_timer_callback(unsigned long data)
+{
+ mmdvfs_gpu_monitor_struct *gpu_monitor =
+ (mmdvfs_gpu_monitor_struct *)data;
+
+ unsigned int gpu_loading = 0;
+
+ /* if (mtk_get_gpu_loading(&gpu_loading)) {
+ MMDVFSMSG("gpuload %d %ld\n", gpu_loading, jiffies_to_msecs(jiffies));
+ */
+
+ /* store gpu loading into the array */
+ gpu_monitor->gpu_loadings[gpu_monitor->gpu_loading_index++]
+ = gpu_loading;
+
+ /* fire another timer until the end */
+ if (gpu_monitor->gpu_loading_index < MMDVFS_GPU_LOADING_NUM - 1) {
+ mod_timer(
+ &gpu_monitor->timer,
+ jiffies + msecs_to_jiffies(
+ MMDVFS_GPU_LOADING_SAMPLE_DURATION_IN_MS));
+ } else {
+ /* the final timer */
+ int i;
+ int avg_loading;
+ unsigned int sum = 0;
+
+ for (i = MMDVFS_GPU_LOADING_START_INDEX; i
+ < MMDVFS_GPU_LOADING_NUM; i++) {
+ sum += gpu_monitor->gpu_loadings[i];
+ }
+
+ avg_loading = sum / MMDVFS_GPU_LOADING_NUM;
+
+ MMDVFSMSG("gpuload %d AVG %d\n", jiffies_to_msecs(jiffies),
+ avg_loading);
+
+ /* drops to low step if the gpu loading is low */
+ if (avg_loading <= MMDVFS_GPU_LOADING_THRESHOLD)
+ queue_work(gpu_monitor->work_queue, &gpu_monitor->work);
+ }
+
+}
+
+static void mmdvfs_gpu_monitor_work(struct work_struct *work)
+{
+ MMDVFSMSG("WQ %d\n", jiffies_to_msecs(jiffies));
+}
+
+static void mmdvfs_init_gpu_monitor(mmdvfs_gpu_monitor_struct *gm)
+{
+ struct timer_list *gpu_timer = &gm->timer;
+
+ /* setup gpu monitor timer */
+ setup_timer(gpu_timer, mmdvfs_timer_callback, (unsigned long)gm);
+
+ gm->work_queue = create_singlethread_workqueue("mmdvfs_gpumon");
+ INIT_WORK(&gm->work, mmdvfs_gpu_monitor_work);
+}
+#endif /* MMDVFS_GPU_MONITOR_ENABLE */
+
+/* delay MMDVFS_CAM_MON_DELAY (6 s) before going LPM to work around camera ZSD + PIP issue */
+static void mmdvfs_cam_work_handler(struct work_struct *work)
+{
+ /* MMDVFSMSG("CAM handler %d\n", jiffies_to_msecs(jiffies)); */
+ mmdvfs_set_step(MMDVFS_CAM_MON_SCEN, mmdvfs_get_default_step());
+
+ spin_lock(&g_mmdvfs_mgr->scen_lock);
+ is_cam_monior_work = 0;
+ spin_unlock(&g_mmdvfs_mgr->scen_lock);
+
+}
+
+static DECLARE_DELAYED_WORK(g_mmdvfs_cam_work, mmdvfs_cam_work_handler);
+
+static void mmdvfs_stop_cam_monitor(void)
+{
+ cancel_delayed_work_sync(&g_mmdvfs_cam_work);
+}
+
+#define MMDVFS_CAM_MON_DELAY (6 * HZ)
+static void mmdvfs_start_cam_monitor(int scen)
+{
+ int delayed_mmsys_state = MMSYS_CLK_MEDIUM;
+
+ mmdvfs_stop_cam_monitor();
+
+ spin_lock(&g_mmdvfs_mgr->scen_lock);
+ is_cam_monior_work = 1;
+ spin_unlock(&g_mmdvfs_mgr->scen_lock);
+
+
+ if (current_mmsys_clk == MMSYS_CLK_LOW) {
+ MMDVFSMSG("Can't switch clk by hopping when CLK is low\n");
+ delayed_mmsys_state = MMSYS_CLK_MEDIUM;
+ } else {
+ delayed_mmsys_state = current_mmsys_clk;
+ }
+
+ /* MMDVFSMSG("CAM start %d\n", jiffies_to_msecs(jiffies)); */
+
+ if (is_force_max_mmsys_clk()) {
+
+ mmdvfs_set_step_with_mmsys_clk(MMDVFS_CAM_MON_SCEN, MMDVFS_VOLTAGE_HIGH, MMSYS_CLK_HIGH);
+
+ } else if (scen == SMI_BWC_SCEN_ICFP || scen == SMI_BWC_SCEN_VR_SLOW || scen == SMI_BWC_SCEN_VR) {
+
+ if (g_mmdvfs_cmd.camera_mode & (MMDVFS_CAMERA_MODE_FLAG_PIP | MMDVFS_CAMERA_MODE_FLAG_STEREO))
+ mmdvfs_set_step_with_mmsys_clk(MMDVFS_CAM_MON_SCEN, MMDVFS_VOLTAGE_HIGH, MMSYS_CLK_HIGH);
+ /* MMDVFSMSG("CAM monitor keep MMSYS_CLK_HIGH\n"); */
+ else if (g_mmdvfs_cmd.camera_mode & (MMDVFS_CAMERA_MODE_FLAG_VFB | MMDVFS_CAMERA_MODE_FLAG_EIS_2_0))
+ mmdvfs_set_step_with_mmsys_clk(MMDVFS_CAM_MON_SCEN, MMDVFS_VOLTAGE_HIGH, delayed_mmsys_state);
+ /*
+ else {
+ MMDVFSMSG("Keep cam monitor going so that DISP can't disable the vencpll\n");
+ }
+ */
+
+ }
+	/* delay (MMDVFS_CAM_MON_DELAY) covers PIP switch preview aspect delays... */
+ schedule_delayed_work(&g_mmdvfs_cam_work, MMDVFS_CAM_MON_DELAY);
+}
+
+#if MMDVFS_ENABLE_WQHD
+
+static void mmdvfs_start_gpu_monitor(mmdvfs_gpu_monitor_struct *gm)
+{
+ struct timer_list *gpu_timer = &gm->timer;
+
+ gm->gpu_loading_index = 0;
+ memset(gm->gpu_loadings, 0, sizeof(unsigned int) * MMDVFS_GPU_LOADING_NUM);
+
+ mod_timer(gpu_timer, jiffies + msecs_to_jiffies(MMDVFS_GPU_LOADING_SAMPLE_DURATION_IN_MS));
+}
+
+static void mmdvfs_stop_gpu_monitor(mmdvfs_gpu_monitor_struct *gm)
+{
+ struct timer_list *gpu_timer = &gm->timer;
+
+ /* flush workqueue */
+ flush_workqueue(gm->work_queue);
+ /* delete timer */
+ del_timer(gpu_timer);
+}
+
+#endif /* MMDVFS_ENABLE_WQHD */
+
+static void mmdvfs_vcorefs_request_dvfs_opp(int mm_kicker, int mm_dvfs_opp)
+{
+ int vcore_enable = 0;
+
+ vcore_enable = is_vcorefs_can_work();
+
+ if (vcore_enable != 1) {
+ MMDVFSMSG("Vcore disable: is_vcorefs_can_work = %d, (%d, %d)\n", vcore_enable, mm_kicker, mm_dvfs_opp);
+ } else {
+ /* MMDVFSMSG("Vcore trigger: is_vcorefs_can_work = %d, (%d, %d)\n", vcore_enable,
+ mm_kicker, mm_dvfs_opp); */
+ vcorefs_request_dvfs_opp(mm_kicker, mm_dvfs_opp);
+ }
+}
+
+int mmdvfs_set_step(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step)
+{
+ return mmdvfs_set_step_with_mmsys_clk(scenario, step, MMSYS_CLK_MEDIUM);
+}
+
+int mmdvfs_set_step_with_mmsys_clk(MTK_SMI_BWC_SCEN smi_scenario, mmdvfs_voltage_enum step, int mmsys_clk_mode_request)
+{
+ int i, scen_index;
+ unsigned int concurrency;
+ unsigned int scenario = smi_scenario;
+ mmdvfs_voltage_enum final_step = mmdvfs_get_default_step();
+ int mmsys_clk_step = MMSYS_CLK_MEDIUM;
+ int mmsys_clk_mode = mmsys_clk_mode_request;
+
+ /* workaround for WFD VENC scenario*/
+ if (scenario == SMI_BWC_SCEN_VENC || scenario == SMI_BWC_SCEN_VP)
+ return 0;
+
+ if (step == MMDVFS_VOLTAGE_DEFAULT_STEP)
+ step = final_step;
+
+#if !MMDVFS_ENABLE
+ return 0;
+#endif
+
+ /* MMDVFSMSG("MMDVFS set voltage scen %d step %d\n", scenario, step); */
+
+ if ((scenario >= (MTK_SMI_BWC_SCEN)MMDVFS_SCEN_COUNT) || (scenario
+ < SMI_BWC_SCEN_NORMAL)) {
+ MMDVFSERR("invalid scenario\n");
+ return -1;
+ }
+
+ /* dump information */
+ /* mmdvfs_dump_info(); */
+
+ /* go through all scenarios to decide the final step */
+ scen_index = (int)scenario;
+
+ spin_lock(&g_mmdvfs_mgr->scen_lock);
+
+ g_mmdvfs_scenario_voltage[scen_index] = step;
+
+ concurrency = 0;
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH)
+ concurrency |= 1 << i;
+ }
+
+ /* one high = final high */
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH) {
+ final_step = MMDVFS_VOLTAGE_HIGH;
+ break;
+ }
+ }
+
+ mmsys_clk_step = determine_current_mmsys_clk();
+ if (mmsys_clk_mode_request == MMSYS_CLK_MEDIUM && mmsys_clk_step == MMSYS_CLK_HIGH)
+ mmsys_clk_mode = MMSYS_CLK_HIGH;
+ else
+ mmsys_clk_mode = mmsys_clk_mode_request;
+
+ g_mmdvfs_current_step = final_step;
+
+ spin_unlock(&g_mmdvfs_mgr->scen_lock);
+
+#if MMDVFS_ENABLE
+
+ /* call vcore dvfs API */
+ /* MMDVFSMSG("FHD %d\n", final_step); */
+
+
+
+ if (final_step == MMDVFS_VOLTAGE_HIGH) {
+ if (scenario == MMDVFS_SCEN_MHL)
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_MHL, OPPI_PERF);
+ else if (scenario == SMI_BWC_SCEN_WFD)
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_WFD, OPPI_PERF);
+ else {
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_16MCAM, OPPI_PERF);
+ if (mmsys_clk_mode == MMSYS_CLK_HIGH)
+ mmdfvs_adjust_mmsys_clk_by_hopping(MMSYS_CLK_HIGH);
+ else
+ mmdfvs_adjust_mmsys_clk_by_hopping(MMSYS_CLK_MEDIUM);
+ }
+ } else{
+ if (scenario == MMDVFS_SCEN_MHL)
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_MHL, OPPI_UNREQ);
+ else if (scenario == SMI_BWC_SCEN_WFD)
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_WFD, OPPI_UNREQ);
+ else {
+ /* must lower the mmsys clk before enter LPM mode */
+ mmdfvs_adjust_mmsys_clk_by_hopping(MMSYS_CLK_MEDIUM);
+ mmdvfs_vcorefs_request_dvfs_opp(KIR_MM_16MCAM, OPPI_UNREQ);
+ }
+ }
+#endif /* MMDVFS_ENABLE */
+
+ MMDVFSMSG("Set vol scen:%d,step:%d,final:%d(0x%x),CMD(%d,%d,0x%x),INFO(%d,%d),CLK:%d\n",
+ scenario, step, final_step, concurrency,
+ g_mmdvfs_cmd.sensor_size, g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode,
+ g_mmdvfs_info->video_record_size[0], g_mmdvfs_info->video_record_size[1],
+ current_mmsys_clk);
+
+
+ return 0;
+}
+
+void mmdvfs_handle_cmd(MTK_MMDVFS_CMD *cmd)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ /* MMDVFSMSG("MMDVFS handle cmd %u s %d\n", cmd->type, cmd->scen); */
+
+ switch (cmd->type) {
+ case MTK_MMDVFS_CMD_TYPE_SET:
+ /* save cmd */
+ mmdvfs_update_cmd(cmd);
+
+ if (!(g_mmdvfs_concurrency & (1 << cmd->scen))) {
+ MMDVFSMSG("invalid set scen %d\n", cmd->scen);
+ cmd->ret = -1;
+ } else {
+ cmd->ret = mmdvfs_set_step_with_mmsys_clk(cmd->scen,
+ mmdvfs_query(cmd->scen, cmd), mmsys_clk_query(cmd->scen, cmd));
+ }
+ break;
+
+ case MTK_MMDVFS_CMD_TYPE_QUERY: { /* query with some parameters */
+#ifndef MMDVFS_WQHD_1_0V
+ if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_WQHD) {
+ /* QUERY ALWAYS HIGH for WQHD */
+ cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2HIGH;
+ } else
+#endif
+ {
+ mmdvfs_voltage_enum query_voltage = mmdvfs_query(cmd->scen, cmd);
+
+ mmdvfs_voltage_enum current_voltage = mmdvfs_get_current_step();
+
+ if (current_voltage < query_voltage) {
+ cmd->ret = (unsigned int)MMDVFS_STEP_LOW2HIGH;
+ } else if (current_voltage > query_voltage) {
+ cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2LOW;
+ } else {
+ cmd->ret
+ = (unsigned int)(query_voltage
+ == MMDVFS_VOLTAGE_HIGH
+ ? MMDVFS_STEP_HIGH2HIGH
+ : MMDVFS_STEP_LOW2LOW);
+ }
+ }
+
+ /* MMDVFSMSG("query %d\n", cmd->ret); */
+ /* cmd->ret = (unsigned int)query_voltage; */
+ break;
+ }
+
+ default:
+ MMDVFSMSG("invalid mmdvfs cmd\n");
+ BUG();
+ break;
+ }
+}
+
+void mmdvfs_notify_scenario_exit(MTK_SMI_BWC_SCEN scen)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ /* MMDVFSMSG("leave %d\n", scen); */
+
+ if ((scen == SMI_BWC_SCEN_VR) || (scen == SMI_BWC_SCEN_VR_SLOW) || (scen == SMI_BWC_SCEN_ICFP))
+ mmdvfs_start_cam_monitor(scen);
+
+ /* reset scenario voltage to default when it exits */
+ mmdvfs_set_step(scen, mmdvfs_get_default_step());
+}
+
+void mmdvfs_notify_scenario_enter(MTK_SMI_BWC_SCEN scen)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ /* MMDVFSMSG("enter %d\n", scen); */
+
+ switch (scen) {
+ case SMI_BWC_SCEN_WFD:
+ mmdvfs_set_step(scen, MMDVFS_VOLTAGE_HIGH);
+ if (current_mmsys_clk == MMSYS_CLK_LOW)
+ mmdvfs_raise_mmsys_by_mux();
+ break;
+ case SMI_BWC_SCEN_VR:
+ if (current_mmsys_clk == MMSYS_CLK_LOW)
+ mmdvfs_raise_mmsys_by_mux();
+
+ if (is_force_camera_hpm()) {
+ if (is_force_max_mmsys_clk())
+ mmdvfs_set_step_with_mmsys_clk(scen, MMDVFS_VOLTAGE_HIGH, MMSYS_CLK_HIGH);
+ else
+ mmdvfs_set_step(scen, MMDVFS_VOLTAGE_HIGH);
+ } else {
+ if (g_mmdvfs_cmd.camera_mode & (MMDVFS_CAMERA_MODE_FLAG_PIP | MMDVFS_CAMERA_MODE_FLAG_STEREO)) {
+ mmdvfs_set_step_with_mmsys_clk(scen, MMDVFS_VOLTAGE_HIGH, MMSYS_CLK_HIGH);
+ } else if (g_mmdvfs_cmd.camera_mode & (MMDVFS_CAMERA_MODE_FLAG_VFB |
+ MMDVFS_CAMERA_MODE_FLAG_EIS_2_0)){
+ mmdvfs_set_step(scen, MMDVFS_VOLTAGE_HIGH);
+ }
+ }
+ break;
+ case SMI_BWC_SCEN_VR_SLOW:
+ case SMI_BWC_SCEN_ICFP:
+ if (current_mmsys_clk == MMSYS_CLK_LOW)
+ mmdvfs_raise_mmsys_by_mux();
+ mmdvfs_set_step_with_mmsys_clk(scen, MMDVFS_VOLTAGE_HIGH, MMSYS_CLK_HIGH);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void mmdvfs_init(MTK_SMI_BWC_MM_INFO *info)
+{
+#if !MMDVFS_ENABLE
+ return;
+#endif
+
+ spin_lock_init(&g_mmdvfs_mgr->scen_lock);
+ /* set current step as the default step */
+ g_mmdvfs_current_step = mmdvfs_get_default_step();
+
+ g_mmdvfs_info = info;
+
+#ifdef MMDVFS_GPU_MONITOR_ENABLE
+ mmdvfs_init_gpu_monitor(&g_mmdvfs_mgr->gpu_monitor);
+#endif /* MMDVFS_GPU_MONITOR_ENABLE */
+}
+
+void mmdvfs_mhl_enable(int enable)
+{
+ g_mmdvfs_mgr->is_mhl_enable = enable;
+
+ if (enable)
+ mmdvfs_set_step(MMDVFS_SCEN_MHL, MMDVFS_VOLTAGE_HIGH);
+ else
+ mmdvfs_set_step(MMDVFS_SCEN_MHL, MMDVFS_VOLTAGE_DEFAULT_STEP);
+}
+
+void mmdvfs_notify_scenario_concurrency(unsigned int u4Concurrency)
+{
+ /*
+ * DO NOT CALL VCORE DVFS API HERE. THIS FUNCTION IS IN SMI SPIN LOCK.
+ */
+
+ /* raise EMI monitor BW threshold in VP, VR, VR SLOW motion cases to
+	make sure vcore stays at the MMDVFS level as long as possible */
+ if (u4Concurrency & ((1 << SMI_BWC_SCEN_VP) | (1 << SMI_BWC_SCEN_VR)
+ | (1 << SMI_BWC_SCEN_VR_SLOW))) {
+#if MMDVFS_ENABLE
+ /* MMDVFSMSG("fliper high\n"); */
+ /* fliper_set_bw(BW_THRESHOLD_HIGH); */
+#endif
+ } else {
+#if MMDVFS_ENABLE
+ /* MMDVFSMSG("fliper normal\n"); */
+ /* fliper_restore_bw(); */
+#endif
+ }
+
+ g_mmdvfs_concurrency = u4Concurrency;
+}
+
+int mmdvfs_is_default_step_need_perf(void)
+{
+ if (mmdvfs_get_default_step() == MMDVFS_VOLTAGE_LOW)
+ return 0;
+ else
+ return 1;
+}
+
+/* switch MM CLK callback from VCORE DVFS driver */
+void mmdvfs_mm_clock_switch_notify(int is_before, int is_to_high)
+{
+ /* for WQHD 1.0v, we have to dynamically switch DL/DC */
+#ifdef MMDVFS_WQHD_1_0V
+ int session_id;
+
+ if (mmdvfs_get_lcd_resolution() != MMDVFS_LCD_SIZE_WQHD)
+ return;
+
+ session_id = MAKE_DISP_SESSION(DISP_SESSION_PRIMARY, 0);
+
+ if (!is_before && is_to_high) {
+ MMDVFSMSG("DL\n");
+ /* nonblocking switch to direct link after HPM */
+ primary_display_switch_mode_for_mmdvfs(DISP_SESSION_DIRECT_LINK_MODE, session_id, 0);
+ } else if (is_before && !is_to_high) {
+ /* BLOCKING switch to decouple before switching to LPM */
+ MMDVFSMSG("DC\n");
+ primary_display_switch_mode_for_mmdvfs(DISP_SESSION_DECOUPLE_MODE, session_id, 1);
+ }
+#endif /* MMDVFS_WQHD_1_0V */
+}
+
+static int mmdfvs_adjust_mmsys_clk_by_hopping(int clk_mode)
+{
+ int freq_hopping_disable = is_mmdvfs_freq_hopping_disabled();
+
+ int result = 0;
+
+ if (clk_mode == MMSYS_CLK_HIGH) {
+ if (current_mmsys_clk == MMSYS_CLK_LOW) {
+ MMDVFSMSG("Doesn't allow mmsys clk adjust from low to high!\n");
+ } else if (!freq_hopping_disable && current_mmsys_clk != MMSYS_CLK_HIGH) {
+ /* MMDVFSMSG("Freq hopping: DSS: %d\n", 0xE0000);*/
+ mt_dfs_vencpll(0xE0000);
+ notify_cb_func_checked(notify_cb_func, current_mmsys_clk, MMSYS_CLK_HIGH,
+ "notify_cb_func");
+ /* For common clients */
+ notify_mmsys_clk_change(current_mmsys_clk, MMSYS_CLK_HIGH);
+ current_mmsys_clk = MMSYS_CLK_HIGH;
+ } else {
+ if (freq_hopping_disable)
+ MMDVFSMSG("Freq hopping disable, not trigger: DSS: %d\n", 0xE0000);
+ }
+ result = 1;
+ } else if (clk_mode == MMSYS_CLK_MEDIUM) {
+ if (!freq_hopping_disable && current_mmsys_clk != MMSYS_CLK_MEDIUM) {
+ /* MMDVFSMSG("Freq hopping: DSS: %d\n", 0xB0000); */
+ mt_dfs_vencpll(0xB0000);
+ notify_cb_func_checked(notify_cb_func, current_mmsys_clk, MMSYS_CLK_MEDIUM,
+ "notify_cb_func");
+ /* For common clients */
+ notify_mmsys_clk_change(current_mmsys_clk, MMSYS_CLK_MEDIUM);
+ current_mmsys_clk = MMSYS_CLK_MEDIUM;
+ } else {
+ if (freq_hopping_disable)
+ MMDVFSMSG("Freq hopping disable, not trigger: DSS: %d\n", 0xB0000);
+ }
+ result = 1;
+ } else if (clk_mode == MMSYS_CLK_LOW) {
+ MMDVFSMSG("Doesn't support MMSYS_CLK_LOW with hopping in this platform\n");
+ result = 1;
+ } else {
+ MMDVFSMSG("Don't change CLK: mode=%d\n", clk_mode);
+ result = 0;
+ }
+
+ return result;
+}
+
+int mmdvfs_raise_mmsys_by_mux(void)
+{
+ if (is_mmdvfs_freq_mux_disabled())
+ return 0;
+
+ notify_cb_func_checked(notify_cb_func, current_mmsys_clk, MMSYS_CLK_MEDIUM,
+ "notify_cb_func");
+ current_mmsys_clk = MMSYS_CLK_MEDIUM;
+ return 1;
+
+}
+
+int mmdvfs_lower_mmsys_by_mux(void)
+{
+ if (is_mmdvfs_freq_mux_disabled())
+ return 0;
+
+ if (notify_cb_func != NULL && current_mmsys_clk != MMSYS_CLK_HIGH) {
+ notify_cb_func(current_mmsys_clk, MMSYS_CLK_LOW);
+ current_mmsys_clk = MMSYS_CLK_LOW;
+ } else{
+ MMDVFSMSG("lower_cb_func has not been registered");
+ return 0;
+ }
+ return 1;
+
+}
+
+int register_mmclk_switch_cb(clk_switch_cb notify_cb,
+clk_switch_cb notify_cb_nolock)
+{
+ notify_cb_func = notify_cb;
+ notify_cb_func_nolock = notify_cb_nolock;
+
+ return 1;
+}
+
+static int notify_cb_func_checked(clk_switch_cb func, int ori_mmsys_clk_mode, int update_mmsys_clk_mode, char *msg)
+{
+
+ if (is_mmdvfs_freq_mux_disabled()) {
+ MMDVFSMSG("notify_cb_func is disabled, not invoked: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode,
+ update_mmsys_clk_mode);
+ return 0;
+ }
+ if (func == NULL) {
+ MMDVFSMSG("notify_cb_func is NULL, not invoked: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode,
+ update_mmsys_clk_mode);
+ } else {
+ if (ori_mmsys_clk_mode != update_mmsys_clk_mode)
+ MMDVFSMSG("notify_cb_func: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode, update_mmsys_clk_mode);
+
+ func(ori_mmsys_clk_mode, update_mmsys_clk_mode);
+ return 1;
+ }
+ return 0;
+}
+
+int mmdvfs_notify_mmclk_switch_request(int event)
+{
+ int i = 0;
+ MTK_SMI_BWC_SCEN current_smi_scenario = smi_get_current_profile();
+
+ /* Don't get the lock since there is no need to synchronize the is_cam_monior_work here*/
+ if (is_cam_monior_work != 0) {
+ /* MMDVFSMSG("Doesn't handle disp request when cam monitor is active\n"); */
+ return 0;
+ }
+ /* MMDVFSMSG("mmclk_switch_request: event=%d, current=%d", event, current_smi_scenario); */
+
+	/* Only in UI idle mode or VP 1 layer scenario */
+ /* we can lower the mmsys clock */
+ if (event == MMDVFS_EVENT_UI_IDLE_ENTER && current_smi_scenario == SMI_BWC_SCEN_NORMAL) {
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
+ if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH) {
+ MMDVFSMSG("Doesn't switch to low mmsys clk; vore is still in HPM mode");
+ return 0;
+ }
+ }
+
+ /* call back from DISP so we don't need use DISP lock here */
+ if (current_mmsys_clk != MMSYS_CLK_HIGH) {
+ /* Only disable VENC pll when clock is in 286MHz */
+ notify_cb_func_checked(notify_cb_func_nolock, current_mmsys_clk, MMSYS_CLK_LOW,
+ "notify_cb_func_nolock");
+ current_mmsys_clk = MMSYS_CLK_LOW;
+ return 1;
+ }
+ } else if (event == MMDVFS_EVENT_OVL_SINGLE_LAYER_EXIT || event == MMDVFS_EVENT_UI_IDLE_EXIT) {
+ if (current_mmsys_clk != MMSYS_CLK_HIGH) {
+ /* call back from DISP so we don't need use DISP lock here */
+ notify_cb_func_checked(notify_cb_func_nolock, current_mmsys_clk, MMSYS_CLK_MEDIUM,
+ "notify_cb_func_nolock");
+ current_mmsys_clk = MMSYS_CLK_MEDIUM;
+ return 1;
+ }
+ } else if (event == MMDVFS_EVENT_OVL_SINGLE_LAYER_ENTER && SMI_BWC_SCEN_VP) {
+ /* call back from DISP so we don't need use DISP lock here */
+ if (current_mmsys_clk != MMSYS_CLK_HIGH) {
+ notify_cb_func_checked(notify_cb_func_nolock, current_mmsys_clk, MMSYS_CLK_LOW,
+ "notify_cb_func_nolock");
+ current_mmsys_clk = MMSYS_CLK_LOW;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+int mmdvfs_register_mmclk_switch_cb(clk_switch_cb notify_cb, int mmdvfs_client_id)
+{
+ if (mmdvfs_client_id >= 0 && mmdvfs_client_id < MMDVFS_CLK_SWITCH_CB_MAX) {
+ quick_mmclk_cbs[mmdvfs_client_id] = notify_cb;
+ } else{
+ MMDVFSMSG("clk_switch_cb register failed: id=%d\n", mmdvfs_client_id);
+ return 1;
+ }
+ return 0;
+}
+
+static int mmsys_clk_change_notify_checked(clk_switch_cb func, int ori_mmsys_clk_mode,
+int update_mmsys_clk_mode, char *msg)
+{
+ if (func == NULL) {
+ MMDVFSMSG("notify_cb_func is NULL, not invoked: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode,
+ update_mmsys_clk_mode);
+ } else {
+ MMDVFSMSG("notify_cb_func: %s, (%d,%d)\n", msg, ori_mmsys_clk_mode, update_mmsys_clk_mode);
+ func(ori_mmsys_clk_mode, update_mmsys_clk_mode);
+ return 1;
+ }
+ return 0;
+}
+
+static void notify_mmsys_clk_change(int ori_mmsys_clk_mode, int update_mmsys_clk_mode)
+{
+ int i = 0;
+
+ char msg[MMDVFS_CLK_SWITCH_CLIENT_MSG_MAX] = "";
+
+ for (i = 0; i < MMDVFS_CLK_SWITCH_CB_MAX; i++) {
+ snprintf(msg, MMDVFS_CLK_SWITCH_CLIENT_MSG_MAX, "id=%d", i);
+ if (quick_mmclk_cbs[i] != NULL)
+ mmsys_clk_change_notify_checked(quick_mmclk_cbs[i], ori_mmsys_clk_mode,
+ update_mmsys_clk_mode, msg);
+ }
+}
+
+
+void dump_mmdvfs_info(void)
+{
+ int i = 0;
+
+ MMDVFSMSG("MMDVFS dump: CMD(%d,%d,0x%x),INFO VR(%d,%d),CLK: %d\n",
+ g_mmdvfs_cmd.sensor_size, g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode,
+ g_mmdvfs_info->video_record_size[0], g_mmdvfs_info->video_record_size[1],
+ current_mmsys_clk);
+
+ for (i = 0; i < MMDVFS_SCEN_COUNT; i++)
+ MMDVFSMSG("Secn:%d,vol-step:%d\n", i, g_mmdvfs_scenario_voltage[i]);
+
+}
diff --git a/drivers/misc/mediatek/smi/mt6735/Makefile b/drivers/misc/mediatek/smi/mt6735/Makefile
deleted file mode 100755
index 2f64b80f2..000000000
--- a/drivers/misc/mediatek/smi/mt6735/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-include $(srctree)/drivers/misc/mediatek/Makefile.custom
-
-obj-y += smi_debug.o
-obj-y += mmdvfs_mgr.o
-
-ifeq ($(CONFIG_ARCH_MT6735),y)
-obj-y += smi_common_d1.o
-ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/$(MTK_PLATFORM)/mt6735/
-ccflags-y += -DD1
-endif
-
-ifeq ($(CONFIG_ARCH_MT6735M),y)
-obj-y += smi_common_d2.o
-ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/$(MTK_PLATFORM)/mt6735m/
-ccflags-y += -DD2
-endif
-
-ifeq ($(CONFIG_ARCH_MT6753),y)
-obj-y += smi_common_d3.o
-ccflags-y += -I$(srctree)/drivers/misc/mediatek/cmdq/$(MTK_PLATFORM)/mt6753/
-ccflags-y += -DD3
-endif \ No newline at end of file
diff --git a/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.c b/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.c
deleted file mode 100644
index e4ba0e5d3..000000000
--- a/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.c
+++ /dev/null
@@ -1,410 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/aee.h>
-#include <linux/xlog.h>
-#include <mach/mt_smi.h>
-#include <mach/mt_vcore_dvfs.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-
-#include <linux/mtk_gpu_utility.h>
-
-#include "mmdvfs_mgr.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "[" MMDVFS_LOG_TAG "]" fmt
-
-#if ((defined(D1) || defined(D2) || defined(D3)) && !IS_ENABLED(CONFIG_FPGA_EARLY_PORTING))
- #define MMDVFS_ENABLE 1
-#endif
-
-#if defined(D3)
-#define MMDVFS_ENABLE_FLIPER_CONTROL 1
-#else
-#define MMDVFS_ENABLE_FLIPER_CONTROL 0
-#endif
-
-#if MMDVFS_ENABLE_FLIPER_CONTROL
-#include <mach/fliper.h>
-#endif
-
-/* mmdvfs MM sizes */
-#define MMDVFS_PIXEL_NUM_720P (1280 * 720)
-#define MMDVFS_PIXEL_NUM_2160P (3840 * 2160)
-#define MMDVFS_PIXEL_NUM_1080P (2100 * 1300)
-#define MMDVFS_PIXEL_NUM_2M (2100 * 1300)
-/* 13M sensor */
-#define MMDVFS_PIXEL_NUM_SENSOR_FULL (13000000)
-#define MMDVFS_PIXEL_NUM_SENSOR_6M ( 5800000)
-#define MMDVFS_PIXEL_NUM_SENSOR_8M ( 7800000)
-
-/* mmdvfs display sizes */
-#define MMDVFS_DISPLAY_SIZE_HD (1280 * 832)
-#define MMDVFS_DISPLAY_SIZE_FHD (1920 * 1216)
-
-/* screen size */
-extern unsigned int DISP_GetScreenWidth(void);
-extern unsigned int DISP_GetScreenHeight(void);
-
-#define MMDVFS_CAM_MON_SCEN SMI_BWC_SCEN_CNT
-#define MMDVFS_SCEN_COUNT (SMI_BWC_SCEN_CNT + 1)
-
-/* + 1 for MMDVFS_CAM_MON_SCEN */
-static mmdvfs_voltage_enum g_mmdvfs_scenario_voltage[MMDVFS_SCEN_COUNT] = {MMDVFS_VOLTAGE_DEFAULT};
-static mmdvfs_voltage_enum g_mmdvfs_current_step;
-static MTK_SMI_BWC_MM_INFO *g_mmdvfs_info;
-static MTK_MMDVFS_CMD g_mmdvfs_cmd;
-
-typedef struct
-{
- spinlock_t scen_lock;
- int is_mhl_enable;
-} mmdvfs_context_struct;
-
-/* mmdvfs_query() return value, remember to sync with user space */
-typedef enum
-{
- MMDVFS_STEP_LOW = 0,
- MMDVFS_STEP_HIGH,
-
- MMDVFS_STEP_LOW2LOW, /* LOW */
- MMDVFS_STEP_HIGH2LOW, /* LOW */
- MMDVFS_STEP_LOW2HIGH, /* HIGH */
- MMDVFS_STEP_HIGH2HIGH, /* HIGH */
-} mmdvfs_step_enum;
-
-/* lcd size */
-typedef enum
-{
- MMDVFS_LCD_SIZE_HD,
- MMDVFS_LCD_SIZE_FHD,
- MMDVFS_LCD_SIZE_WQHD,
- MMDVFS_LCD_SIZE_END_OF_ENUM
-} mmdvfs_lcd_size_enum;
-
-static mmdvfs_context_struct g_mmdvfs_mgr_cntx;
-static mmdvfs_context_struct * const g_mmdvfs_mgr = &g_mmdvfs_mgr_cntx;
-
-static mmdvfs_lcd_size_enum mmdvfs_get_lcd_resolution(void)
-{
- if (DISP_GetScreenWidth() * DISP_GetScreenHeight() <= MMDVFS_DISPLAY_SIZE_HD) {
- return MMDVFS_LCD_SIZE_HD;
- }
-
- return MMDVFS_LCD_SIZE_FHD;
-}
-
-static mmdvfs_voltage_enum mmdvfs_get_default_step(void)
-{
-#if defined(D3)
- return MMDVFS_VOLTAGE_LOW;
-#else /* defined(D3) */
- if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_HD) {
- return MMDVFS_VOLTAGE_LOW;
- }
-
- return MMDVFS_VOLTAGE_HIGH;
-#endif /* defined(D3) */
-}
-
-static mmdvfs_voltage_enum mmdvfs_get_current_step(void)
-{
- return g_mmdvfs_current_step;
-}
-
-static mmdvfs_voltage_enum mmdvfs_query(MTK_SMI_BWC_SCEN scenario, MTK_MMDVFS_CMD *cmd)
-{
- mmdvfs_voltage_enum step = mmdvfs_get_default_step();
- unsigned int venc_size;
- MTK_MMDVFS_CMD cmd_default;
-
- venc_size = g_mmdvfs_info->video_record_size[0] * g_mmdvfs_info->video_record_size[1];
-
- /* use default info */
- if (cmd == NULL) {
- memset(&cmd_default, 0, sizeof(MTK_MMDVFS_CMD));
- cmd_default.camera_mode = MMDVFS_CAMERA_MODE_FLAG_DEFAULT;
- cmd = &cmd_default;
- }
-
- /* collect the final information */
- if (cmd->sensor_size == 0) {
- cmd->sensor_size = g_mmdvfs_cmd.sensor_size;
- }
-
- if (cmd->sensor_fps == 0) {
- cmd->sensor_fps = g_mmdvfs_cmd.sensor_fps;
- }
-
- if (cmd->camera_mode == MMDVFS_CAMERA_MODE_FLAG_DEFAULT) {
- cmd->camera_mode = g_mmdvfs_cmd.camera_mode;
- }
-
- /* HIGH level scenarios */
- switch (scenario) {
-#if defined(D2) /* D2 ISP >= 6M HIGH */
- case SMI_BWC_SCEN_VR_SLOW:
- case SMI_BWC_SCEN_VR:
- if (cmd->sensor_size >= MMDVFS_PIXEL_NUM_SENSOR_6M) {
- step = MMDVFS_VOLTAGE_HIGH;
- }
- break;
-#endif
- /* force HPM for engineering mode */
- case SMI_BWC_SCEN_FORCE_MMDVFS:
- step = MMDVFS_VOLTAGE_HIGH;
- break;
- default:
- break;
- }
-
- return step;
-}
-
-static void mmdvfs_update_cmd(MTK_MMDVFS_CMD *cmd)
-{
- if (cmd == NULL) {
- return;
- }
-
- if (cmd->sensor_size) {
- g_mmdvfs_cmd.sensor_size = cmd->sensor_size;
- }
-
- if (cmd->sensor_fps) {
- g_mmdvfs_cmd.sensor_fps = cmd->sensor_fps;
- }
-
- MMDVFSMSG("update cm %d %d\n", cmd->camera_mode, cmd->sensor_size);
- g_mmdvfs_cmd.camera_mode = cmd->camera_mode;
-}
-
-static void mmdvfs_dump_info(void)
-{
- MMDVFSMSG("CMD %d %d %d\n", g_mmdvfs_cmd.sensor_size, g_mmdvfs_cmd.sensor_fps, g_mmdvfs_cmd.camera_mode);
- MMDVFSMSG("INFO VR %d %d\n", g_mmdvfs_info->video_record_size[0], g_mmdvfs_info->video_record_size[1]);
-}
-
-/* delay 4 seconds to go LPM to workaround camera ZSD + PIP issue */
-static void mmdvfs_cam_work_handler(struct work_struct *work)
-{
- MMDVFSMSG("CAM handler %d\n", jiffies_to_msecs(jiffies));
- mmdvfs_set_step(MMDVFS_CAM_MON_SCEN, mmdvfs_get_default_step());
-}
-
-#if !defined(D3)
-
-static DECLARE_DELAYED_WORK(g_mmdvfs_cam_work, mmdvfs_cam_work_handler);
-
-static void mmdvfs_stop_cam_monitor(void)
-{
- cancel_delayed_work_sync(&g_mmdvfs_cam_work);
-}
-
-#define MMDVFS_CAM_MON_DELAY (4 * HZ)
-static void mmdvfs_start_cam_monitor(void)
-{
- mmdvfs_stop_cam_monitor();
- MMDVFSMSG("CAM start %d\n", jiffies_to_msecs(jiffies));
- mmdvfs_set_step(MMDVFS_CAM_MON_SCEN, MMDVFS_VOLTAGE_HIGH);
- /* 4 seconds for PIP switch preview aspect delays... */
- schedule_delayed_work(&g_mmdvfs_cam_work, MMDVFS_CAM_MON_DELAY);
-}
-
-#endif /* !defined(D3) */
-
-int mmdvfs_set_step(MTK_SMI_BWC_SCEN scenario, mmdvfs_voltage_enum step)
-{
- int i, scen_index;
- mmdvfs_voltage_enum final_step = mmdvfs_get_default_step();
-
-#if !MMDVFS_ENABLE
- return 0;
-#endif
-
-#if defined(D1)
- /* D1 FHD always HPM. do not have to trigger vcore dvfs. */
- if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD) {
- return 0;
- }
-#endif
-
- MMDVFSMSG("MMDVFS set voltage scen %d step %d\n", scenario, step);
-
- if ((scenario >= MMDVFS_SCEN_COUNT) || (scenario < SMI_BWC_SCEN_NORMAL))
- {
- MMDVFSERR("invalid scenario\n");
- return -1;
- }
-
- /* dump information */
- mmdvfs_dump_info();
-
- /* go through all scenarios to decide the final step */
- scen_index = (int)scenario;
-
- spin_lock(&g_mmdvfs_mgr->scen_lock);
-
- g_mmdvfs_scenario_voltage[scen_index] = step;
-
- /* one high = final high */
- for (i = 0; i < MMDVFS_SCEN_COUNT; i++) {
- if (g_mmdvfs_scenario_voltage[i] == MMDVFS_VOLTAGE_HIGH) {
- final_step = MMDVFS_VOLTAGE_HIGH;
- break;
- }
- }
-
- g_mmdvfs_current_step = final_step;
-
- spin_unlock(&g_mmdvfs_mgr->scen_lock);
-
- MMDVFSMSG("MMDVFS set voltage scen %d step %d final %d\n", scenario, step, final_step);
-
-#if MMDVFS_ENABLE
- /* call vcore dvfs API */
- if (final_step == MMDVFS_VOLTAGE_HIGH) {
- vcorefs_request_dvfs_opp(KIR_MM, OPPI_PERF);
- } else {
- vcorefs_request_dvfs_opp(KIR_MM, OPPI_UNREQ);
- }
-#endif
-
- return 0;
-}
-
-void mmdvfs_handle_cmd(MTK_MMDVFS_CMD *cmd)
-{
-#if !MMDVFS_ENABLE
- return;
-#endif
-
- MMDVFSMSG("MMDVFS cmd %u %d\n", cmd->type, cmd->scen);
-
- switch (cmd->type) {
- case MTK_MMDVFS_CMD_TYPE_SET:
- /* save cmd */
- mmdvfs_update_cmd(cmd);
- cmd->ret = mmdvfs_set_step(cmd->scen, mmdvfs_query(cmd->scen, cmd));
- break;
-
- case MTK_MMDVFS_CMD_TYPE_QUERY:
- { /* query with some parameters */
- if (mmdvfs_get_lcd_resolution() == MMDVFS_LCD_SIZE_FHD) {
- /* QUERY ALWAYS HIGH for FHD */
- cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2HIGH;
- } else { /* FHD */
- mmdvfs_voltage_enum query_voltage = mmdvfs_query(cmd->scen, cmd);
- mmdvfs_voltage_enum current_voltage = mmdvfs_get_current_step();
-
- if (current_voltage < query_voltage) {
- cmd->ret = (unsigned int)MMDVFS_STEP_LOW2HIGH;
- } else if (current_voltage > query_voltage) {
- cmd->ret = (unsigned int)MMDVFS_STEP_HIGH2LOW;
- } else {
- cmd->ret = (unsigned int)(query_voltage == MMDVFS_VOLTAGE_HIGH ? MMDVFS_STEP_HIGH2HIGH : MMDVFS_STEP_LOW2LOW);
- }
- }
-
- MMDVFSMSG("query %d\n", cmd->ret);
- /* cmd->ret = (unsigned int)query_voltage; */
- break;
- }
-
- default:
- MMDVFSMSG("invalid mmdvfs cmd\n");
- BUG();
- break;
- }
-}
-
-void mmdvfs_notify_scenario_exit(MTK_SMI_BWC_SCEN scen)
-{
-#if !MMDVFS_ENABLE
- return;
-#endif
-
- MMDVFSMSG("leave %d\n", scen);
-
-#if !defined(D3) /* denali-3 does not need this workaround because the MMCLK is always the highest */
- /*
- * keep HPM for 4 seconds after exiting camera scenarios to get rid of
- * cam framework will let us go to normal scenario for a short time (ex: STOP PREVIEW --> NORMAL --> START PREVIEW)
- * where the LPM mode (low MMCLK) may cause ISP failures
- */
- if ((scen == SMI_BWC_SCEN_VR) || (scen == SMI_BWC_SCEN_VR_SLOW) || (scen == SMI_BWC_SCEN_ICFP)) {
- mmdvfs_start_cam_monitor();
- }
-#endif /* !defined(D3) */
-
- /* reset scenario voltage to default when it exits */
- mmdvfs_set_step(scen, mmdvfs_get_default_step());
-}
-
-void mmdvfs_notify_scenario_enter(MTK_SMI_BWC_SCEN scen)
-{
-#if !MMDVFS_ENABLE
- return;
-#endif
-
- MMDVFSMSG("enter %d\n", scen);
-
- /* ISP ON = high */
- switch (scen) {
-#if defined(D2) /* d2 sensor > 6M */
- case SMI_BWC_SCEN_VR:
- mmdvfs_set_step(scen, mmdvfs_query(scen, NULL));
- break;
- case SMI_BWC_SCEN_VR_SLOW:
-#elif defined(D1) /* default VR high */
- case SMI_BWC_SCEN_VR:
- case SMI_BWC_SCEN_VR_SLOW:
-#else /* D3 */
- case SMI_BWC_SCEN_WFD:
- case SMI_BWC_SCEN_VSS:
-#endif
- case SMI_BWC_SCEN_ICFP:
- case SMI_BWC_SCEN_FORCE_MMDVFS:
- mmdvfs_set_step(scen, MMDVFS_VOLTAGE_HIGH);
- break;
-
- default:
- break;
- }
-}
-
-void mmdvfs_init(MTK_SMI_BWC_MM_INFO *info)
-{
-#if !MMDVFS_ENABLE
- return;
-#endif
-
- spin_lock_init(&g_mmdvfs_mgr->scen_lock);
- /* set current step as the default step */
- g_mmdvfs_current_step = mmdvfs_get_default_step();
-
- g_mmdvfs_info = info;
-}
-
-void mmdvfs_mhl_enable(int enable)
-{
- g_mmdvfs_mgr->is_mhl_enable = enable;
-}
-
-void mmdvfs_notify_scenario_concurrency(unsigned int u4Concurrency)
-{
- /* raise EMI monitor BW threshold in VP, VR, VR SLOW motion cases to make sure vcore stay MMDVFS level as long as possible */
- if (u4Concurrency & ((1 << SMI_BWC_SCEN_VP) | (1 << SMI_BWC_SCEN_VR) | (1 << SMI_BWC_SCEN_VR_SLOW))) {
- #if MMDVFS_ENABLE_FLIPER_CONTROL
- MMDVFSMSG("fliper high\n");
- fliper_set_bw(BW_THRESHOLD_HIGH);
- #endif
- } else {
- #if MMDVFS_ENABLE_FLIPER_CONTROL
- MMDVFSMSG("fliper normal\n");
- fliper_restore_bw();
- #endif
- }
-}
-
-
diff --git a/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.h b/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.h
deleted file mode 100644
index a25e4b61e..000000000
--- a/drivers/misc/mediatek/smi/mt6735/mmdvfs_mgr.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __MMDVFS_MGR_H__
-#define __MMDVFS_MGR_H__
-
-#include <linux/aee.h>
-
-#define MMDVFS_LOG_TAG "MMDVFS"
-
-#define MMDVFSMSG(string, args...) if(1){\
- pr_warn("[pid=%d]"string, current->tgid, ##args); \
- }
-#define MMDVFSMSG2(string, args...) pr_warn(string, ##args)
-#define MMDVFSTMP(string, args...) pr_warn("[pid=%d]"string, current->tgid, ##args)
-#define MMDVFSERR(string, args...) do{\
- pr_err("error: "string, ##args); \
- aee_kernel_warning(MMDVFS_LOG_TAG, "error: "string, ##args); \
-} while(0)
-
-
-/* MMDVFS extern APIs */
-extern void mmdvfs_init(MTK_SMI_BWC_MM_INFO *info);
-extern void mmdvfs_handle_cmd(MTK_MMDVFS_CMD *cmd);
-extern void mmdvfs_notify_scenario_enter(MTK_SMI_BWC_SCEN scen);
-extern void mmdvfs_notify_scenario_exit(MTK_SMI_BWC_SCEN scen);
-extern void mmdvfs_notify_scenario_concurrency(unsigned int u4Concurrency);
-
-#endif /* __MMDVFS_MGR_H__ */
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_common.h b/drivers/misc/mediatek/smi/mt6735/smi_common.h
deleted file mode 100644
index 7422905ff..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_common.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef __SMI_COMMON_H__
-#define __SMI_COMMON_H__
-
-#include <linux/aee.h>
-#ifdef CONFIG_MTK_CMDQ
-#include "cmdq_core.h"
-#endif
-
-#define SMIMSG(string, args...) if(1){\
- pr_warn("[pid=%d]"string, current->tgid, ##args); \
- }
-#define SMIMSG2(string, args...) pr_warn(string, ##args)
-
-#ifdef CONFIG_MTK_CMDQ
-#define SMIMSG3(onoff, string, args...) if(onoff == 1){\
-cmdq_core_save_first_dump(string, ##args);\
-}\
-SMIMSG(string, ##args)
-#else
-#define SMIMSG3(string, args...) SMIMSG(string, ##args)
-#endif
-
-
-#define SMITMP(string, args...) pr_warn("[pid=%d]"string, current->tgid, ##args)
-#define SMIERR(string, args...) do{\
- pr_err("error: "string, ##args); \
- aee_kernel_warning(SMI_LOG_TAG, "error: "string, ##args); \
-}while(0)
-
-#define smi_aee_print(string, args...) do{\
- char smi_name[100];\
- snprintf(smi_name,100, "["SMI_LOG_TAG"]"string, ##args); \
- aee_kernel_warning(smi_name, "["SMI_LOG_TAG"]error:"string,##args); \
-}while(0)
-
-
-// Please use the function to instead gLarbBaseAddr to prevent the NULL pointer access error
-// when the corrosponding larb is not exist
-// extern unsigned int gLarbBaseAddr[SMI_LARB_NR];
-extern unsigned long get_larb_base_addr(int larb_id);
-extern char *smi_port_name[][21];
-
-extern void smi_dumpDebugMsg(void);
-
-#define SMI_CLIENT_DISP 0
-#define SMI_CLIENT_WFD 1
-
-#define SMI_EVENT_DIRECT_LINK ( 0x1 << 0 )
-#define SMI_EVENT_DECOUPLE ( 0x1 << 1 )
-#define SMI_EVENT_OVL_CASCADE ( 0x1 << 2 )
-#define SMI_EVENT_OVL1_EXTERNAL ( 0x1 << 3 )
-
-
-extern void smi_client_status_change_notify(int module, int mode);
-// module:
-// 0: DISP
-// 1: WFD
-// mode:
-// DISP:
-// SMI_EVENT_DIRECT_LINK - directlink mode
-// SMI_EVENT_DECOUPLE - decouple mode
-// SMI_EVENT_OVL_CASCADE - OVL cascade
-// SMI_EVENT_OVL1_EXTERNAL - OVL 1 for external display
-
-extern void SMI_DBG_Init(void);
-
-
-#endif
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_common_d1.c b/drivers/misc/mediatek/smi/mt6735/smi_common_d1.c
deleted file mode 100644
index 0cfe5570f..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_common_d1.c
+++ /dev/null
@@ -1,2112 +0,0 @@
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/kobject.h>
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/aee.h>
-#include <linux/xlog.h>
-
-// We can't remove mt_clkmgr.h now since SMI needs larb monitor APIs
-#include <mach/mt_clkmgr.h>
-
-// Define SMI_INTERNAL_CCF_SUPPORT when CCF needs to be enabled
-#if !defined(CONFIG_MTK_LEGACY)
- #define SMI_INTERNAL_CCF_SUPPORT
-#endif
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-#include <linux/clk.h>
-#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
-
-#include <asm/io.h>
-
-#include <linux/ioctl.h>
-#include <linux/fs.h>
-
-#if IS_ENABLED(CONFIG_COMPAT)
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#endif
-
-#include <mach/mt_smi.h>
-
-
-#include "smi_reg_d1.h"
-#include "smi_common.h"
-#include "smi_debug.h"
-
-#include "mmdvfs_mgr.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "[SMI]" fmt
-
-#define SMI_LOG_TAG "SMI"
-
-#define SMI_DT_SUPPORT
-
-#define LARB_BACKUP_REG_SIZE 128
-#define SMI_COMMON_BACKUP_REG_NUM 8
-
-#define SF_HWC_PIXEL_MAX_NORMAL (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_VR (1920 * 1080 * 4 + 1036800) // 4.5 FHD size
-#define SF_HWC_PIXEL_MAX_VP (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (1920 * 1080 * 1)
-
-#define SMIDBG(level, x...) \
- do{ \
- if (smi_debug_level >= (level)) \
- SMIMSG(x); \
- } while (0)
-
-typedef struct {
- spinlock_t SMI_lock;
- unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT]; //one bit represent one module
-} SMI_struct;
-
-static SMI_struct g_SMIInfo;
-
-/* LARB BASE ADDRESS */
-static unsigned long gLarbBaseAddr[SMI_LARB_NR] = { 0, 0, 0, 0};
-
-// DT porting
-unsigned long smi_reg_base_common_ext = 0;
-unsigned long smi_reg_base_barb0 = 0;
-unsigned long smi_reg_base_barb1 = 0;
-unsigned long smi_reg_base_barb2 = 0;
-unsigned long smi_reg_base_barb3 = 0;
-
-#define SMI_REG_REGION_MAX 5
-#define SMI_COMMON_REG_INDX 0
-#define SMI_LARB0_REG_INDX 1
-#define SMI_LARB1_REG_INDX 2
-#define SMI_LARB2_REG_INDX 3
-#define SMI_LARB3_REG_INDX 4
-
-static unsigned long gSMIBaseAddrs[SMI_REG_REGION_MAX];
-void register_base_dump( void );
-
-//#ifdef SMI_DT_SUPPORT
-char* smi_get_region_name( unsigned int region_indx );
-//#endif //SMI_DT_SUPPORT
-
-struct smi_device{
- struct device *dev;
- void __iomem *regs[SMI_REG_REGION_MAX];
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
- struct clk *smi_common_clk;
- struct clk *smi_larb0_clk;
- struct clk *img_larb2_clk;
- struct clk *vdec0_vdec_clk;
- struct clk *vdec1_larb_clk;
- struct clk *venc_larb_clk;
-#endif
-};
-static struct smi_device *smi_dev = NULL;
-
-static struct device* smiDeviceUevent = NULL;
-
-static struct cdev * pSmiDev = NULL;
-
-static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
- SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM};
-
-static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
-static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
-static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
-static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
-
-/* SMI COMMON register list to be backuped */
-static unsigned short
-g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = { 0x100, 0x104,
- 0x108, 0x10c, 0x110, 0x230, 0x234, 0x238 };
-static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
-
-static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 0, 1 };
-
-static unsigned short int * larb_port_backup[SMI_LARB_NR] = {
- larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup };
-
-// To keep the HW's init value
-static int is_default_value_saved = 0;
-static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
-
-static unsigned int wifi_disp_transaction = 0;
-
-/* debug level */
-static unsigned int smi_debug_level = 0;
-
-/* tuning mode, 1 for register ioctl */
-static unsigned int smi_tuning_mode = 0;
-
-static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
-
-static unsigned int* pLarbRegBackUp[SMI_LARB_NR];
-static int g_bInited = 0;
-
-static MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = { 0, 0, { 0, 0 }, { 0, 0 }, { 0,
- 0 }, { 0, 0 }, 0, 0, 0, SF_HWC_PIXEL_MAX_NORMAL };
-
-char *smi_port_name[][21] = { { /* 0 MMSYS */
- "disp_ovl0", "disp_rdma0", "disp_rdma1", "disp_wdma0", "disp_ovl1",
- "disp_rdma2", "disp_wdma1", "disp_od_r", "disp_od_w", "mdp_rdma0",
- "mdp_rdma1", "mdp_wdma", "mdp_wrot0", "mdp_wrot1" }, { /* 1 VDEC */
- "hw_vdec_mc_ext", "hw_vdec_pp_ext", "hw_vdec_ufo_ext", "hw_vdec_vld_ext",
- "hw_vdec_vld2_ext", "hw_vdec_avc_mv_ext", "hw_vdec_pred_rd_ext",
- "hw_vdec_pred_wr_ext", "hw_vdec_ppwrap_ext" }, { /* 2 ISP */
- "imgo", "rrzo", "aao", "lcso", "esfko", "imgo_d", "lsci", "lsci_d", "bpci",
- "bpci_d", "ufdi", "imgi", "img2o", "img3o", "vipi", "vip2i", "vip3i",
- "lcei", "rb", "rp", "wr" }, { /* 3 VENC */
- "venc_rcpu", "venc_rec", "venc_bsdma", "venc_sv_comv", "venc_rd_comv",
- "jpgenc_bsdma", "remdc_sdma", "remdc_bsdma", "jpgenc_rdma", "jpgenc_sdma",
- "jpgdec_wdma", "jpgdec_bsdma", "venc_cur_luma", "venc_cur_chroma",
- "venc_ref_luma", "venc_ref_chroma", "remdc_wdma", "venc_nbm_rdma",
- "venc_nbm_wdma" }, { /* 4 MJC */
- "mjc_mv_rd", "mjc_mv_wr", "mjc_dma_rd", "mjc_dma_wr" } };
-
-static unsigned long smi_reg_pa_base[SMI_REG_REGION_MAX] = { 0x14016000,
- 0x14015000, 0x16010000, 0x15001000, 0x17001000 };
-
-static void initSetting( void );
-static void vpSetting( void );
-static void vrSetting( void );
-static void icfpSetting( void );
-static void vpWfdSetting( void );
-
-static void smi_dumpLarb( unsigned int index );
-static void smi_dumpCommon( void );
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-static struct clk *get_smi_clk(char *smi_clk_name);
-#endif
-
-extern void smi_dumpDebugMsg( void );
-#if IS_ENABLED(CONFIG_COMPAT)
- long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
- #define MTK_SMI_COMPAT_ioctl NULL
-#endif
-
-// for slow motion force 30 fps
-extern int primary_display_force_set_vsync_fps( unsigned int fps );
-extern unsigned int primary_display_get_fps( void );
-
-// Use this function to get base address of Larb resgister
-// to support error checking
-unsigned long get_larb_base_addr( int larb_id ){
- unsigned long ret = 0;
- if( larb_id > SMI_LARB_NR || larb_id < 0 ){
- ret = SMI_ERROR_ADDR;
- }else{
- ret = gLarbBaseAddr[larb_id];
- }
- return ret;
-}
-
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-static struct clk *get_smi_clk(char *smi_clk_name){
-
- struct clk *smi_clk_ptr = NULL;
- smi_clk_ptr = devm_clk_get(smi_dev->dev, smi_clk_name);
- if (IS_ERR(smi_clk_ptr)) {
- SMIMSG("cannot get %s\n", smi_clk_name);
- smi_clk_ptr = NULL;
- }
- return smi_clk_ptr;
-}
-
-static void smi_enable_clk(struct clk *smi_clk, char * name){
- if(smi_clk!= NULL){
- int ret = 0;
- ret = clk_prepare_enable(smi_clk);
- if(ret){
- SMIMSG("clk_prepare_enable return error %d, %s\n", ret, name);
- }
- }else{
- SMIMSG("clk_prepare_enable error, smi_clk can't be NULL, %s\n", name);
- }
-}
-
-static void smi_disable_clk(struct clk *smi_clk, char * name){
- if(smi_clk!= NULL){
- clk_disable_unprepare(smi_clk);
- }else{
- SMIMSG("smi_disable_clk error, smi_clk can't be NULL, %s\n", name);
- }
-}
-#endif
-
-static int larb_clock_on( int larb_id ){
-
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
-#if !defined(SMI_INTERNAL_CCF_SUPPORT)
- case 0:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_DISP0_SMI_LARB0, name);
- break;
- case 1:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_VDEC1_LARB, name);
- break;
- case 2:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- break;
- case 3:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_VENC_LARB, name);
- break;
- //case 4:
- // enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- // enable_clock(MT_CG_MJC_SMI_LARB, name);
- // break;
-#else
- case 0:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->smi_larb0_clk, name);
- break;
- case 1:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->vdec1_larb_clk, name);
- break;
- case 2:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->img_larb2_clk, name);
- break;
- case 3:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->venc_larb_clk, name);
- break;
-#endif
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static int larb_clock_off( int larb_id ){
-
-#ifndef CONFIG_MTK_FPGA
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
-#if !defined(SMI_INTERNAL_CCF_SUPPORT)
- case 0:
- disable_clock(MT_CG_DISP0_SMI_LARB0, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 1:
- disable_clock(MT_CG_VDEC1_LARB, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 2:
- disable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 3:
- disable_clock(MT_CG_VENC_LARB, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- //case 4:
- // disable_clock(MT_CG_MJC_SMI_LARB, name);
- // disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- // break;
-#else
- case 0:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->smi_larb0_clk, name);
- break;
- case 1:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->vdec1_larb_clk, name);
- break;
- case 2:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->img_larb2_clk, name);
- break;
- case 3:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->venc_larb_clk, name);
- break;
-#endif
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static void backup_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- g_smi_common_backup[i] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i]);
- }
-}
-
-static void restore_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i],
- g_smi_common_backup[i]);
- }
-}
-
-static void backup_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
-
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- *backup_ptr = (unsigned short int) (M4U_ReadReg32(larb_base,
- larb_offset));
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* backup smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- backup_smi_common();
- }
-
- return;
-}
-
-static void restore_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- unsigned int backup_value = 0;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- /* restore smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- restore_smi_common();
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- backup_value = *backup_ptr;
- M4U_WriteReg32(larb_base, larb_offset, backup_value);
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* we do not backup 0x20 because it is a fixed setting */
- M4U_WriteReg32(larb_base, 0x20, larb_vc_setting[index]);
-
- /* turn off EMI empty OSTD dobule, fixed setting */
- M4U_WriteReg32(larb_base, 0x2c, 4);
-
- return;
-}
-
-static int larb_reg_backup( int larb ){
- unsigned int* pReg = pLarbRegBackUp[larb];
- unsigned long larb_base = gLarbBaseAddr[larb];
-
- *(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
-
- // *(pReg++) = M4U_ReadReg32(larb_base, SMI_SHARE_EN);
- // *(pReg++) = M4U_ReadReg32(larb_base, SMI_ROUTE_SEL);
-
- backup_larb_smi(larb);
-
- if( 0 == larb ){
- g_bInited = 0;
- }
-
- return 0;
-}
-
-static int smi_larb_init( unsigned int larb ){
- unsigned int regval = 0;
- unsigned int regval1 = 0;
- unsigned int regval2 = 0;
- unsigned long larb_base = get_larb_base_addr(larb);
-
- // Clock manager enable LARB clock before call back restore already, it will be disabled after restore call back returns
- // Got to enable OSTD before engine starts
- regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
-
- // TODO: FIX ME
- // regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0);
- // regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1);
-
- if( 0 == regval ){
- SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
- M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
- }else{
- SMIMSG(
- "Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n",
- larb_base, regval, regval1, regval2);
- smi_dumpDebugMsg();
- if( smi_debug_level >= 1 ){
- SMIERR(
- "DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
- larb_base, regval, regval1, regval2);
- }else{
- dump_stack();
- }
- }
-
- restore_larb_smi(larb);
-
- return 0;
-}
-
-int larb_reg_restore( int larb ){
- unsigned long larb_base = SMI_ERROR_ADDR;
- unsigned int regval = 0;
- unsigned int* pReg = NULL;
-
- larb_base = get_larb_base_addr(larb);
-
- // The larb assign doesn't exist
- if( larb_base == SMI_ERROR_ADDR ){
- SMIMSG("Can't find the base address for Larb%d\n", larb);
- return 0;
- }
-
- pReg = pLarbRegBackUp[larb];
-
- SMIDBG(1, "+larb_reg_restore(), larb_idx=%d \n", larb);
- SMIDBG(1, "m4u part restore, larb_idx=%d \n", larb);
- //warning: larb_con is controlled by set/clr
- regval = *(pReg++);
- M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
- M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
-
- //M4U_WriteReg32(larb_base, SMI_SHARE_EN, *(pReg++) );
- //M4U_WriteReg32(larb_base, SMI_ROUTE_SEL, *(pReg++) );
-
- smi_larb_init(larb);
-
- return 0;
-}
-
-// callback after larb clock is enabled
-void on_larb_power_on( struct larb_monitor *h, int larb_idx ){
- //M4ULOG("on_larb_power_on(), larb_idx=%d \n", larb_idx);
- larb_reg_restore(larb_idx);
-
- return;
-}
-// callback before larb clock is disabled
-void on_larb_power_off( struct larb_monitor *h, int larb_idx ){
- //M4ULOG("on_larb_power_off(), larb_idx=%d \n", larb_idx);
- larb_reg_backup(larb_idx);
-}
-
-static void restSetting( void ){
- //initialize OSTD to 1
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x1); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_rdma1
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wrot
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0x1); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0x1); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); //hw_vdec_pred_rd_ext
- M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); //hw_vdec_pred_wr_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
- M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); //hw_vdec_ppwrap_ext
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x1); //imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); //rrzo
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //aao
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //lcso
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //esfko
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); //imgo_s
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //lsci
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //lsci_d
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //bpci
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //bpci_d
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); //ufdi
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //imgi
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //img2o
- M4U_WriteReg32(LARB2_BASE, 0x234, 0x1); //img3o
- M4U_WriteReg32(LARB2_BASE, 0x238, 0x1); //vipi
- M4U_WriteReg32(LARB2_BASE, 0x23c, 0x1); //vip2i
- M4U_WriteReg32(LARB2_BASE, 0x240, 0x1); //vip3i
- M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); //lcei
- M4U_WriteReg32(LARB2_BASE, 0x248, 0x1); //rb
- M4U_WriteReg32(LARB2_BASE, 0x24c, 0x1); //rp
- M4U_WriteReg32(LARB2_BASE, 0x250, 0x1); //wr
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x214, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB3_BASE, 0x218, 0x1); //jpgenc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x21c, 0x1); //jpgdec_wdma
- M4U_WriteReg32(LARB3_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x1); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x1); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x1); //venc_ref_chroma
-}
-//Make sure clock is on
-static void initSetting( void ){
-
- /* save default larb regs */
- if( !is_default_value_saved ){
- SMIMSG("Save default config:\n");
- default_val_smi_l1arb[0] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB0);
- default_val_smi_l1arb[1] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB1);
- default_val_smi_l1arb[2] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB2);
- default_val_smi_l1arb[3] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB3);
- SMIMSG("l1arb[0-2]= 0x%x, 0x%x, 0x%x\n", default_val_smi_l1arb[0],
- default_val_smi_l1arb[1], default_val_smi_l1arb[2]);
- SMIMSG("l1arb[3]= 0x%x\n", default_val_smi_l1arb[3]);
-
- is_default_value_saved = 1;
- }
-
- // Keep the HW's init setting in REG_SMI_L1ARB0 ~ REG_SMI_L1ARB4
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0,
- default_val_smi_l1arb[0]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1,
- default_val_smi_l1arb[1]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2,
- default_val_smi_l1arb[2]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3,
- default_val_smi_l1arb[3]);
-
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x100, 0x1b);
- // 0x220 is controlled by M4U
- // M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x220, 0x1); //disp: emi0, other:emi1
- M4U_WriteReg32(
- SMI_COMMON_EXT_BASE,
- 0x234,
- (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
- + (0x4 << 10) + (0x4 << 5) + 0x5);
- // To be checked with DE
- //M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x238, (0x2 << 25) + (0x3 << 20) + (0x4 << 15) + (0x5 << 10) + (0x6 << 5) + 0x8);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x230, 0x1f + (0x8 << 4) + (0x7 << 9));
-
- // Set VC priority: MMSYS = ISP > VENC > VDEC = MJC
- M4U_WriteReg32(LARB0_BASE, 0x20, 0x0); // MMSYS
- M4U_WriteReg32(LARB1_BASE, 0x20, 0x2); // VDEC
- M4U_WriteReg32(LARB2_BASE, 0x20, 0x1); // ISP
- M4U_WriteReg32(LARB3_BASE, 0x20, 0x1); // VENC
- //M4U_WriteReg32(LARB4_BASE, 0x20, 0x2); // MJC
-
- // for ISP HRT
- M4U_WriteReg32(LARB2_BASE, 0x24,
- (M4U_ReadReg32(LARB2_BASE, 0x24) & 0xf7ffffff));
-
- // for UI
- restSetting();
-
- //SMI common BW limiter
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, default_val_smi_l1arb[0]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x1000);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x1000);
- //M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x114, 0x1000);
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 31); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x204, 4); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x208, 6); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 31); //disp_rdma1
- M4U_WriteReg32(LARB0_BASE, 0x210, 4); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); //mdp_wrot
-
-}
-
-static void icfpSetting( void ){
- vrSetting();
-}
-
-
-
-static void vrSetting( void ){
- //SMI BW limit
- // vss
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x11F1); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x1000); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x120A); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x11F3); //LARB3, VENC+JPG
- //M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, 0x1000); //LARB4, MJC
-
- //SMI LARB config
-
- restSetting();
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x2); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
-
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x4); //rrzo
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //aao
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //esfko
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); //lsci
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //bpci
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x4); //imgi
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //img2o
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x2); //img3o
- M4U_WriteReg32(LARB2_BASE, 0x234, 0x2); //vipi
- M4U_WriteReg32(LARB2_BASE, 0x238, 0x1); //vip2i
- M4U_WriteReg32(LARB2_BASE, 0x23c, 0x1); //vip3i
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x214, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB3_BASE, 0x218, 0x1); //jpgenc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x2); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-}
-
-static void vpSetting( void ){
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x1262); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x11E9); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x123D); //LARB3, VENC+JPG
- //M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x214, 0x1000); //LARB4, MJC
-
- restSetting();
-
-
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x2); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
-
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x1); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-
-
-}
-
-
-
-
-static void vpWfdSetting( void ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x1262); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x11E9); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x123D); //LARB3, VENC+JPG
- //M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x214, 0x1000); //LARB4, MJC
-
- restSetting();
-
-
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x2); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
-
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x1); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-}
-
-// Fake mode check, e.g. WFD
-static int fake_mode_handling(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- if( p_conf->scenario == SMI_BWC_SCEN_WFD ){
- if( p_conf->b_on_off ){
- wifi_disp_transaction = 1;
- SMIMSG("Enable WFD in profile: %d\n", smi_profile);
- }else{
- wifi_disp_transaction = 0;
- SMIMSG("Disable WFD in profile: %d\n", smi_profile);
- }
- return 1;
- }else{
- return 0;
- }
-}
-
-static int ovl_limit_uevent( int bwc_scenario, int ovl_pixel_limit ){
- int err = 0;
- char *envp[3];
- char scenario_buf[32] = "";
- char ovl_limit_buf[32] = "";
-
- // scenario_buf = kzalloc(sizeof(char)*128, GFP_KERNEL);
- // ovl_limit_buf = kzalloc(sizeof(char)*128, GFP_KERNEL);
-
- snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
- snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
-
- envp[0] = scenario_buf;
- envp[1] = ovl_limit_buf;
- envp[2] = NULL;
-
- if( pSmiDev != NULL ){
- // err = kobject_uevent_env(&(pSmiDev->kobj), KOBJ_CHANGE, envp);
- // use smi_dev->dev.lobj instead
- // err = kobject_uevent_env(&(smi_dev->dev->kobj), KOBJ_CHANGE, envp);
- // user smiDeviceUevent->kobj instead
- err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
- SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit,
- bwc_scenario);
- }
- //kfree(scenario_buf);
- //kfree(ovl_limit_buf);
-
- if(err < 0)
- SMIMSG(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err);
-
- return err;
-}
-
-static int smi_bwc_config(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- int i;
- int result = 0;
- unsigned int u4Concurrency = 0;
- MTK_SMI_BWC_SCEN eFinalScen;
- static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
-
- if( smi_tuning_mode == 1 ){
- SMIMSG("Doesn't change profile in tunning mode");
- return 0;
- }
- //#ifdef SMI_DT_SUPPORT
- //register_base_dump();
- //#endif
-
- spin_lock(&g_SMIInfo.SMI_lock);
- result = fake_mode_handling(p_conf, pu4LocalCnt);
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- // Fake mode is not a real SMI profile, so we need to return here
- if( result == 1 ){
- return 0;
- }
-
- if( (SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario) ){
- SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n",
- p_conf->scenario);
- return -1;
- }
-
- //Debug - S
- //SMIMSG("SMI setTo%d,%s,%d\n" , p_conf->scenario , (p_conf->b_on_off ? "on" : "off") , ePreviousFinalScen);
- //Debug - E
-
- if (p_conf->b_on_off) {
- /* set mmdvfs step according to certain scenarios */
- mmdvfs_notify_scenario_enter(p_conf->scenario);
- } else {
- /* set mmdvfs step to default after the scenario exits */
- mmdvfs_notify_scenario_exit(p_conf->scenario);
- }
-
- spin_lock(&g_SMIInfo.SMI_lock);
-
- if( p_conf->b_on_off ){
- //turn on certain scenario
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
-
- if( NULL != pu4LocalCnt ){
- pu4LocalCnt[p_conf->scenario] += 1;
- }
- }else{
- //turn off certain scenario
- if( 0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] ){
- SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
- p_conf->scenario,
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
- }else{
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
- }
-
- if( NULL != pu4LocalCnt ){
- if( 0 == pu4LocalCnt[p_conf->scenario] ){
- SMIMSG(
- "Process : %s did too many turning off for local SMI profile:%d,%d\n",
- current->comm, p_conf->scenario,
- pu4LocalCnt[p_conf->scenario]);
- }else{
- pu4LocalCnt[p_conf->scenario] -= 1;
- }
- }
- }
-
- for( i = 0; i < SMI_BWC_SCEN_CNT; i++ ){
- if( g_SMIInfo.pu4ConcurrencyTable[i] ){
- u4Concurrency |= (1 << i);
- }
- }
-
- /* notify mmdvfs concurrency */
- mmdvfs_notify_scenario_concurrency(u4Concurrency);
-
- if( (1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_MM_GPU;
- }else if( (1 << SMI_BWC_SCEN_ICFP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_ICFP;
- }else if( (1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR_SLOW;
- }else if( (1 << SMI_BWC_SCEN_VR) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR;
- }else if( (1 << SMI_BWC_SCEN_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VP;
- }else if( (1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
- }else if( (1 << SMI_BWC_SCEN_VENC) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VENC;
- }else{
- eFinalScen = SMI_BWC_SCEN_NORMAL;
- }
-
- if( ePreviousFinalScen == eFinalScen ){
- SMIMSG("Scen equal%d,don't change\n", eFinalScen);
- spin_unlock(&g_SMIInfo.SMI_lock);
- return 0;
- }else{
- ePreviousFinalScen = eFinalScen;
- }
-
- /* turn on larb clock */
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_on(i);
- }
-
- smi_profile = eFinalScen;
-
- /* Bandwidth Limiter */
- switch( eFinalScen ){
- case SMI_BWC_SCEN_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
- if( wifi_disp_transaction ){
- vpSetting();
- }else{
- vpWfdSetting();
- }
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_SWDEC_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
- vpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_ICFP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_ICFP");
- icfpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
- case SMI_BWC_SCEN_VR:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
-
- case SMI_BWC_SCEN_VR_SLOW:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- smi_profile = SMI_BWC_SCEN_VR_SLOW;
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_VENC:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_NORMAL:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- case SMI_BWC_SCEN_MM_GPU:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- default:
- SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
- initSetting();
- g_smi_bwc_mm_info .hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
- }
-
- /*turn off larb clock*/
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
-
- /* force 30 fps in VR slow motion, because disp driver set fps apis got mutex, call these APIs only when necessary */
- {
- static unsigned int current_fps = 0;
-
- if( (eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30) ){ /* force 30 fps in VR slow motion profile */
- primary_display_force_set_vsync_fps(30);
- current_fps = 30;
- SMIMSG("[SMI_PROFILE] set 30 fps\n");
- }else if( (eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30) ){ /* back to normal fps */
- current_fps = primary_display_get_fps();
- primary_display_force_set_vsync_fps(current_fps);
- SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
- }
- }
-
- SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
- (p_conf->b_on_off ? "on" : "off"), eFinalScen,
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
-
- //Debug usage - S
- //smi_dumpDebugMsg();
- //SMIMSG("Config:%d,%d,%d\n" , eFinalScen , g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL] , (NULL == pu4LocalCnt ? (-1) : pu4LocalCnt[p_conf->scenario]));
- //Debug usage - E
-
- return 0;
-}
-
-struct larb_monitor larb_monitor_handler =
-{
- .level = LARB_MONITOR_LEVEL_HIGH,
- .backup = on_larb_power_off,
- .restore = on_larb_power_on
-};
-
-int smi_common_init( void ){
- int i;
-
- SMIMSG("Enter smi_common_init\n")
- for( i = 0; i < SMI_LARB_NR; i++ ){
- pLarbRegBackUp[i] = (unsigned int*) kmalloc(LARB_BACKUP_REG_SIZE,
- GFP_KERNEL | __GFP_ZERO);
- if( pLarbRegBackUp[i] == NULL ){
- SMIERR("pLarbRegBackUp kmalloc fail %d \n", i);
- }
- }
-
- /*
- * make sure all larb power is on before we register callback func.
- * then, when larb power is first off, default register value will be backed up.
- */
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- SMIMSG("Enalbe CLK of larb%d\n", i );
- larb_clock_on(i);
- }
-
- /* apply init setting after kernel boot */
- SMIMSG("Enter smi_common_init\n")
- initSetting();
-
- register_larb_monitor(&larb_monitor_handler);
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- return 0;
-}
-
-static int smi_open( struct inode *inode, struct file *file ){
- file->private_data = kmalloc(SMI_BWC_SCEN_CNT * sizeof(unsigned int),
- GFP_ATOMIC);
-
- if( NULL == file->private_data ){
- SMIMSG("Not enough entry for DDP open operation\n");
- return -ENOMEM;
- }
-
- memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- return 0;
-}
-
-static int smi_release( struct inode *inode, struct file *file ){
-
-#if 0
- unsigned long u4Index = 0;
- unsigned long u4AssignCnt = 0;
- unsigned long * pu4Cnt = (unsigned long *)file->private_data;
- MTK_SMI_BWC_CONFIG config;
-
- for(; u4Index < SMI_BWC_SCEN_CNT; u4Index += 1)
- {
- if(pu4Cnt[u4Index])
- {
- SMIMSG("Process:%s does not turn off BWC properly , force turn off %d\n" , current->comm , u4Index);
- u4AssignCnt = pu4Cnt[u4Index];
- config.b_on_off = 0;
- config.scenario = (MTK_SMI_BWC_SCEN)u4Index;
- do
- {
- smi_bwc_config( &config , pu4Cnt);
- }
- while(0 < u4AssignCnt);
- }
- }
-#endif
-
- if( NULL != file->private_data ){
- kfree(file->private_data);
- file->private_data = NULL;
- }
-
- return 0;
-}
-/* GMP start */
-
-void smi_bwc_mm_info_set( int property_id, long val1, long val2 ){
-
- switch( property_id ){
- case SMI_BWC_INFO_CON_PROFILE:
- g_smi_bwc_mm_info.concurrent_profile = (int) val1;
- break;
- case SMI_BWC_INFO_SENSOR_SIZE:
- g_smi_bwc_mm_info.sensor_size[0] = val1;
- g_smi_bwc_mm_info.sensor_size[1] = val2;
- break;
- case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
- g_smi_bwc_mm_info.video_record_size[0] = val1;
- g_smi_bwc_mm_info.video_record_size[1] = val2;
- break;
- case SMI_BWC_INFO_DISP_SIZE:
- g_smi_bwc_mm_info.display_size[0] = val1;
- g_smi_bwc_mm_info.display_size[1] = val2;
- break;
- case SMI_BWC_INFO_TV_OUT_SIZE:
- g_smi_bwc_mm_info.tv_out_size[0] = val1;
- g_smi_bwc_mm_info.tv_out_size[1] = val2;
- break;
- case SMI_BWC_INFO_FPS:
- g_smi_bwc_mm_info.fps = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
- g_smi_bwc_mm_info.video_encode_codec = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
- g_smi_bwc_mm_info.video_decode_codec = (int) val1;
- break;
- }
-}
-
-/* GMP end */
-
-static long smi_ioctl(
- struct file * pFile,
- unsigned int cmd,
- unsigned long param ){
- int ret = 0;
-
- // unsigned long * pu4Cnt = (unsigned long *)pFile->private_data;
-
- switch( cmd ){
-
- /* disable reg access ioctl by default for possible security holes */
- // TBD: check valid SMI register range
- case MTK_IOC_SMI_BWC_CONFIG: {
- MTK_SMI_BWC_CONFIG cfg;
- ret = copy_from_user(&cfg, (void*) param,
- sizeof(MTK_SMI_BWC_CONFIG));
- if( ret ){
- SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
- return -EFAULT;
- }
-
- ret = smi_bwc_config(&cfg, NULL);
-
- break;
- }
- /* GMP start */
- case MTK_IOC_SMI_BWC_INFO_SET: {
- MTK_SMI_BWC_INFO_SET cfg;
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... start");
- ret = copy_from_user(&cfg, (void *) param,
- sizeof(MTK_SMI_BWC_INFO_SET));
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- /* Set the address to the value assigned by user space program */
- smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish");
- break;
- }
- case MTK_IOC_SMI_BWC_INFO_GET: {
- ret = copy_to_user((void *) param, (void *) &g_smi_bwc_mm_info,
- sizeof(MTK_SMI_BWC_MM_INFO));
-
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish");
- break;
- }
- /* GMP end */
-
- case MTK_IOC_SMI_DUMP_LARB: {
- unsigned int larb_index;
-
- ret = copy_from_user(&larb_index, (void*) param,
- sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpLarb(larb_index);
- }
- break;
-
- case MTK_IOC_SMI_DUMP_COMMON: {
- unsigned int arg;
-
- ret = copy_from_user(&arg, (void*) param, sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpCommon();
- }
- break;
-
- default:
- return -1;
- }
-
- return ret;
-}
-
-static const struct file_operations smiFops =
-{
- .owner = THIS_MODULE,
- .open = smi_open,
- .release = smi_release,
- .unlocked_ioctl = smi_ioctl,
- .compat_ioctl = MTK_SMI_COMPAT_ioctl,
-};
-
-static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
-static inline int smi_register( void ){
- if( alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI") ){
- SMIERR("Allocate device No. failed");
- return -EAGAIN;
- }
- //Allocate driver
- pSmiDev = cdev_alloc();
-
- if( NULL == pSmiDev ){
- unregister_chrdev_region(smiDevNo, 1);
- SMIERR("Allocate mem for kobject failed");
- return -ENOMEM;
- }
-
- //Attatch file operation.
- cdev_init(pSmiDev, &smiFops);
- pSmiDev->owner = THIS_MODULE;
-
- //Add to system
- if( cdev_add(pSmiDev, smiDevNo, 1) ){
- SMIERR("Attatch file operation failed");
- unregister_chrdev_region(smiDevNo, 1);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-static struct class *pSmiClass = NULL;
-
-static int smi_probe( struct platform_device *pdev ){
-
- int i;
-
- static unsigned int smi_probe_cnt = 0;
- struct device* smiDevice = NULL;
- SMIMSG("Enter smi_probe\n");
- //Debug only
- if( smi_probe_cnt != 0 ){
- SMIERR("Onlye support 1 SMI driver probed\n");
- return 0;
- }
- smi_probe_cnt++;
- SMIMSG("Allocate smi_dev space\n");
- smi_dev = krealloc(smi_dev, sizeof(struct smi_device), GFP_KERNEL);
-
- if( smi_dev == NULL ){
- SMIERR("Unable to allocate memory for smi driver\n");
- return -ENOMEM;
- }
- if( NULL == pdev ){
- SMIERR("platform data missed\n");
- return -ENXIO;
- }
- // Keep the device structure
- smi_dev->dev = &pdev->dev;
-
- // Map registers
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- SMIMSG("Save region: %d\n", i);
- smi_dev->regs[i] = (void *) of_iomap(pdev->dev.of_node, i);
-
- if( !smi_dev->regs[i] ){
- SMIERR("Unable to ioremap registers, of_iomap fail, i=%d \n", i);
- return -ENOMEM;
- }
-
- // Record the register base in global variable
- gSMIBaseAddrs[i] = (unsigned long) (smi_dev->regs[i]);
- SMIMSG("DT, i=%d, region=%s, map_addr=0x%p, reg_pa=0x%lx\n", i,
- smi_get_region_name(i), smi_dev->regs[i], smi_reg_pa_base[i]);
- }
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
- smi_dev->smi_common_clk = get_smi_clk("smi-common");
- smi_dev->smi_larb0_clk = get_smi_clk("smi-larb0");
- smi_dev->img_larb2_clk = get_smi_clk("img-larb2");
- smi_dev->vdec0_vdec_clk = get_smi_clk("vdec0-vdec");
- smi_dev->vdec1_larb_clk = get_smi_clk("vdec1-larb");
- smi_dev->venc_larb_clk = get_smi_clk("venc-larb");
-#endif
-
- SMIMSG("Execute smi_register\n");
- if( smi_register() ){
- dev_err(&pdev->dev, "register char failed\n");
- return -EAGAIN;
- }
-
- pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
- if(IS_ERR(pSmiClass)) {
- int ret = PTR_ERR(pSmiClass);
- SMIERR("Unable to create class, err = %d", ret);
- return ret;
- }
- SMIMSG("Create davice\n");
- smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
- smiDeviceUevent = smiDevice;
-
- SMIMSG("SMI probe done.\n");
-
- // To adapt the legacy codes
- smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
- smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
- smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
- smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
- smi_reg_base_barb3 = gSMIBaseAddrs[SMI_LARB3_REG_INDX];
- //smi_reg_base_barb4 = gSMIBaseAddrs[SMI_LARB4_REG_INDX];
-
- gLarbBaseAddr[0] = LARB0_BASE;
- gLarbBaseAddr[1] = LARB1_BASE;
- gLarbBaseAddr[2] = LARB2_BASE;
- gLarbBaseAddr[3] = LARB3_BASE;
- //gLarbBaseAddr[4] = LARB4_BASE;
-
- SMIMSG("Execute smi_common_init\n");
- smi_common_init();
-
- SMIMSG("Execute SMI_DBG_Init\n");
- SMI_DBG_Init();
- return 0;
-
-}
-
-char* smi_get_region_name( unsigned int region_indx ){
- switch( region_indx ){
- case SMI_COMMON_REG_INDX:
- return "smi_common";
- case SMI_LARB0_REG_INDX:
- return "larb0";
- case SMI_LARB1_REG_INDX:
- return "larb1";
- case SMI_LARB2_REG_INDX:
- return "larb2";
- case SMI_LARB3_REG_INDX:
- return "larb3";
- //case SMI_LARB4_REG_INDX:
- // return "larb4";
- default:
- SMIMSG("invalid region id=%d", region_indx);
- return "unknown";
- }
-}
-
-void register_base_dump( void ){
- int i = 0;
- unsigned long pa_value = 0;
- unsigned long va_value = 0;
-
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- va_value = gSMIBaseAddrs[i];
- pa_value = virt_to_phys((void*) va_value);
- SMIMSG("REG BASE:%s-->VA=0x%lx,PA=0x%lx,SPEC=0x%lx\n",
- smi_get_region_name(i), va_value, pa_value, smi_reg_pa_base[i]);
- }
-}
-
-static int smi_remove( struct platform_device *pdev ){
- cdev_del(pSmiDev);
- unregister_chrdev_region(smiDevNo, 1);
- device_destroy(pSmiClass, smiDevNo);
- class_destroy( pSmiClass);
- return 0;
-}
-
-static int smi_suspend( struct platform_device *pdev, pm_message_t mesg ){
- return 0;
-}
-
-static int smi_resume( struct platform_device *pdev ){
- return 0;
-}
-
-#ifdef SMI_DT_SUPPORT
-static const struct of_device_id smi_of_ids[] ={
- { .compatible = "mediatek,SMI_COMMON",},
- {}
-};
-#endif //SMI_DT_SUPPORT
-static struct platform_driver smiDrv ={
- .probe = smi_probe,
- .remove = smi_remove,
- .suspend= smi_suspend,
- .resume = smi_resume,
- .driver ={
- .name = "MTK_SMI",
- .owner = THIS_MODULE,
-#ifdef SMI_DT_SUPPORT
- .of_match_table = smi_of_ids,
-#endif //SMI_DT_SUPPORT
- }
-};
-
-static int __init smi_init(void)
-{
- SMIMSG("smi_init enter\n");
- spin_lock_init(&g_SMIInfo.SMI_lock);
- /* MMDVFS init */
- mmdvfs_init(&g_smi_bwc_mm_info);
-
- memset(g_SMIInfo.pu4ConcurrencyTable , 0 , SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- // Informs the kernel about the function to be called
- // if hardware matching MTK_SMI has been found
- SMIMSG("register platform driver\n");
- if (platform_driver_register(&smiDrv)){
- SMIERR("failed to register MAU driver");
- return -ENODEV;
- }
- SMIMSG("exit smi_init\n");
- return 0;
-}
-
-static void __exit smi_exit(void)
-{
- platform_driver_unregister(&smiDrv);
-
-}
-
-static void smi_dumpCommonDebugMsg( int output_gce_buffer ){
- unsigned long u4Base;
- int smiCommonClkEnabled = 0;
-
- smiCommonClkEnabled = clock_is_on(MT_CG_DISP0_SMI_COMMON);
- //SMI COMMON dump
- if( smi_debug_level == 0 && (!smiCommonClkEnabled) ){
- SMIMSG3(output_gce_buffer, "===SMI common clock is disabled===\n");
- return;
- }
-
- SMIMSG3(output_gce_buffer, "===SMI common reg dump, CLK: %d===\n", smiCommonClkEnabled);
-
- u4Base = SMI_COMMON_EXT_BASE;
- SMIMSG3(output_gce_buffer, "[0x100,0x104,0x108]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x100), M4U_ReadReg32(u4Base, 0x104),
- M4U_ReadReg32(u4Base, 0x108));
- SMIMSG3(output_gce_buffer, "[0x10C,0x110,0x114]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x10C), M4U_ReadReg32(u4Base, 0x110),
- M4U_ReadReg32(u4Base, 0x114));
- SMIMSG3(output_gce_buffer, "[0x220,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x220), M4U_ReadReg32(u4Base, 0x230),
- M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base, 0x238));
- SMIMSG3(output_gce_buffer, "[0x400,0x404,0x408]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x400), M4U_ReadReg32(u4Base, 0x404),
- M4U_ReadReg32(u4Base, 0x408));
- SMIMSG3(output_gce_buffer, "[0x40C,0x430,0x440]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x40C), M4U_ReadReg32(u4Base, 0x430),
- M4U_ReadReg32(u4Base, 0x440));
-
- // TBD: M4U should dump these
- /*
- // For VA and PA check:
- // 0x1000C5C0 , 0x1000C5C4, 0x1000C5C8, 0x1000C5CC, 0x1000C5D0
- u4Base = SMI_COMMON_AO_BASE;
- SMIMSG("===SMI always on reg dump===\n");
- SMIMSG("[0x5C0,0x5C4,0x5C8]=[0x%x,0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5C0),M4U_ReadReg32(u4Base , 0x5C4),M4U_ReadReg32(u4Base , 0x5C8));
- SMIMSG("[0x5CC,0x5D0]=[0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5CC),M4U_ReadReg32(u4Base , 0x5D0));
- */
-}
-static int smi_larb_clock_is_on( unsigned int larb_index ){
-
- int result = 0;
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- switch( larb_index ){
- case 0:
- result = clock_is_on(MT_CG_DISP0_SMI_LARB0);
- break;
- case 1:
- result = clock_is_on(MT_CG_VDEC1_LARB);
- break;
- case 2:
- result = clock_is_on(MT_CG_IMAGE_LARB2_SMI);
- break;
- case 3:
- result = clock_is_on(MT_CG_VENC_LARB);
- break;
-// case 4:
-// result = clock_is_on(MT_CG_MJC_SMI_LARB);
-// break;
- default:
- result = 0;
- break;
- }
-#endif // !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- return result;
-
-}
-static void smi_dumpLarbDebugMsg( unsigned int u4Index ){
- unsigned long u4Base = 0;
-
- int larbClkEnabled = 0;
-
- u4Base = get_larb_base_addr(u4Index);
-
- larbClkEnabled = smi_larb_clock_is_on(u4Index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG("Doesn't support reg dump for Larb%d\n", u4Index);
-
- return;
- }else if( (larbClkEnabled != 0) || smi_debug_level > 0 ){
- SMIMSG("===SMI LARB%d reg dump, CLK: %d===\n", u4Index, larbClkEnabled);
-
- // Staus Registers
- SMIMSG("[0x0,0x8,0x10]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
- M4U_ReadReg32(u4Base, 0x8), M4U_ReadReg32(u4Base, 0x10));
- SMIMSG("[0x24,0x50,0x60]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x24), M4U_ReadReg32(u4Base, 0x50),
- M4U_ReadReg32(u4Base, 0x60));
- SMIMSG("[0xa0,0xa4,0xa8]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xa0), M4U_ReadReg32(u4Base, 0xa4),
- M4U_ReadReg32(u4Base, 0xa8));
- SMIMSG("[0xac,0xb0,0xb4]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xac), M4U_ReadReg32(u4Base, 0xb0),
- M4U_ReadReg32(u4Base, 0xb4));
- SMIMSG("[0xb8,0xbc,0xc0]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xb8), M4U_ReadReg32(u4Base, 0xbc),
- M4U_ReadReg32(u4Base, 0xc0));
- SMIMSG("[0xc8,0xcc]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xc8),
- M4U_ReadReg32(u4Base, 0xcc));
- // Settings
- SMIMSG("[0x200, 0x204, 0x208]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x200), M4U_ReadReg32(u4Base, 0x204),
- M4U_ReadReg32(u4Base, 0x208));
-
- SMIMSG("[0x20c, 0x210, 0x214]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x20c), M4U_ReadReg32(u4Base, 0x210),
- M4U_ReadReg32(u4Base, 0x214));
-
- SMIMSG("[0x218, 0x21c, 0x220]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x218), M4U_ReadReg32(u4Base, 0x21c),
- M4U_ReadReg32(u4Base, 0x220));
-
- SMIMSG("[0x224, 0x228, 0x22c]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x224), M4U_ReadReg32(u4Base, 0x228),
- M4U_ReadReg32(u4Base, 0x22c));
-
- SMIMSG("[0x230, 0x234, 0x238]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234),
- M4U_ReadReg32(u4Base, 0x238));
-
- SMIMSG("[0x23c, 0x240, 0x244]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x23c), M4U_ReadReg32(u4Base, 0x240),
- M4U_ReadReg32(u4Base, 0x244));
-
- SMIMSG("[0x248, 0x24c]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x248),
- M4U_ReadReg32(u4Base, 0x24c));
- }else{
- SMIMSG("===SMI LARB%d clock is disabled===\n", u4Index);
- }
-
-}
-
-static void smi_dump_format(
- unsigned long base,
- unsigned int from,
- unsigned int to ){
- int i, j, left;
- unsigned int value[8];
-
- for( i = from; i <= to; i += 32 ){
- for( j = 0; j < 8; j++ ){
- value[j] = M4U_ReadReg32(base, i + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1],
- value[2], value[3], value[4], value[5], value[6], value[7]);
- }
-
- left = ((from - to) / 4 + 1) % 8;
-
- if( left ){
- memset(value, 0, 8 * sizeof(unsigned int));
-
- for( j = 0; j < left; j++ ){
- value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0],
- value[1], value[2], value[3], value[4], value[5], value[6],
- value[7]);
- }
-}
-
-static void smi_dumpLarb( unsigned int index ){
- unsigned long u4Base;
-
- u4Base = get_larb_base_addr(index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
-
- return;
- }else{
- SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
-
- smi_dump_format(u4Base, 0, 0x434);
- smi_dump_format(u4Base, 0xF00, 0xF0C);
- }
-}
-
-static void smi_dumpCommon( void ){
- SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
-
- smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x444);
-}
-
-void smi_dumpDebugMsg( void ){
- unsigned int u4Index;
-
- // SMI COMMON dump, 0 stands for not pass log to CMDQ error dumping messages
- smi_dumpCommonDebugMsg(0);
-
- // dump all SMI LARB
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- smi_dumpLarbDebugMsg(u4Index);
- }
-}
-
-int smi_debug_bus_hanging_detect( unsigned int larbs, int show_dump ){
- return smi_debug_bus_hanging_detect_ext(larbs, show_dump, 0);
-}
-
-//output_gce_buffer = 1, write log into kernel log and CMDQ buffer. dual_buffer = 0, write log into kernel log only
-int smi_debug_bus_hanging_detect_ext( unsigned int larbs, int show_dump, int output_gce_buffer){
-
- int i = 0;
- int dump_time = 0;
- int is_smi_issue = 0;
- int status_code = 0;
- // Keep the dump result
- unsigned char smi_common_busy_count = 0;
- volatile unsigned int reg_temp = 0;
- unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
- unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };
-
- // dump resister and save resgister status
- for( dump_time = 0; dump_time < 5; dump_time++ ){
- unsigned int u4Index = 0;
- reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x440);
- if( (reg_temp & (1 << 0)) == 0 ){
- // smi common is busy
- smi_common_busy_count++;
- }
- // Dump smi common regs
- if( show_dump != 0 ){
- smi_dumpCommonDebugMsg(output_gce_buffer);
- }
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- unsigned long u4Base = get_larb_base_addr(u4Index);
- if( u4Base != SMI_ERROR_ADDR ){
- reg_temp = M4U_ReadReg32(u4Base, 0x0);
- if( reg_temp != 0 ){
- // Larb is busy
- smi_larb_busy_count[u4Index]++;
- }
- smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
- if( show_dump != 0 ){
- smi_dumpLarbDebugMsg(u4Index);
- }
- }
- }
-
- }
-
- // Show the checked result
- for( i = 0; i < SMI_LARB_NR; i++ ){ // Check each larb
- if( SMI_DGB_LARB_SELECT(larbs, i) ){
- // larb i has been selected
- // Get status code
-
- if( smi_larb_busy_count[i] == 5 ){ // The larb is always busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 1;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 2;
- }else{
- status_code = 5; // smi common is sometimes busy and idle
- }
- }else if( smi_larb_busy_count[i] == 0 ){ // The larb is always idle
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 3;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 4;
- }else{
- status_code = 6; // smi common is sometimes busy and idle
- }
- }else{ //sometime the larb is busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 7;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 8;
- }else{
- status_code = 9; // smi common is sometimes busy and idle
- }
- }
-
- // Send the debug message according to the final result
- switch( status_code ){
- case 1:
- case 3:
- case 5:
- case 7:
- case 8:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- break;
- case 2:
- if( smi_larb_mmu_status[i] == 0 ){
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- }else{
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> MMU port config error\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- is_smi_issue = 1;
- }
- break;
- case 4:
- case 6:
- case 9:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- default:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- }
- }
-
- }
-
- return is_smi_issue;
-}
-
-void smi_client_status_change_notify( int module, int mode ){
-
-}
-
-#if IS_ENABLED(CONFIG_COMPAT)
-// 32 bits process ioctl support:
-// This is prepared for the future extension since currently the sizes of 32 bits
-// and 64 bits smi parameters are the same.
-
-typedef struct
-{
- compat_int_t scenario;
- compat_int_t b_on_off; //0 : exit this scenario , 1 : enter this scenario
-}MTK_SMI_COMPAT_BWC_CONFIG;
-
-typedef struct
-{
- compat_int_t property;
- compat_int_t value1;
- compat_int_t value2;
-}MTK_SMI_COMPAT_BWC_INFO_SET;
-
-typedef struct
-{
- compat_uint_t flag; // Reserved
- compat_int_t concurrent_profile;
- compat_int_t sensor_size[2];
- compat_int_t video_record_size[2];
- compat_int_t display_size[2];
- compat_int_t tv_out_size[2];
- compat_int_t fps;
- compat_int_t video_encode_codec;
- compat_int_t video_decode_codec;
- compat_int_t hw_ovl_limit;
-}MTK_SMI_COMPAT_BWC_MM_INFO;
-
-#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_COMPAT_BWC_CONFIG)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_COMPAT_BWC_INFO_SET)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_COMPAT_BWC_MM_INFO)
-
-static int compat_get_smi_bwc_config_struct(
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
- MTK_SMI_BWC_CONFIG __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->scenario));
- err |= put_user(i, &(data->scenario));
- err |= get_user(i, &(data32->b_on_off));
- err |= put_user(i, &(data->b_on_off));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_set_struct(
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
- MTK_SMI_BWC_INFO_SET __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->property));
- err |= put_user(i, &(data->property));
- err |= get_user(i, &(data32->value1));
- err |= put_user(i, &(data->value1));
- err |= get_user(i, &(data32->value2));
- err |= put_user(i, &(data->value2));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data32->flag));
- err |= put_user(u, &(data->flag));
- err |= get_user(i, &(data32->concurrent_profile));
- err |= put_user(i, &(data->concurrent_profile));
- err |= copy_from_user(p, &(data32->sensor_size),sizeof(p));
- err |= copy_to_user(&(data->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->video_record_size),sizeof(p));
- err |= copy_to_user(&(data->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->display_size),sizeof(p));
- err |= copy_to_user(&(data->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data32->fps));
- err |= put_user(i, &(data->fps));
- err |= get_user(i, &(data32->video_encode_codec));
- err |= put_user(i, &(data->video_encode_codec));
- err |= get_user(i, &(data32->video_decode_codec));
- err |= put_user(i, &(data->video_decode_codec));
- err |= get_user(i, &(data32->hw_ovl_limit));
- err |= put_user(i, &(data->hw_ovl_limit));
-
-
- return err;
-}
-
-static int compat_put_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
-
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data->flag));
- err |= put_user(u, &(data32->flag));
- err |= get_user(i, &(data->concurrent_profile));
- err |= put_user(i, &(data32->concurrent_profile));
- err |= copy_from_user(p, &(data->sensor_size),sizeof(p));
- err |= copy_to_user(&(data32->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->video_record_size),sizeof(p));
- err |= copy_to_user(&(data32->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->display_size),sizeof(p));
- err |= copy_to_user(&(data32->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data32->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data->fps));
- err |= put_user(i, &(data32->fps));
- err |= get_user(i, &(data->video_encode_codec));
- err |= put_user(i, &(data32->video_encode_codec));
- err |= get_user(i, &(data->video_decode_codec));
- err |= put_user(i, &(data32->video_decode_codec));
- err |= get_user(i, &(data->hw_ovl_limit));
- err |= put_user(i, &(data32->hw_ovl_limit));
- return err;
-}
-
-long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op || !filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd){
- case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
- {
- if(COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_CONFIG");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
- MTK_SMI_BWC_CONFIG __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_config_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
- (unsigned long)data);
- return ret;
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_SET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
- MTK_SMI_BWC_INFO_SET __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
- (unsigned long)data);
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET){
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_GET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
- MTK_SMI_BWC_MM_INFO __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
- (unsigned long)data);
-
- err = compat_put_smi_bwc_mm_info_struct(data32, data);
-
- if (err)
- return err;
-
- return ret;
- }
- }
-
- case MTK_IOC_SMI_DUMP_LARB:
- case MTK_IOC_SMI_DUMP_COMMON:
-
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-
-}
-
-#endif
-
-module_init( smi_init);
-module_exit( smi_exit);
-
-module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
-module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
-module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
-
-MODULE_DESCRIPTION("MTK SMI driver");
-MODULE_AUTHOR("Frederic Chen<frederic.chen@mediatek.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_common_d2.c b/drivers/misc/mediatek/smi/mt6735/smi_common_d2.c
deleted file mode 100644
index cbf1cca56..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_common_d2.c
+++ /dev/null
@@ -1,1970 +0,0 @@
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/kobject.h>
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/aee.h>
-#include <linux/xlog.h>
-#include <mach/mt_clkmgr.h>
-#include <asm/io.h>
-
-#include <linux/ioctl.h>
-#include <linux/fs.h>
-
-#if IS_ENABLED(CONFIG_COMPAT)
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#endif
-
-#include <mach/mt_smi.h>
-
-
-#include "smi_reg_d2.h"
-#include "smi_common.h"
-#include "smi_debug.h"
-
-#include "mmdvfs_mgr.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "[SMI]" fmt
-
-#define SMI_LOG_TAG "SMI"
-
-#define SMI_DT_SUPPORT
-
-#define LARB_BACKUP_REG_SIZE 128
-#define SMI_COMMON_BACKUP_REG_NUM 8
-
-#define SF_HWC_PIXEL_MAX_NORMAL (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_VR (1920 * 1080 * 4 + 1036800) // 4.5 FHD size
-#define SF_HWC_PIXEL_MAX_VP (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (1920 * 1080 * 1)
-
-#define SMIDBG(level, x...) \
- do{ \
- if (smi_debug_level >= (level)) \
- SMIMSG(x); \
- } while (0)
-
-typedef struct {
- spinlock_t SMI_lock;
- unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT]; //one bit represent one module
-} SMI_struct;
-
-static SMI_struct g_SMIInfo;
-
-/* LARB BASE ADDRESS */
-static unsigned long gLarbBaseAddr[SMI_LARB_NR] = { 0, 0, 0};
-
-// DT porting
-unsigned long smi_reg_base_common_ext = 0;
-unsigned long smi_reg_base_barb0 = 0;
-unsigned long smi_reg_base_barb1 = 0;
-unsigned long smi_reg_base_barb2 = 0;
-
-#define SMI_REG_REGION_MAX 4
-#define SMI_COMMON_REG_INDX 0
-#define SMI_LARB0_REG_INDX 1
-#define SMI_LARB1_REG_INDX 2
-#define SMI_LARB2_REG_INDX 3
-
-static unsigned long gSMIBaseAddrs[SMI_REG_REGION_MAX];
-void register_base_dump( void );
-
-//#ifdef SMI_DT_SUPPORT
-char* smi_get_region_name( unsigned int region_indx );
-//#endif //SMI_DT_SUPPORT
-
-struct smi_device{
- struct device *dev;void __iomem *regs[SMI_REG_REGION_MAX];
-};
-static struct smi_device *smi_dev = NULL;
-
-static struct device* smiDeviceUevent = NULL;
-
-static struct cdev * pSmiDev = NULL;
-
-static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
- SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM};
-
-static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
-static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
-static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
-
-/* SMI COMMON register list to be backuped */
-static unsigned short
-g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = { 0x100, 0x104,
- 0x108, 0x10c, 0x110, 0x230, 0x234, 0x238 };
-static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
-
-static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1};
-
-static unsigned short int * larb_port_backup[SMI_LARB_NR] = {
- larb0_port_backup, larb1_port_backup, larb2_port_backup };
-
-// To keep the HW's init value
-static int is_default_value_saved = 0;
-static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
-
-static unsigned int wifi_disp_transaction = 0;
-
-/* debug level */
-static unsigned int smi_debug_level = 0;
-
-/* tuning mode, 1 for register ioctl */
-static unsigned int smi_tuning_mode = 0;
-
-static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
-
-static unsigned int* pLarbRegBackUp[SMI_LARB_NR];
-static int g_bInited = 0;
-
-static MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = { 0, 0, { 0, 0 }, { 0, 0 }, { 0,
- 0 }, { 0, 0 }, 0, 0, 0, SF_HWC_PIXEL_MAX_NORMAL };
-
-char *smi_port_name[][21] = { { /* 0 MMSYS */
- "disp_ovl0", "disp_rdma0", "disp_rdma1", "disp_wdma0", "disp_ovl1",
- "disp_rdma2", "disp_wdma1", "disp_od_r", "disp_od_w", "mdp_rdma0",
- "mdp_rdma1", "mdp_wdma", "mdp_wrot0", "mdp_wrot1" }, { /* 1 VDEC */
- "hw_vdec_mc_ext", "hw_vdec_pp_ext", "hw_vdec_ufo_ext", "hw_vdec_vld_ext",
- "hw_vdec_vld2_ext", "hw_vdec_avc_mv_ext", "hw_vdec_pred_rd_ext",
- "hw_vdec_pred_wr_ext", "hw_vdec_ppwrap_ext" }, { /* 2 ISP */
- "imgo", "rrzo", "aao", "lcso", "esfko", "imgo_d", "lsci", "lsci_d", "bpci",
- "bpci_d", "ufdi", "imgi", "img2o", "img3o", "vipi", "vip2i", "vip3i",
- "lcei", "rb", "rp", "wr" }, { /* 3 VENC */
- "venc_rcpu", "venc_rec", "venc_bsdma", "venc_sv_comv", "venc_rd_comv",
- "jpgenc_bsdma", "remdc_sdma", "remdc_bsdma", "jpgenc_rdma", "jpgenc_sdma",
- "jpgdec_wdma", "jpgdec_bsdma", "venc_cur_luma", "venc_cur_chroma",
- "venc_ref_luma", "venc_ref_chroma", "remdc_wdma", "venc_nbm_rdma",
- "venc_nbm_wdma" }, { /* 4 MJC */
- "mjc_mv_rd", "mjc_mv_wr", "mjc_dma_rd", "mjc_dma_wr" } };
-
-static unsigned long smi_reg_pa_base[SMI_REG_REGION_MAX] = { 0x14017000,
- 0x14016000, 0x16010000, 0x15001000 };
-
-static void initSetting( void );
-static void vpSetting( void );
-static void vrSetting( void );
-static void icfpSetting( void );
-static void vpWfdSetting( void );
-
-static void smi_dumpLarb( unsigned int index );
-static void smi_dumpCommon( void );
-extern void smi_dumpDebugMsg( void );
-#if IS_ENABLED(CONFIG_COMPAT)
- long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
- #define MTK_SMI_COMPAT_ioctl NULL
-#endif
-
-// for slow motion force 30 fps
-extern int primary_display_force_set_vsync_fps( unsigned int fps );
-extern unsigned int primary_display_get_fps( void );
-
-// Use this function to get base address of Larb resgister
-// to support error checking
-unsigned long get_larb_base_addr( int larb_id ){
- if( larb_id > SMI_LARB_NR || larb_id < 0 ){
- return SMI_ERROR_ADDR;
- }else{
- return gLarbBaseAddr[larb_id];
- }
-}
-
-static int larb_clock_on( int larb_id ){
-
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
- case 0:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_DISP0_SMI_LARB0, name);
- break;
- case 1:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_VDEC1_LARB, name);
- break;
- case 2:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- break;
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static int larb_clock_off( int larb_id ){
-
-#ifndef CONFIG_MTK_FPGA
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
- case 0:
- disable_clock(MT_CG_DISP0_SMI_LARB0, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 1:
- disable_clock(MT_CG_VDEC1_LARB, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 2:
- disable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static void backup_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- g_smi_common_backup[i] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i]);
- }
-}
-
-static void restore_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i],
- g_smi_common_backup[i]);
- }
-}
-
-static void backup_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
-
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- *backup_ptr = (unsigned short int) (M4U_ReadReg32(larb_base,
- larb_offset));
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* backup smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- backup_smi_common();
- }
-
- return;
-}
-
-static void restore_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- unsigned int backup_value = 0;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- /* restore smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- restore_smi_common();
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- backup_value = *backup_ptr;
- M4U_WriteReg32(larb_base, larb_offset, backup_value);
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* we do not backup 0x20 because it is a fixed setting */
- M4U_WriteReg32(larb_base, 0x20, larb_vc_setting[index]);
-
- /* turn off EMI empty OSTD dobule, fixed setting */
- M4U_WriteReg32(larb_base, 0x2c, 4);
-
- return;
-}
-
-static int larb_reg_backup( int larb ){
- unsigned int* pReg = pLarbRegBackUp[larb];
- unsigned long larb_base = gLarbBaseAddr[larb];
-
- *(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
-
- // *(pReg++) = M4U_ReadReg32(larb_base, SMI_SHARE_EN);
- // *(pReg++) = M4U_ReadReg32(larb_base, SMI_ROUTE_SEL);
-
- backup_larb_smi(larb);
-
- if( 0 == larb ){
- g_bInited = 0;
- }
-
- return 0;
-}
-
-static int smi_larb_init( unsigned int larb ){
- unsigned int regval = 0;
- unsigned int regval1 = 0;
- unsigned int regval2 = 0;
- unsigned long larb_base = get_larb_base_addr(larb);
-
- // Clock manager enable LARB clock before call back restore already, it will be disabled after restore call back returns
- // Got to enable OSTD before engine starts
- regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
-
- // TODO: FIX ME
- // regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0);
- // regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1);
-
- if( 0 == regval ){
- SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
- M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
- }else{
- SMIMSG(
- "Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n",
- larb_base, regval, regval1, regval2);
- smi_dumpDebugMsg();
- if( smi_debug_level >= 1 ){
- SMIERR(
- "DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
- larb_base, regval, regval1, regval2);
- }else{
- dump_stack();
- }
- }
-
- restore_larb_smi(larb);
-
- return 0;
-}
-
-int larb_reg_restore( int larb ){
- unsigned long larb_base = SMI_ERROR_ADDR;
- unsigned int regval = 0;
- unsigned int* pReg = NULL;
-
- larb_base = get_larb_base_addr(larb);
-
- // The larb assign doesn't exist
- if( larb_base == SMI_ERROR_ADDR ){
- SMIMSG("Can't find the base address for Larb%d\n", larb);
- return 0;
- }
-
- pReg = pLarbRegBackUp[larb];
-
- SMIDBG(1, "+larb_reg_restore(), larb_idx=%d \n", larb);
- SMIDBG(1, "m4u part restore, larb_idx=%d \n", larb);
- //warning: larb_con is controlled by set/clr
- regval = *(pReg++);
- M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
- M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
-
- //M4U_WriteReg32(larb_base, SMI_SHARE_EN, *(pReg++) );
- //M4U_WriteReg32(larb_base, SMI_ROUTE_SEL, *(pReg++) );
-
- smi_larb_init(larb);
-
- return 0;
-}
-
-// callback after larb clock is enabled
-void on_larb_power_on( struct larb_monitor *h, int larb_idx ){
- //M4ULOG("on_larb_power_on(), larb_idx=%d \n", larb_idx);
- larb_reg_restore(larb_idx);
-
- return;
-}
-// callback before larb clock is disabled
-void on_larb_power_off( struct larb_monitor *h, int larb_idx ){
- //M4ULOG("on_larb_power_off(), larb_idx=%d \n", larb_idx);
- larb_reg_backup(larb_idx);
-}
-
-static void restSetting( void ){
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x1); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 0x1); //disp_fake
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0x1); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0x1); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); //hw_vdec_pred_rd_ext
- M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); //hw_vdec_pred_wr_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
- M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); //hw_vdec_ppwrap_ext
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x1); //cam_imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); //cam_img2o
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //cam_lsci
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); //cam_imgi
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //cam_esfko
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //cam_aao
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //venc_mvqp
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); //venc_mc
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //venc_cdma
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //venc_rec
-}
-//Make sure clock is on
-static void initSetting( void ){
-
- /* save default larb regs */
- if( !is_default_value_saved ){
- SMIMSG("Save default config:\n");
- default_val_smi_l1arb[0] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB0);
- default_val_smi_l1arb[1] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB1);
- default_val_smi_l1arb[2] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB2);
-
- SMIMSG("l1arb[0-2]= 0x%x, 0x%x, 0x%x\n", default_val_smi_l1arb[0],
- default_val_smi_l1arb[1], default_val_smi_l1arb[2]);
-
- is_default_value_saved = 1;
- }
-
- // Keep the HW's init setting in REG_SMI_L1ARB0 ~ REG_SMI_L1ARB4
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0,
- default_val_smi_l1arb[0]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1,
- default_val_smi_l1arb[1]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2,
- default_val_smi_l1arb[2]);
-
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x100, 0xb);
-
- M4U_WriteReg32(
- SMI_COMMON_EXT_BASE,
- 0x234,
- (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
- + (0x4 << 10) + (0x4 << 5) + 0x5);
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x230, (0x7+(0x8<<3)+(0x7<<8)));
-
- // Set VC priority: MMSYS = ISP > VENC > VDEC = MJC
- M4U_WriteReg32(LARB0_BASE, 0x20, 0x0); // MMSYS
- M4U_WriteReg32(LARB1_BASE, 0x20, 0x2); // VDEC
- M4U_WriteReg32(LARB2_BASE, 0x20, 0x1); // ISP
-
-
- // for UI
- restSetting();
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 31); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 31); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 4); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 6); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 4); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 1); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 1); //disp_fake
-
-}
-
-static void icfpSetting( void ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x11da); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x1000); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10c, 0x1318); //LARB3, VENC+JPG
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x6); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x6); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 0x1); //disp_fake
-
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x8); //cam_imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x6); //cam_img2o
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //cam_lsci
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x2); //jpgenc_rdma
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x4); //cam_imgi
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //cam_esfko
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //cam_aao
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //venc_mvqp
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); //venc_mc
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //venc_cdma
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //venc_rec
-}
-
-
-
-static void vrSetting( void ){
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x11ff); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x1000); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10c, 0x1361); //LARB3, VENC+JPG
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x6); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x6); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 0x1); //disp_fake
-
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x8); //cam_imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x6); //cam_img2o
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //cam_lsci
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x4); //cam_imgi
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //cam_esfko
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //cam_aao
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //venc_mvqp
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x2); //venc_mc
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //venc_cdma
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //venc_rec
-}
-
-static void vpSetting( void ){
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x11ff); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, default_val_smi_l1arb[1]); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10c, 0x1361); //LARB3, VENC+JPG
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x8); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 0x1); //disp_fake
-
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x8); //cam_imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x6); //cam_img2o
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //cam_lsci
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x4); //cam_imgi
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //cam_esfko
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //cam_aao
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //venc_mvqp
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x2); //venc_mc
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //venc_cdma
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //venc_rec
-}
-
-
-
-
-static void vpWfdSetting( void ){
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x11ff); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, default_val_smi_l1arb[1]); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10c, 0x1361); //LARB3, VENC+JPG
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0_port0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x8); //disp_ovl0_port1
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
- M4U_WriteReg32(LARB0_BASE, 0x21C, 0x1); //disp_fake
-
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x8); //cam_imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x6); //cam_img2o
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //cam_lsci
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x4); //cam_imgi
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //cam_esfko
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //cam_aao
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //venc_mvqp
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x2); //venc_mc
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //venc_cdma
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //venc_rec
-}
-
-// Fake mode check, e.g. WFD
-static int fake_mode_handling(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- if( p_conf->scenario == SMI_BWC_SCEN_WFD ){
- if( p_conf->b_on_off ){
- wifi_disp_transaction = 1;
- SMIMSG("Enable WFD in profile: %d\n", smi_profile);
- }else{
- wifi_disp_transaction = 0;
- SMIMSG("Disable WFD in profile: %d\n", smi_profile);
- }
- return 1;
- }else{
- return 0;
- }
-}
-
-static int ovl_limit_uevent( int bwc_scenario, int ovl_pixel_limit ){
- int err = 0;
- char *envp[3];
- char scenario_buf[32] = "";
- char ovl_limit_buf[32] = "";
-
- // scenario_buf = kzalloc(sizeof(char)*128, GFP_KERNEL);
- // ovl_limit_buf = kzalloc(sizeof(char)*128, GFP_KERNEL);
-
- snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
- snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
-
- envp[0] = scenario_buf;
- envp[1] = ovl_limit_buf;
- envp[2] = NULL;
-
- if( pSmiDev != NULL ){
- // err = kobject_uevent_env(&(pSmiDev->kobj), KOBJ_CHANGE, envp);
- // use smi_dev->dev.lobj instead
- // err = kobject_uevent_env(&(smi_dev->dev->kobj), KOBJ_CHANGE, envp);
- // user smiDeviceUevent->kobj instead
- err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
- SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit,
- bwc_scenario);
- }
- //kfree(scenario_buf);
- //kfree(ovl_limit_buf);
-
- if(err < 0)
- SMIMSG(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err);
-
- return err;
-}
-
-static int smi_bwc_config(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- int i;
- int result = 0;
- unsigned int u4Concurrency = 0;
- MTK_SMI_BWC_SCEN eFinalScen;
- static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
-
- if( smi_tuning_mode == 1 ){
- SMIMSG("Doesn't change profile in tunning mode");
- return 0;
- }
- //#ifdef SMI_DT_SUPPORT
- //register_base_dump();
- //#endif
-
- spin_lock(&g_SMIInfo.SMI_lock);
- result = fake_mode_handling(p_conf, pu4LocalCnt);
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- // Fake mode is not a real SMI profile, so we need to return here
- if( result == 1 ){
- return 0;
- }
-
- if( (SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario) ){
- SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n",
- p_conf->scenario);
- return -1;
- }
-
- //Debug - S
- //SMIMSG("SMI setTo%d,%s,%d\n" , p_conf->scenario , (p_conf->b_on_off ? "on" : "off") , ePreviousFinalScen);
- //Debug - E
-
- if (p_conf->b_on_off) {
- /* set mmdvfs step according to certain scenarios */
- mmdvfs_notify_scenario_enter(p_conf->scenario);
- } else {
- /* set mmdvfs step to default after the scenario exits */
- mmdvfs_notify_scenario_exit(p_conf->scenario);
- }
-
- spin_lock(&g_SMIInfo.SMI_lock);
-
- if( p_conf->b_on_off ){
- //turn on certain scenario
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
-
- if( NULL != pu4LocalCnt ){
- pu4LocalCnt[p_conf->scenario] += 1;
- }
- }else{
- //turn off certain scenario
- if( 0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] ){
- SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
- p_conf->scenario,
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
- }else{
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
- }
-
- if( NULL != pu4LocalCnt ){
- if( 0 == pu4LocalCnt[p_conf->scenario] ){
- SMIMSG(
- "Process : %s did too many turning off for local SMI profile:%d,%d\n",
- current->comm, p_conf->scenario,
- pu4LocalCnt[p_conf->scenario]);
- }else{
- pu4LocalCnt[p_conf->scenario] -= 1;
- }
- }
- }
-
- for( i = 0; i < SMI_BWC_SCEN_CNT; i++ ){
- if( g_SMIInfo.pu4ConcurrencyTable[i] ){
- u4Concurrency |= (1 << i);
- }
- }
-
- /* notify mmdvfs concurrency */
- mmdvfs_notify_scenario_concurrency(u4Concurrency);
-
- if( (1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_MM_GPU;
- }else if( (1 << SMI_BWC_SCEN_ICFP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_ICFP;
- }else if( (1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR_SLOW;
- }else if( (1 << SMI_BWC_SCEN_VR) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR;
- }else if( (1 << SMI_BWC_SCEN_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VP;
- }else if( (1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
- }else if( (1 << SMI_BWC_SCEN_VENC) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VENC;
- }else{
- eFinalScen = SMI_BWC_SCEN_NORMAL;
- }
-
- if( ePreviousFinalScen == eFinalScen ){
- SMIMSG("Scen equal%d,don't change\n", eFinalScen);
- spin_unlock(&g_SMIInfo.SMI_lock);
- return 0;
- }else{
- ePreviousFinalScen = eFinalScen;
- }
-
- /* turn on larb clock */
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_on(i);
- }
-
- smi_profile = eFinalScen;
-
- /* Bandwidth Limiter */
- switch( eFinalScen ){
- case SMI_BWC_SCEN_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
- if( wifi_disp_transaction ){
- vpSetting();
- }else{
- vpWfdSetting();
- }
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_SWDEC_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
- vpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_ICFP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_ICFP");
- icfpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
- case SMI_BWC_SCEN_VR:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
-
- case SMI_BWC_SCEN_VR_SLOW:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- smi_profile = SMI_BWC_SCEN_VR_SLOW;
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_VENC:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_NORMAL:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- case SMI_BWC_SCEN_MM_GPU:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- default:
- SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
- initSetting();
- g_smi_bwc_mm_info .hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
- }
-
-
- /*turn off larb clock*/
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
-
- /* force 30 fps in VR slow motion, because disp driver set fps apis got mutex, call these APIs only when necessary */
- {
- static unsigned int current_fps = 0;
-
- if( (eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30) ){ /* force 30 fps in VR slow motion profile */
- primary_display_force_set_vsync_fps(30);
- current_fps = 30;
- SMIMSG("[SMI_PROFILE] set 30 fps\n");
- }else if( (eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30) ){ /* back to normal fps */
- current_fps = primary_display_get_fps();
- primary_display_force_set_vsync_fps(current_fps);
- SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
- }
- }
-
- SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
- (p_conf->b_on_off ? "on" : "off"), eFinalScen,
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
-
- //Debug usage - S
- //smi_dumpDebugMsg();
- //SMIMSG("Config:%d,%d,%d\n" , eFinalScen , g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL] , (NULL == pu4LocalCnt ? (-1) : pu4LocalCnt[p_conf->scenario]));
- //Debug usage - E
-
- return 0;
-}
-
-struct larb_monitor larb_monitor_handler =
-{
- .level = LARB_MONITOR_LEVEL_HIGH,
- .backup = on_larb_power_off,
- .restore = on_larb_power_on
-};
-
-int smi_common_init( void ){
- int i;
-
- SMIMSG("Enter smi_common_init\n")
- for( i = 0; i < SMI_LARB_NR; i++ ){
- pLarbRegBackUp[i] = (unsigned int*) kmalloc(LARB_BACKUP_REG_SIZE,
- GFP_KERNEL | __GFP_ZERO);
- if( pLarbRegBackUp[i] == NULL ){
- SMIERR("pLarbRegBackUp kmalloc fail %d \n", i);
- }
- }
-
- /*
- * make sure all larb power is on before we register callback func.
- * then, when larb power is first off, default register value will be backed up.
- */
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- SMIMSG("Enalbe CLK of larb%d\n", i );
- larb_clock_on(i);
- }
-
- /* apply init setting after kernel boot */
- SMIMSG("Enter smi_common_init\n")
- initSetting();
-
- register_larb_monitor(&larb_monitor_handler);
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- return 0;
-}
-
-static int smi_open( struct inode *inode, struct file *file ){
- file->private_data = kmalloc(SMI_BWC_SCEN_CNT * sizeof(unsigned int),
- GFP_ATOMIC);
-
- if( NULL == file->private_data ){
- SMIMSG("Not enough entry for DDP open operation\n");
- return -ENOMEM;
- }
-
- memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- return 0;
-}
-
-static int smi_release( struct inode *inode, struct file *file ){
-
-#if 0
- unsigned long u4Index = 0;
- unsigned long u4AssignCnt = 0;
- unsigned long * pu4Cnt = (unsigned long *)file->private_data;
- MTK_SMI_BWC_CONFIG config;
-
- for(; u4Index < SMI_BWC_SCEN_CNT; u4Index += 1)
- {
- if(pu4Cnt[u4Index])
- {
- SMIMSG("Process:%s does not turn off BWC properly , force turn off %d\n" , current->comm , u4Index);
- u4AssignCnt = pu4Cnt[u4Index];
- config.b_on_off = 0;
- config.scenario = (MTK_SMI_BWC_SCEN)u4Index;
- do
- {
- smi_bwc_config( &config , pu4Cnt);
- }
- while(0 < u4AssignCnt);
- }
- }
-#endif
-
- if( NULL != file->private_data ){
- kfree(file->private_data);
- file->private_data = NULL;
- }
-
- return 0;
-}
-/* GMP start */
-
-void smi_bwc_mm_info_set( int property_id, long val1, long val2 ){
-
- switch( property_id ){
- case SMI_BWC_INFO_CON_PROFILE:
- g_smi_bwc_mm_info.concurrent_profile = (int) val1;
- break;
- case SMI_BWC_INFO_SENSOR_SIZE:
- g_smi_bwc_mm_info.sensor_size[0] = val1;
- g_smi_bwc_mm_info.sensor_size[1] = val2;
- break;
- case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
- g_smi_bwc_mm_info.video_record_size[0] = val1;
- g_smi_bwc_mm_info.video_record_size[1] = val2;
- break;
- case SMI_BWC_INFO_DISP_SIZE:
- g_smi_bwc_mm_info.display_size[0] = val1;
- g_smi_bwc_mm_info.display_size[1] = val2;
- break;
- case SMI_BWC_INFO_TV_OUT_SIZE:
- g_smi_bwc_mm_info.tv_out_size[0] = val1;
- g_smi_bwc_mm_info.tv_out_size[1] = val2;
- break;
- case SMI_BWC_INFO_FPS:
- g_smi_bwc_mm_info.fps = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
- g_smi_bwc_mm_info.video_encode_codec = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
- g_smi_bwc_mm_info.video_decode_codec = (int) val1;
- break;
- }
-}
-
-/* GMP end */
-
-static long smi_ioctl(
- struct file * pFile,
- unsigned int cmd,
- unsigned long param ){
- int ret = 0;
-
- // unsigned long * pu4Cnt = (unsigned long *)pFile->private_data;
-
- switch( cmd ){
-
- /* disable reg access ioctl by default for possible security holes */
- // TBD: check valid SMI register range
- case MTK_IOC_SMI_BWC_CONFIG: {
- MTK_SMI_BWC_CONFIG cfg;
- ret = copy_from_user(&cfg, (void*) param,
- sizeof(MTK_SMI_BWC_CONFIG));
- if( ret ){
- SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
- return -EFAULT;
- }
-
- ret = smi_bwc_config(&cfg, NULL);
-
- break;
- }
- /* GMP start */
- case MTK_IOC_SMI_BWC_INFO_SET: {
- MTK_SMI_BWC_INFO_SET cfg;
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... start");
- ret = copy_from_user(&cfg, (void *) param,
- sizeof(MTK_SMI_BWC_INFO_SET));
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- /* Set the address to the value assigned by user space program */
- smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish");
- break;
- }
- case MTK_IOC_SMI_BWC_INFO_GET: {
- ret = copy_to_user((void *) param, (void *) &g_smi_bwc_mm_info,
- sizeof(MTK_SMI_BWC_MM_INFO));
-
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish");
- break;
- }
- /* GMP end */
-
- case MTK_IOC_SMI_DUMP_LARB: {
- unsigned int larb_index;
-
- ret = copy_from_user(&larb_index, (void*) param,
- sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpLarb(larb_index);
- }
- break;
-
- case MTK_IOC_SMI_DUMP_COMMON: {
- unsigned int arg;
-
- ret = copy_from_user(&arg, (void*) param, sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpCommon();
- }
- break;
-
- case MTK_IOC_MMDVFS_CMD:
- {
- MTK_MMDVFS_CMD mmdvfs_cmd;
-
- if (copy_from_user(&mmdvfs_cmd, (void*)param, sizeof(MTK_MMDVFS_CMD))) {
- return -EFAULT;
- }
-
- mmdvfs_handle_cmd(&mmdvfs_cmd);
-
- if (copy_to_user((void*)param, (void*)&mmdvfs_cmd, sizeof(MTK_MMDVFS_CMD))) {
- return -EFAULT;
- }
- }
- break;
-
- default:
- return -1;
- }
-
- return ret;
-}
-
-static const struct file_operations smiFops =
-{
- .owner = THIS_MODULE,
- .open = smi_open,
- .release = smi_release,
- .unlocked_ioctl = smi_ioctl,
- .compat_ioctl = MTK_SMI_COMPAT_ioctl,
-};
-
-static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
-static inline int smi_register( void ){
- if( alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI") ){
- SMIERR("Allocate device No. failed");
- return -EAGAIN;
- }
- //Allocate driver
- pSmiDev = cdev_alloc();
-
- if( NULL == pSmiDev ){
- unregister_chrdev_region(smiDevNo, 1);
- SMIERR("Allocate mem for kobject failed");
- return -ENOMEM;
- }
-
- //Attatch file operation.
- cdev_init(pSmiDev, &smiFops);
- pSmiDev->owner = THIS_MODULE;
-
- //Add to system
- if( cdev_add(pSmiDev, smiDevNo, 1) ){
- SMIERR("Attatch file operation failed");
- unregister_chrdev_region(smiDevNo, 1);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-static struct class *pSmiClass = NULL;
-
-static int smi_probe( struct platform_device *pdev ){
-
- int i;
-
- static unsigned int smi_probe_cnt = 0;
- struct device* smiDevice = NULL;
- SMIMSG("Enter smi_probe\n");
- //Debug only
- if( smi_probe_cnt != 0 ){
- SMIERR("Onlye support 1 SMI driver probed\n");
- return 0;
- }
- smi_probe_cnt++;
- SMIMSG("Allocate smi_dev space\n");
- smi_dev = krealloc(smi_dev, sizeof(struct smi_device), GFP_KERNEL);
-
- if( smi_dev == NULL ){
- SMIERR("Unable to allocate memory for smi driver\n");
- return -ENOMEM;
- }
- if( NULL == pdev ){
- SMIERR("platform data missed\n");
- return -ENXIO;
- }
- // Keep the device structure
- smi_dev->dev = &pdev->dev;
-
- // Map registers
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- SMIMSG("Save region: %d\n", i);
- smi_dev->regs[i] = (void *) of_iomap(pdev->dev.of_node, i);
-
- if( !smi_dev->regs[i] ){
- SMIERR("Unable to ioremap registers, of_iomap fail, i=%d \n", i);
- return -ENOMEM;
- }
-
- // Record the register base in global variable
- gSMIBaseAddrs[i] = (unsigned long) (smi_dev->regs[i]);
- SMIMSG("DT, i=%d, region=%s, map_addr=0x%p, reg_pa=0x%lx\n", i,
- smi_get_region_name(i), smi_dev->regs[i], smi_reg_pa_base[i]);
- }
-
- SMIMSG("Execute smi_register\n");
- if( smi_register() ){
- dev_err(&pdev->dev, "register char failed\n");
- return -EAGAIN;
- }
-
- pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
- if(IS_ERR(pSmiClass)) {
- int ret = PTR_ERR(pSmiClass);
- SMIERR("Unable to create class, err = %d", ret);
- return ret;
- }
- SMIMSG("Create davice\n");
- smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
- smiDeviceUevent = smiDevice;
-
- SMIMSG("SMI probe done.\n");
-
- // To adapt the legacy codes
- smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
- smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
- smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
- smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
- //smi_reg_base_barb4 = gSMIBaseAddrs[SMI_LARB4_REG_INDX];
-
- gLarbBaseAddr[0] = LARB0_BASE;
- gLarbBaseAddr[1] = LARB1_BASE;
- gLarbBaseAddr[2] = LARB2_BASE;
-
- SMIMSG("Execute smi_common_init\n");
- smi_common_init();
-
- SMIMSG("Execute SMI_DBG_Init\n");
- SMI_DBG_Init();
- return 0;
-
-}
-
-char* smi_get_region_name( unsigned int region_indx ){
- switch( region_indx ){
- case SMI_COMMON_REG_INDX:
- return "smi_common";
- case SMI_LARB0_REG_INDX:
- return "larb0";
- case SMI_LARB1_REG_INDX:
- return "larb1";
- case SMI_LARB2_REG_INDX:
- return "larb2";
- default:
- SMIMSG("invalid region id=%d", region_indx);
- return "unknown";
- }
-}
-
-void register_base_dump( void ){
- int i = 0;
- unsigned long pa_value = 0;
- unsigned long va_value = 0;
-
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- va_value = gSMIBaseAddrs[i];
- pa_value = virt_to_phys((void*) va_value);
- SMIMSG("REG BASE:%s-->VA=0x%lx,PA=0x%lx,SPEC=0x%lx\n",
- smi_get_region_name(i), va_value, pa_value, smi_reg_pa_base[i]);
- }
-}
-
-static int smi_remove( struct platform_device *pdev ){
- cdev_del(pSmiDev);
- unregister_chrdev_region(smiDevNo, 1);
- device_destroy(pSmiClass, smiDevNo);
- class_destroy( pSmiClass);
- return 0;
-}
-
-static int smi_suspend( struct platform_device *pdev, pm_message_t mesg ){
- return 0;
-}
-
-static int smi_resume( struct platform_device *pdev ){
- return 0;
-}
-
-#ifdef SMI_DT_SUPPORT
-static const struct of_device_id smi_of_ids[] ={
- { .compatible = "mediatek,SMI_COMMON",},
- {}
-};
-#endif //SMI_DT_SUPPORT
-static struct platform_driver smiDrv ={
- .probe = smi_probe,
- .remove = smi_remove,
- .suspend= smi_suspend,
- .resume = smi_resume,
- .driver ={
- .name = "MTK_SMI",
- .owner = THIS_MODULE,
-#ifdef SMI_DT_SUPPORT
- .of_match_table = smi_of_ids,
-#endif //SMI_DT_SUPPORT
- }
-};
-
-static int __init smi_init(void)
-{
- SMIMSG("smi_init enter\n");
- spin_lock_init(&g_SMIInfo.SMI_lock);
- /* MMDVFS init */
- mmdvfs_init(&g_smi_bwc_mm_info);
-
- memset(g_SMIInfo.pu4ConcurrencyTable , 0 , SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- // Informs the kernel about the function to be called
- // if hardware matching MTK_SMI has been found
- SMIMSG("register platform driver\n");
- if (platform_driver_register(&smiDrv)){
- SMIERR("failed to register MAU driver");
- return -ENODEV;
- }
- SMIMSG("exit smi_init\n");
- return 0;
-}
-
-static void __exit smi_exit(void)
-{
- platform_driver_unregister(&smiDrv);
-
-}
-
-static void smi_dumpCommonDebugMsg( int output_gce_buffer ){
- unsigned long u4Base;
- int smiCommonClkEnabled = 0;
-
- smiCommonClkEnabled = clock_is_on(MT_CG_DISP0_SMI_COMMON);
- //SMI COMMON dump
- if( smi_debug_level == 0 && (!smiCommonClkEnabled) ){
- SMIMSG3(output_gce_buffer, "===SMI common clock is disabled===\n");
- return;
- }
-
- SMIMSG3(output_gce_buffer, "===SMI common reg dump, CLK: %d===\n", smiCommonClkEnabled);
-
- u4Base = SMI_COMMON_EXT_BASE;
- SMIMSG3(output_gce_buffer, "[0x100,0x104,0x108]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x100), M4U_ReadReg32(u4Base, 0x104),
- M4U_ReadReg32(u4Base, 0x108));
- SMIMSG3(output_gce_buffer, "[0x10C,0x110,0x114]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x10C), M4U_ReadReg32(u4Base, 0x110),
- M4U_ReadReg32(u4Base, 0x114));
- SMIMSG3(output_gce_buffer, "[0x220,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x220), M4U_ReadReg32(u4Base, 0x230),
- M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base, 0x238));
- SMIMSG3(output_gce_buffer, "[0x400,0x404,0x408]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x400), M4U_ReadReg32(u4Base, 0x404),
- M4U_ReadReg32(u4Base, 0x408));
- SMIMSG3(output_gce_buffer, "[0x40C,0x430,0x440]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x40C), M4U_ReadReg32(u4Base, 0x430),
- M4U_ReadReg32(u4Base, 0x440));
-
- // TBD: M4U should dump these
- /*
- // For VA and PA check:
- // 0x1000C5C0 , 0x1000C5C4, 0x1000C5C8, 0x1000C5CC, 0x1000C5D0
- u4Base = SMI_COMMON_AO_BASE;
- SMIMSG("===SMI always on reg dump===\n");
- SMIMSG("[0x5C0,0x5C4,0x5C8]=[0x%x,0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5C0),M4U_ReadReg32(u4Base , 0x5C4),M4U_ReadReg32(u4Base , 0x5C8));
- SMIMSG("[0x5CC,0x5D0]=[0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5CC),M4U_ReadReg32(u4Base , 0x5D0));
- */
-}
-static int smi_larb_clock_is_on( unsigned int larb_index ){
-
- int result = 0;
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- switch( larb_index ){
- case 0:
- result = clock_is_on(MT_CG_DISP0_SMI_LARB0);
- break;
- case 1:
- result = clock_is_on(MT_CG_VDEC1_LARB);
- break;
- case 2:
- result = clock_is_on(MT_CG_IMAGE_LARB2_SMI);
- break;
-// case 3:
-// result = clock_is_on(MT_CG_VENC_LARB);
-// break;
-// case 4:
-// result = clock_is_on(MT_CG_MJC_SMI_LARB);
-// break;
- default:
- result = 0;
- break;
- }
-#endif // !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- return result;
-
-}
-static void smi_dumpLarbDebugMsg( unsigned int u4Index ){
- unsigned long u4Base = 0;
-
- int larbClkEnabled = 0;
-
- u4Base = get_larb_base_addr(u4Index);
-
- larbClkEnabled = smi_larb_clock_is_on(u4Index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG("Doesn't support reg dump for Larb%d\n", u4Index);
-
- return;
- }else if( (larbClkEnabled != 0) || smi_debug_level > 0 ){
- SMIMSG("===SMI LARB%d reg dump, CLK: %d===\n", u4Index, larbClkEnabled);
-
- // Staus Registers
- SMIMSG("[0x0,0x8,0x10]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
- M4U_ReadReg32(u4Base, 0x8), M4U_ReadReg32(u4Base, 0x10));
- SMIMSG("[0x24,0x50,0x60]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x24), M4U_ReadReg32(u4Base, 0x50),
- M4U_ReadReg32(u4Base, 0x60));
- SMIMSG("[0xa0,0xa4,0xa8]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xa0), M4U_ReadReg32(u4Base, 0xa4),
- M4U_ReadReg32(u4Base, 0xa8));
- SMIMSG("[0xac,0xb0,0xb4]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xac), M4U_ReadReg32(u4Base, 0xb0),
- M4U_ReadReg32(u4Base, 0xb4));
- SMIMSG("[0xb8,0xbc,0xc0]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xb8), M4U_ReadReg32(u4Base, 0xbc),
- M4U_ReadReg32(u4Base, 0xc0));
- SMIMSG("[0xc8,0xcc]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xc8),
- M4U_ReadReg32(u4Base, 0xcc));
- // Settings
- SMIMSG("[0x200, 0x204, 0x208]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x200), M4U_ReadReg32(u4Base, 0x204),
- M4U_ReadReg32(u4Base, 0x208));
-
- SMIMSG("[0x20c, 0x210, 0x214]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x20c), M4U_ReadReg32(u4Base, 0x210),
- M4U_ReadReg32(u4Base, 0x214));
-
- SMIMSG("[0x218, 0x21c, 0x220]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x218), M4U_ReadReg32(u4Base, 0x21c),
- M4U_ReadReg32(u4Base, 0x220));
-
- SMIMSG("[0x224, 0x228, 0x22c]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x224), M4U_ReadReg32(u4Base, 0x228),
- M4U_ReadReg32(u4Base, 0x22c));
-
- SMIMSG("[0x230, 0x234, 0x238]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234),
- M4U_ReadReg32(u4Base, 0x238));
-
- SMIMSG("[0x23c, 0x240, 0x244]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x23c), M4U_ReadReg32(u4Base, 0x240),
- M4U_ReadReg32(u4Base, 0x244));
-
- SMIMSG("[0x248, 0x24c]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x248),
- M4U_ReadReg32(u4Base, 0x24c));
- }else{
- SMIMSG("===SMI LARB%d clock is disabled===\n", u4Index);
- }
-
-}
-
-static void smi_dump_format(
- unsigned long base,
- unsigned int from,
- unsigned int to ){
- int i, j, left;
- unsigned int value[8];
-
- for( i = from; i <= to; i += 32 ){
- for( j = 0; j < 8; j++ ){
- value[j] = M4U_ReadReg32(base, i + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1],
- value[2], value[3], value[4], value[5], value[6], value[7]);
- }
-
- left = ((from - to) / 4 + 1) % 8;
-
- if( left ){
- memset(value, 0, 8 * sizeof(unsigned int));
-
- for( j = 0; j < left; j++ ){
- value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0],
- value[1], value[2], value[3], value[4], value[5], value[6],
- value[7]);
- }
-}
-
-static void smi_dumpLarb( unsigned int index ){
- unsigned long u4Base;
-
- u4Base = get_larb_base_addr(index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
-
- return;
- }else{
- SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
-
- smi_dump_format(u4Base, 0, 0x434);
- smi_dump_format(u4Base, 0xF00, 0xF0C);
- }
-}
-
-static void smi_dumpCommon( void ){
- SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
-
- smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x444);
-}
-
-void smi_dumpDebugMsg( void ){
- unsigned int u4Index;
-
- // SMI COMMON dump, 0 stands for not pass log to CMDQ error dumping messages
- smi_dumpCommonDebugMsg(0);
-
- // dump all SMI LARB
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- smi_dumpLarbDebugMsg(u4Index);
- }
-}
-
-
-int smi_debug_bus_hanging_detect( unsigned int larbs, int show_dump){
- return smi_debug_bus_hanging_detect_ext(larbs, show_dump, 0);
-}
-
-//dual_buffer = 1, write log into kernel log and CMDQ buffer. dual_buffer = 0, write log into kernel log only
-int smi_debug_bus_hanging_detect_ext( unsigned int larbs, int show_dump, int output_gce_buffer){
-
- int i = 0;
- int dump_time = 0;
- int is_smi_issue = 0;
- int status_code = 0;
- // Keep the dump result
- unsigned char smi_common_busy_count = 0;
- volatile unsigned int reg_temp = 0;
- unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
- unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };
-
- // dump resister and save resgister status
- for( dump_time = 0; dump_time < 5; dump_time++ ){
- unsigned int u4Index = 0;
- reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x440);
- if( (reg_temp & (1 << 0)) == 0 ){
- // smi common is busy
- smi_common_busy_count++;
- }
- // Dump smi common regs
- if( show_dump != 0 ){
- smi_dumpCommonDebugMsg(output_gce_buffer);
- }
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- unsigned long u4Base = get_larb_base_addr(u4Index);
- if( u4Base != SMI_ERROR_ADDR ){
- reg_temp = M4U_ReadReg32(u4Base, 0x0);
- if( reg_temp != 0 ){
- // Larb is busy
- smi_larb_busy_count[u4Index]++;
- }
- smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
- if( show_dump != 0 ){
- smi_dumpLarbDebugMsg(u4Index);
- }
- }
- }
-
- }
-
- // Show the checked result
- for( i = 0; i < SMI_LARB_NR; i++ ){ // Check each larb
- if( SMI_DGB_LARB_SELECT(larbs, i) ){
- // larb i has been selected
- // Get status code
-
- if( smi_larb_busy_count[i] == 5 ){ // The larb is always busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 1;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 2;
- }else{
- status_code = 5; // smi common is sometimes busy and idle
- }
- }else if( smi_larb_busy_count[i] == 0 ){ // The larb is always idle
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 3;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 4;
- }else{
- status_code = 6; // smi common is sometimes busy and idle
- }
- }else{ //sometime the larb is busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 7;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 8;
- }else{
- status_code = 9; // smi common is sometimes busy and idle
- }
- }
-
- // Send the debug message according to the final result
- switch( status_code ){
- case 1:
- case 3:
- case 5:
- case 7:
- case 8:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- break;
- case 2:
- if( smi_larb_mmu_status[i] == 0 ){
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- }else{
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> MMU port config error\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- is_smi_issue = 1;
- }
- break;
- case 4:
- case 6:
- case 9:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- default:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- }
- }
-
- }
-
- return is_smi_issue;
-}
-
-
-void smi_client_status_change_notify( int module, int mode ){
-
-}
-
-#if IS_ENABLED(CONFIG_COMPAT)
-// 32 bits process ioctl support:
-// This is prepared for the future extension since currently the sizes of 32 bits
-// and 64 bits smi parameters are the same.
-
-typedef struct
-{
- compat_int_t scenario;
- compat_int_t b_on_off; //0 : exit this scenario , 1 : enter this scenario
-}MTK_SMI_COMPAT_BWC_CONFIG;
-
-typedef struct
-{
- compat_int_t property;
- compat_int_t value1;
- compat_int_t value2;
-}MTK_SMI_COMPAT_BWC_INFO_SET;
-
-typedef struct
-{
- compat_uint_t flag; // Reserved
- compat_int_t concurrent_profile;
- compat_int_t sensor_size[2];
- compat_int_t video_record_size[2];
- compat_int_t display_size[2];
- compat_int_t tv_out_size[2];
- compat_int_t fps;
- compat_int_t video_encode_codec;
- compat_int_t video_decode_codec;
- compat_int_t hw_ovl_limit;
-}MTK_SMI_COMPAT_BWC_MM_INFO;
-
-#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_COMPAT_BWC_CONFIG)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_COMPAT_BWC_INFO_SET)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_COMPAT_BWC_MM_INFO)
-
-static int compat_get_smi_bwc_config_struct(
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
- MTK_SMI_BWC_CONFIG __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->scenario));
- err |= put_user(i, &(data->scenario));
- err |= get_user(i, &(data32->b_on_off));
- err |= put_user(i, &(data->b_on_off));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_set_struct(
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
- MTK_SMI_BWC_INFO_SET __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->property));
- err |= put_user(i, &(data->property));
- err |= get_user(i, &(data32->value1));
- err |= put_user(i, &(data->value1));
- err |= get_user(i, &(data32->value2));
- err |= put_user(i, &(data->value2));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data32->flag));
- err |= put_user(u, &(data->flag));
- err |= get_user(i, &(data32->concurrent_profile));
- err |= put_user(i, &(data->concurrent_profile));
- err |= copy_from_user(p, &(data32->sensor_size),sizeof(p));
- err |= copy_to_user(&(data->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->video_record_size),sizeof(p));
- err |= copy_to_user(&(data->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->display_size),sizeof(p));
- err |= copy_to_user(&(data->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data32->fps));
- err |= put_user(i, &(data->fps));
- err |= get_user(i, &(data32->video_encode_codec));
- err |= put_user(i, &(data->video_encode_codec));
- err |= get_user(i, &(data32->video_decode_codec));
- err |= put_user(i, &(data->video_decode_codec));
- err |= get_user(i, &(data32->hw_ovl_limit));
- err |= put_user(i, &(data->hw_ovl_limit));
-
-
- return err;
-}
-
-static int compat_put_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
-
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data->flag));
- err |= put_user(u, &(data32->flag));
- err |= get_user(i, &(data->concurrent_profile));
- err |= put_user(i, &(data32->concurrent_profile));
- err |= copy_from_user(p, &(data->sensor_size),sizeof(p));
- err |= copy_to_user(&(data32->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->video_record_size),sizeof(p));
- err |= copy_to_user(&(data32->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->display_size),sizeof(p));
- err |= copy_to_user(&(data32->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data32->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data->fps));
- err |= put_user(i, &(data32->fps));
- err |= get_user(i, &(data->video_encode_codec));
- err |= put_user(i, &(data32->video_encode_codec));
- err |= get_user(i, &(data->video_decode_codec));
- err |= put_user(i, &(data32->video_decode_codec));
- err |= get_user(i, &(data->hw_ovl_limit));
- err |= put_user(i, &(data32->hw_ovl_limit));
- return err;
-}
-
-long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op || !filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd){
- case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
- {
- if(COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_CONFIG");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
- MTK_SMI_BWC_CONFIG __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_config_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
- (unsigned long)data);
- return ret;
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_SET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
- MTK_SMI_BWC_INFO_SET __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
- (unsigned long)data);
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET){
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_GET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
- MTK_SMI_BWC_MM_INFO __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
- (unsigned long)data);
-
- err = compat_put_smi_bwc_mm_info_struct(data32, data);
-
- if (err)
- return err;
-
- return ret;
- }
- }
-
- case MTK_IOC_SMI_DUMP_LARB:
- case MTK_IOC_SMI_DUMP_COMMON:
- case MTK_IOC_MMDVFS_CMD:
-
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-
-}
-
-#endif
-
-module_init( smi_init);
-module_exit( smi_exit);
-
-module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
-module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
-module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
-
-MODULE_DESCRIPTION("MTK SMI driver");
-MODULE_AUTHOR("Frederic Chen<frederic.chen@mediatek.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_common_d3.c b/drivers/misc/mediatek/smi/mt6735/smi_common_d3.c
deleted file mode 100644
index 66534dffc..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_common_d3.c
+++ /dev/null
@@ -1,2077 +0,0 @@
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/kobject.h>
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/aee.h>
-#include <linux/xlog.h>
-
-// We can't remove mt_clkmgr.h now since SMI needs larb monitor APIs
-#include <mach/mt_clkmgr.h>
-
-// Define SMI_INTERNAL_CCF_SUPPORT when CCF needs to be enabled
-#if !defined(CONFIG_MTK_LEGACY)
- #define SMI_INTERNAL_CCF_SUPPORT
-#endif
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-#include <linux/clk.h>
-#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
-
-#include <asm/io.h>
-
-#include <linux/ioctl.h>
-#include <linux/fs.h>
-
-#if IS_ENABLED(CONFIG_COMPAT)
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#endif
-
-#include <mach/mt_smi.h>
-
-#include "smi_reg_d3.h"
-#include "smi_common.h"
-#include "smi_debug.h"
-
-#include "mmdvfs_mgr.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "[SMI]" fmt
-
-#define SMI_LOG_TAG "SMI"
-
-#define LARB_BACKUP_REG_SIZE 128
-#define SMI_COMMON_BACKUP_REG_NUM 7
-
-#define SF_HWC_PIXEL_MAX_NORMAL (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_VR (1920 * 1080 * 4 + 1036800) // 4.5 FHD size
-#define SF_HWC_PIXEL_MAX_VP (1920 * 1080 * 7)
-#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (1920 * 1080 * 1)
-
-#define SMIDBG(level, x...) \
- do{ \
- if (smi_debug_level >= (level)) \
- SMIMSG(x); \
- } while (0)
-
-typedef struct {
- spinlock_t SMI_lock;
- unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT]; //one bit represent one module
-} SMI_struct;
-
-static SMI_struct g_SMIInfo;
-
-/* LARB BASE ADDRESS */
-static unsigned long gLarbBaseAddr[SMI_LARB_NR] = { 0, 0, 0, 0};
-
-// DT porting
-unsigned long smi_reg_base_common_ext = 0;
-unsigned long smi_reg_base_barb0 = 0;
-unsigned long smi_reg_base_barb1 = 0;
-unsigned long smi_reg_base_barb2 = 0;
-unsigned long smi_reg_base_barb3 = 0;
-
-#define SMI_REG_REGION_MAX 5
-#define SMI_COMMON_REG_INDX 0
-#define SMI_LARB0_REG_INDX 1
-#define SMI_LARB1_REG_INDX 2
-#define SMI_LARB2_REG_INDX 3
-#define SMI_LARB3_REG_INDX 4
-
-static unsigned long gSMIBaseAddrs[SMI_REG_REGION_MAX];
-void register_base_dump( void );
-
-char* smi_get_region_name( unsigned int region_indx );
-
-struct smi_device{
- struct device *dev;
- void __iomem *regs[SMI_REG_REGION_MAX];
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
- struct clk *smi_common_clk;
- struct clk *smi_larb0_clk;
- struct clk *img_larb2_clk;
- struct clk *vdec0_vdec_clk;
- struct clk *vdec1_larb_clk;
- struct clk *venc_larb_clk;
-#endif
-};
-static struct smi_device *smi_dev = NULL;
-
-static struct device* smiDeviceUevent = NULL;
-
-static struct cdev * pSmiDev = NULL;
-
-static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
- SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM};
-
-static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
-static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
-static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
-static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
-
-/* SMI COMMON register list to be backuped */
-static unsigned short
-g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = { 0x100, 0x104,
- 0x108, 0x10c, 0x110, 0x230, 0x234 };
-static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
-
-static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1, 1 };
-
-static unsigned short int * larb_port_backup[SMI_LARB_NR] = {
- larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup };
-
-// To keep the HW's init value
-static int is_default_value_saved = 0;
-static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
-
-static unsigned int wifi_disp_transaction = 0;
-
-/* debug level */
-static unsigned int smi_debug_level = 0;
-
-/* tuning mode, 1 for register ioctl */
-static unsigned int smi_tuning_mode = 0;
-
-static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
-
-static unsigned int* pLarbRegBackUp[SMI_LARB_NR];
-static int g_bInited = 0;
-
-static MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = { 0, 0, { 0, 0 }, { 0, 0 }, { 0,
- 0 }, { 0, 0 }, 0, 0, 0, SF_HWC_PIXEL_MAX_NORMAL };
-
-char *smi_port_name[][21] = { { /* 0 MMSYS */
- "disp_ovl0", "disp_rdma0", "disp_rdma1", "disp_wdma0", "disp_ovl1",
- "disp_rdma2", "disp_wdma1", "disp_od_r", "disp_od_w", "mdp_rdma0",
- "mdp_rdma1", "mdp_wdma", "mdp_wrot0", "mdp_wrot1" }, { /* 1 VDEC */
- "hw_vdec_mc_ext", "hw_vdec_pp_ext", "hw_vdec_ufo_ext", "hw_vdec_vld_ext",
- "hw_vdec_vld2_ext", "hw_vdec_avc_mv_ext", "hw_vdec_pred_rd_ext",
- "hw_vdec_pred_wr_ext", "hw_vdec_ppwrap_ext" }, { /* 2 ISP */
- "imgo", "rrzo", "aao", "lcso", "esfko", "imgo_d", "lsci", "lsci_d", "bpci",
- "bpci_d", "ufdi", "imgi", "img2o", "img3o", "vipi", "vip2i", "vip3i",
- "lcei", "rb", "rp", "wr" }, { /* 3 VENC */
- "venc_rcpu", "venc_rec", "venc_bsdma", "venc_sv_comv", "venc_rd_comv",
- "jpgenc_bsdma", "remdc_sdma", "remdc_bsdma", "jpgenc_rdma", "jpgenc_sdma",
- "jpgdec_wdma", "jpgdec_bsdma", "venc_cur_luma", "venc_cur_chroma",
- "venc_ref_luma", "venc_ref_chroma", "remdc_wdma", "venc_nbm_rdma",
- "venc_nbm_wdma" }, { /* 4 MJC */
- "mjc_mv_rd", "mjc_mv_wr", "mjc_dma_rd", "mjc_dma_wr" } };
-
-static unsigned long smi_reg_pa_base[SMI_REG_REGION_MAX] = { 0x14017000,
- 0x14016000, 0x16010000, 0x15001000, 0x17001000 };
-
-static void initSetting( void );
-static void vpSetting( void );
-static void vrSetting( void );
-static void icfpSetting( void );
-static void vpWfdSetting( void );
-
-static void smi_dumpLarb( unsigned int index );
-static void smi_dumpCommon( void );
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-static struct clk *get_smi_clk(char *smi_clk_name);
-#endif
-
-extern void smi_dumpDebugMsg( void );
-#if IS_ENABLED(CONFIG_COMPAT)
- long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
- #define MTK_SMI_COMPAT_ioctl NULL
-#endif
-
-// for slow motion force 30 fps
-extern int primary_display_force_set_vsync_fps( unsigned int fps );
-extern unsigned int primary_display_get_fps( void );
-
-// Use this function to get base address of Larb resgister
-// to support error checking
-unsigned long get_larb_base_addr( int larb_id ){
- unsigned long ret;
- if( larb_id > SMI_LARB_NR || larb_id < 0 ){
- ret = SMI_ERROR_ADDR;
- }else{
- ret = gLarbBaseAddr[larb_id];
- }
- return ret;
-}
-
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
-static struct clk *get_smi_clk(char *smi_clk_name){
- struct clk *smi_clk_ptr = NULL;
- smi_clk_ptr = devm_clk_get(smi_dev->dev, smi_clk_name);
- if (IS_ERR(smi_clk_ptr)) {
- SMIMSG("cannot get %s\n", smi_clk_name);
- smi_clk_ptr = NULL;
- }
- return smi_clk_ptr;
-}
-
-static void smi_enable_clk(struct clk *smi_clk, char *name){
- if (smi_clk != NULL){
- int ret = 0;
- ret = clk_prepare_enable(smi_clk);
- if (ret){
- SMIMSG("clk_prepare_enable return error %d, %s\n", ret, name);
- }
- }else{
- SMIMSG("clk_prepare_enable error, smi_clk can't be NULL, %s\n", name);
- }
-}
-
-static void smi_disable_clk(struct clk *smi_clk, char *name){
- if (smi_clk != NULL){
- clk_disable_unprepare(smi_clk);
- }else{
- SMIMSG("smi_disable_clk error, smi_clk can't be NULL, %s\n", name);
- }
-}
-#endif
-
-static int larb_clock_on( int larb_id ){
-
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
-#if !defined(SMI_INTERNAL_CCF_SUPPORT)
- case 0:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_DISP0_SMI_LARB0, name);
- break;
- case 1:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_VDEC1_LARB, name);
- break;
- case 2:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- break;
- case 3:
- enable_clock(MT_CG_DISP0_SMI_COMMON, name);
- enable_clock(MT_CG_VENC_VENC, name);
- break;
-#else
- case 0:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->smi_larb0_clk, name);
- break;
- case 1:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->vdec1_larb_clk, name);
- break;
- case 2:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->img_larb2_clk, name);
- break;
- case 3:
- smi_enable_clk(smi_dev->smi_common_clk, name);
- smi_enable_clk(smi_dev->venc_larb_clk, name);
- break;
-#endif
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static int larb_clock_off( int larb_id ){
-
-#ifndef CONFIG_MTK_FPGA
- char name[30];
- sprintf(name, "smi+%d", larb_id);
-
- switch( larb_id ){
-#if !defined(SMI_INTERNAL_CCF_SUPPORT)
- case 0:
- disable_clock(MT_CG_DISP0_SMI_LARB0, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 1:
- disable_clock(MT_CG_VDEC1_LARB, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 2:
- disable_clock(MT_CG_IMAGE_LARB2_SMI, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
- case 3:
- disable_clock(MT_CG_VENC_VENC, name);
- disable_clock(MT_CG_DISP0_SMI_COMMON, name);
- break;
-#else
- case 0:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->smi_larb0_clk, name);
- break;
- case 1:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->vdec1_larb_clk, name);
- break;
- case 2:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->img_larb2_clk, name);
- break;
- case 3:
- smi_disable_clk(smi_dev->smi_common_clk, name);
- smi_disable_clk(smi_dev->venc_larb_clk, name);
- break;
-#endif
- default:
- break;
- }
-#endif /* CONFIG_MTK_FPGA */
-
- return 0;
-}
-
-static void backup_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- g_smi_common_backup[i] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i]);
- }
-}
-
-static void restore_smi_common( void ){
- int i;
-
- for( i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++ ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE,
- (unsigned long) g_smi_common_backup_reg_offset[i],
- g_smi_common_backup[i]);
- }
-}
-
-static void backup_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
-
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- *backup_ptr = (unsigned short int) (M4U_ReadReg32(larb_base,
- larb_offset));
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* backup smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- backup_smi_common();
- }
-
- return;
-}
-
-static void restore_larb_smi( int index ){
- int port_index = 0;
- unsigned short int *backup_ptr = NULL;
- unsigned long larb_base = gLarbBaseAddr[index];
- unsigned long larb_offset = 0x200;
- unsigned int backup_value = 0;
- int total_port_num = 0;
-
- // boundary check for larb_port_num and larb_port_backup access
- if( index < 0 || index >= SMI_LARB_NR ){
- return;
- }
- total_port_num = larb_port_num[index];
- backup_ptr = larb_port_backup[index];
-
- // boundary check for port value access
- if( total_port_num <= 0 || backup_ptr == NULL ){
- return;
- }
-
- /* restore smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
- if( index == 0 ){
- restore_smi_common();
- }
-
- for( port_index = 0; port_index < total_port_num; port_index++ ){
- backup_value = *backup_ptr;
- M4U_WriteReg32(larb_base, larb_offset, backup_value);
- backup_ptr++;
- larb_offset += 4;
- }
-
- /* we do not backup 0x20 because it is a fixed setting */
- M4U_WriteReg32(larb_base, 0x20, larb_vc_setting[index]);
-
- /* turn off EMI empty OSTD dobule, fixed setting */
- M4U_WriteReg32(larb_base, 0x2c, 4);
-
- return;
-}
-
-static int larb_reg_backup( int larb ){
- unsigned int* pReg = pLarbRegBackUp[larb];
- unsigned long larb_base = gLarbBaseAddr[larb];
-
- *(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
-
- backup_larb_smi(larb);
-
- if( 0 == larb ){
- g_bInited = 0;
- }
-
- return 0;
-}
-
-static int smi_larb_init( unsigned int larb ){
- unsigned int regval = 0;
- unsigned int regval1 = 0;
- unsigned int regval2 = 0;
- unsigned long larb_base = get_larb_base_addr(larb);
-
- // Clock manager enable LARB clock before call back restore already, it will be disabled after restore call back returns
- // Got to enable OSTD before engine starts
- regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
-
- // TODO: FIX ME
- // regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0);
- // regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1);
-
- if( 0 == regval ){
- SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
- M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
- }else{
- SMIMSG(
- "Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n",
- larb_base, regval, regval1, regval2);
- smi_dumpDebugMsg();
- if( smi_debug_level >= 1 ){
- SMIERR(
- "DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
- larb_base, regval, regval1, regval2);
- }else{
- dump_stack();
- }
- }
-
- restore_larb_smi(larb);
-
- return 0;
-}
-
-int larb_reg_restore( int larb ){
- unsigned long larb_base = SMI_ERROR_ADDR;
- unsigned int regval = 0;
- unsigned int* pReg = NULL;
-
- larb_base = get_larb_base_addr(larb);
-
- // The larb assign doesn't exist
- if( larb_base == SMI_ERROR_ADDR ){
- SMIMSG("Can't find the base address for Larb%d\n", larb);
- return 0;
- }
-
- pReg = pLarbRegBackUp[larb];
-
- SMIDBG(1, "+larb_reg_restore(), larb_idx=%d \n", larb);
- SMIDBG(1, "m4u part restore, larb_idx=%d \n", larb);
- //warning: larb_con is controlled by set/clr
- regval = *(pReg++);
- M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
- M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
-
- smi_larb_init(larb);
-
- return 0;
-}
-
-// callback after larb clock is enabled
-void on_larb_power_on( struct larb_monitor *h, int larb_idx ){
- larb_reg_restore(larb_idx);
-
- return;
-}
-// callback before larb clock is disabled
-void on_larb_power_off( struct larb_monitor *h, int larb_idx ){
- larb_reg_backup(larb_idx);
-}
-
-static void restSetting( void ){
- //initialize OSTD to 1
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x1); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x1); //disp_ovl1
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); //disp_rdma1
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //disp_od_r
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); //disp_od_w
- M4U_WriteReg32(LARB0_BASE, 0x21c, 0x1); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x220, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x224, 0x1); //mdp_wrot
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0x1); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0x1); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); //hw_vdec_pred_rd_ext
- M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); //hw_vdec_pred_wr_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
- M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); //hw_vdec_ppwrap_ext
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0x1); //imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); //rrzo
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //aao
- M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); //lcso
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //esfko
- M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); //imgo_s
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //lsci
- M4U_WriteReg32(LARB2_BASE, 0x21c, 0x1); //lsci_d
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //bpci
- M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); //bpci_d
- M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); //ufdi
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x1); //imgi
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //img2o
- M4U_WriteReg32(LARB2_BASE, 0x234, 0x1); //img3o
- M4U_WriteReg32(LARB2_BASE, 0x238, 0x1); //vipi
- M4U_WriteReg32(LARB2_BASE, 0x23c, 0x1); //vip2i
- M4U_WriteReg32(LARB2_BASE, 0x240, 0x1); //vip3i
- M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); //lcei
- M4U_WriteReg32(LARB2_BASE, 0x248, 0x1); //rb
- M4U_WriteReg32(LARB2_BASE, 0x24c, 0x1); //rp
- M4U_WriteReg32(LARB2_BASE, 0x250, 0x1); //wr
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x214, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB3_BASE, 0x218, 0x1); //jpgenc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x21c, 0x1); //jpgdec_wdma
- M4U_WriteReg32(LARB3_BASE, 0x220, 0x1); //jpgdec_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x1); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x1); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x1); //venc_ref_chroma
-}
-//Make sure clock is on
-static void initSetting( void ){
-
- /* save default larb regs */
- if( !is_default_value_saved ){
- SMIMSG("Save default config:\n");
- default_val_smi_l1arb[0] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB0);
- default_val_smi_l1arb[1] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB1);
- default_val_smi_l1arb[2] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB2);
- default_val_smi_l1arb[3] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
- REG_OFFSET_SMI_L1ARB3);
- SMIMSG("l1arb[0-2]= 0x%x, 0x%x, 0x%x\n", default_val_smi_l1arb[0],
- default_val_smi_l1arb[1], default_val_smi_l1arb[2]);
- SMIMSG("l1arb[3]= 0x%x\n", default_val_smi_l1arb[3]);
-
- is_default_value_saved = 1;
- }
-
- // Keep the HW's init setting in REG_SMI_L1ARB0 ~ REG_SMI_L1ARB4
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0,
- default_val_smi_l1arb[0]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1,
- default_val_smi_l1arb[1]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2,
- default_val_smi_l1arb[2]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3,
- default_val_smi_l1arb[3]);
-
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x100, 0xb);
- M4U_WriteReg32(
- SMI_COMMON_EXT_BASE,
- 0x234,
- (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
- + (0x4 << 10) + (0x4 << 5) + 0x5);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x230, 0xf + (0x8 << 4) + (0x7 << 9));
-
- // Set VC priority: MMSYS = ISP > VENC > VDEC = MJC
- M4U_WriteReg32(LARB0_BASE, 0x20, 0x0); // MMSYS
- M4U_WriteReg32(LARB1_BASE, 0x20, 0x2); // VDEC
- M4U_WriteReg32(LARB2_BASE, 0x20, 0x1); // ISP
- M4U_WriteReg32(LARB3_BASE, 0x20, 0x1); // VENC
-
- // for ISP HRT
- M4U_WriteReg32(LARB2_BASE, 0x24,
- (M4U_ReadReg32(LARB2_BASE, 0x24) & 0xf7ffffff));
-
- // for UI
- restSetting();
-
- //SMI common BW limiter
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, default_val_smi_l1arb[0]);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x1000);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000);
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x1000);
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 31); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x204, 8); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x208, 6); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 31); //disp_ovl1
- M4U_WriteReg32(LARB0_BASE, 0x210, 4); //disp_rdma1
- M4U_WriteReg32(LARB0_BASE, 0x21c, 2); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x224, 3); //mdp_wrot
-
-}
-
-static void icfpSetting( void ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x14E2); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x1000); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x1310); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x106F); //LARB3, VENC+JPG
-
- restSetting();
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x14); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x21c, 0x2); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x220, 0x2); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x224, 0x3); //mdp_wrot
-
- M4U_WriteReg32(LARB2_BASE, 0x200, 0xc); //imgo
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x4); //aao
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //esfko
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //lsci
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); //bpci
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); //imgi
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x3); //img2o
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //img2o
-
- M4U_WriteReg32(LARB3_BASE, 0x214, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB3_BASE, 0x218, 0x1); //jpgenc_bsdma
-
-}
-
-
-
-static void vrSetting( void ){
- //SMI BW limit
- // vss
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x1417); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x1000); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x11D0); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x11F8); //LARB3, VENC+JPG
-
- restSetting();
-
- //LARB 0 DISP+MDP
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x10); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x21c, 0x4); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x220, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x224, 0x6); //mdp_wrot
-
- M4U_WriteReg32(LARB2_BASE, 0x204, 0x2); //rrzo
- M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); //aao
- M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); //esfko
- M4U_WriteReg32(LARB2_BASE, 0x218, 0x2); //lsci
- M4U_WriteReg32(LARB2_BASE, 0x220, 0x2); //bpci
- M4U_WriteReg32(LARB2_BASE, 0x22c, 0x8); //imgi
- M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); //img2o
- M4U_WriteReg32(LARB2_BASE, 0x238, 0x2); //vipi
- M4U_WriteReg32(LARB2_BASE, 0x23c, 0x2); //vip2i
- M4U_WriteReg32(LARB2_BASE, 0x240, 0x2); //vip3i
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x214, 0x1); //jpgenc_rdma
- M4U_WriteReg32(LARB3_BASE, 0x218, 0x1); //jpgenc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x2); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-}
-
-static void vpSetting( void ){
-
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x1262); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x11E9); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x123D); //LARB3, VENC+JPG
-
- restSetting();
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x8); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x2); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x214, 0x1); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x218, 0x4); //mdp_wrot
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x1); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-}
-
-static void vpWfdSetting( void ){
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x104, 0x14B6); //LARB0, DISP+MDP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x108, 0x11EE); //LARB1, VDEC
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x10C, 0x1000); //LARB2, ISP
- M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x110, 0x11F2); //LARB3, VENC+JPG
-
- restSetting();
-
- M4U_WriteReg32(LARB0_BASE, 0x200, 0x12); //disp_ovl0
- M4U_WriteReg32(LARB0_BASE, 0x204, 0x8); //disp_rdma0
- M4U_WriteReg32(LARB0_BASE, 0x208, 0x6); //disp_wdma0
- M4U_WriteReg32(LARB0_BASE, 0x20c, 0x12); //disp_ovl1
- M4U_WriteReg32(LARB0_BASE, 0x210, 0x4); //disp_rdma1
- M4U_WriteReg32(LARB0_BASE, 0x21c, 0x3); //mdp_rdma
- M4U_WriteReg32(LARB0_BASE, 0x220, 0x2); //mdp_wdma
- M4U_WriteReg32(LARB0_BASE, 0x224, 0x5); //mdp_wrot
-
-
- M4U_WriteReg32(LARB1_BASE, 0x200, 0xb); //hw_vdec_mc_ext
- M4U_WriteReg32(LARB1_BASE, 0x204, 0xe); //hw_vdec_pp_ext
- M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); //hw_vdec_avc_mv_ext
- M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); //hw_vdec_vld_ext
-
-
- M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); //venc_rcpu
- M4U_WriteReg32(LARB3_BASE, 0x204, 0x2); //venc_rec
- M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); //venc_bsdma
- M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); //venc_sv_comv
- M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); //venc_rd_comv
- M4U_WriteReg32(LARB3_BASE, 0x224, 0x2); //venc_cur_luma
- M4U_WriteReg32(LARB3_BASE, 0x228, 0x1); //venc_cur_chroma
- M4U_WriteReg32(LARB3_BASE, 0x22c, 0x3); //venc_ref_luma
- M4U_WriteReg32(LARB3_BASE, 0x230, 0x2); //venc_ref_chroma
-}
-
-// Fake mode check, e.g. WFD
-static int fake_mode_handling(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- if( p_conf->scenario == SMI_BWC_SCEN_WFD ){
- if( p_conf->b_on_off ){
- wifi_disp_transaction = 1;
- SMIMSG("Enable WFD in profile: %d\n", smi_profile);
- }else{
- wifi_disp_transaction = 0;
- SMIMSG("Disable WFD in profile: %d\n", smi_profile);
- }
- return 1;
- }else{
- return 0;
- }
-}
-
-static int ovl_limit_uevent( int bwc_scenario, int ovl_pixel_limit ){
- int err = 0;
- char *envp[3];
- char scenario_buf[32] = "";
- char ovl_limit_buf[32] = "";
-
- snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
- snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
-
- envp[0] = scenario_buf;
- envp[1] = ovl_limit_buf;
- envp[2] = NULL;
-
- if( pSmiDev != NULL ){
- err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
- SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit,
- bwc_scenario);
- }
-
- if(err < 0)
- SMIMSG(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err);
-
- return err;
-}
-
-static int smi_bwc_config(
- MTK_SMI_BWC_CONFIG* p_conf,
- unsigned int *pu4LocalCnt ){
- int i;
- int result = 0;
- unsigned int u4Concurrency = 0;
- MTK_SMI_BWC_SCEN eFinalScen;
- static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
-
- if( smi_tuning_mode == 1 ){
- SMIMSG("Doesn't change profile in tunning mode");
- return 0;
- }
-
- if( (SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario) ){
- SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n",
- p_conf->scenario);
- return -1;
- }
-
- if (p_conf->b_on_off) {
- /* set mmdvfs step according to certain scenarios */
- mmdvfs_notify_scenario_enter(p_conf->scenario);
- } else {
- /* set mmdvfs step to default after the scenario exits */
- mmdvfs_notify_scenario_exit(p_conf->scenario);
- }
-
- spin_lock(&g_SMIInfo.SMI_lock);
- result = fake_mode_handling(p_conf, pu4LocalCnt);
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- // Fake mode is not a real SMI profile, so we need to return here
- if( result == 1 ){
- return 0;
- }
-
- spin_lock(&g_SMIInfo.SMI_lock);
-
- if( p_conf->b_on_off ){
- //turn on certain scenario
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
-
- if( NULL != pu4LocalCnt ){
- pu4LocalCnt[p_conf->scenario] += 1;
- }
- }else{
- //turn off certain scenario
- if( 0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] ){
- SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
- p_conf->scenario,
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
- }else{
- g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
- }
-
- if( NULL != pu4LocalCnt ){
- if( 0 == pu4LocalCnt[p_conf->scenario] ){
- SMIMSG(
- "Process : %s did too many turning off for local SMI profile:%d,%d\n",
- current->comm, p_conf->scenario,
- pu4LocalCnt[p_conf->scenario]);
- }else{
- pu4LocalCnt[p_conf->scenario] -= 1;
- }
- }
- }
-
- for( i = 0; i < SMI_BWC_SCEN_CNT; i++ ){
- if( g_SMIInfo.pu4ConcurrencyTable[i] ){
- u4Concurrency |= (1 << i);
- }
- }
-
- /* notify mmdvfs concurrency */
- mmdvfs_notify_scenario_concurrency(u4Concurrency);
-
- if( (1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_MM_GPU;
- }else if( (1 << SMI_BWC_SCEN_ICFP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_ICFP;
- } else if ((1 << SMI_BWC_SCEN_VSS) & u4Concurrency) {
- eFinalScen = SMI_BWC_SCEN_VR;
- }else if( (1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR_SLOW;
- }else if( (1 << SMI_BWC_SCEN_VR) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VR;
- }else if( (1 << SMI_BWC_SCEN_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VP;
- }else if( (1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
- }else if( (1 << SMI_BWC_SCEN_VENC) & u4Concurrency ){
- eFinalScen = SMI_BWC_SCEN_VENC;
- }else{
- eFinalScen = SMI_BWC_SCEN_NORMAL;
- }
-
- if( ePreviousFinalScen == eFinalScen ){
- SMIMSG("Scen equal%d,don't change\n", eFinalScen);
- spin_unlock(&g_SMIInfo.SMI_lock);
- return 0;
- }else{
- ePreviousFinalScen = eFinalScen;
- }
-
- /* turn on larb clock */
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_on(i);
- }
-
- smi_profile = eFinalScen;
-
- /* Bandwidth Limiter */
- switch( eFinalScen ){
- case SMI_BWC_SCEN_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
- //fixed wrong judgement
- if( !wifi_disp_transaction ){
- vpSetting();
- }else{
- vpWfdSetting();
- }
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_SWDEC_VP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
- vpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
- break;
-
- case SMI_BWC_SCEN_ICFP:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_ICFP");
- icfpSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
- case SMI_BWC_SCEN_VR:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
- break;
-
- case SMI_BWC_SCEN_VR_SLOW:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
- smi_profile = SMI_BWC_SCEN_VR_SLOW;
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_VENC:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
- vrSetting();
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
-
- case SMI_BWC_SCEN_NORMAL:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- case SMI_BWC_SCEN_MM_GPU:
- SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
- g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- initSetting();
- break;
-
- default:
- SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
- initSetting();
- g_smi_bwc_mm_info .hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
- break;
- }
-
- /*turn off larb clock*/
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- spin_unlock(&g_SMIInfo.SMI_lock);
-
- ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
-
- /* force 30 fps in VR slow motion, because disp driver set fps apis got mutex, call these APIs only when necessary */
- {
- static unsigned int current_fps = 0;
-
- if( (eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30) ){ /* force 30 fps in VR slow motion profile */
- primary_display_force_set_vsync_fps(30);
- current_fps = 30;
- SMIMSG("[SMI_PROFILE] set 30 fps\n");
- }else if( (eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30) ){ /* back to normal fps */
- current_fps = primary_display_get_fps();
- primary_display_force_set_vsync_fps(current_fps);
- SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
- }
- }
-
- SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
- (p_conf->b_on_off ? "on" : "off"), eFinalScen,
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
- g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
-
- return 0;
-}
-
-struct larb_monitor larb_monitor_handler =
-{
- .level = LARB_MONITOR_LEVEL_HIGH,
- .backup = on_larb_power_off,
- .restore = on_larb_power_on
-};
-
-int smi_common_init( void ){
- int i;
-
- SMIMSG("Enter smi_common_init\n")
- for( i = 0; i < SMI_LARB_NR; i++ ){
- pLarbRegBackUp[i] = (unsigned int*) kmalloc(LARB_BACKUP_REG_SIZE,
- GFP_KERNEL | __GFP_ZERO);
- if( pLarbRegBackUp[i] == NULL ){
- SMIERR("pLarbRegBackUp kmalloc fail %d \n", i);
- }
- }
-
- /*
- * make sure all larb power is on before we register callback func.
- * then, when larb power is first off, default register value will be backed up.
- */
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- SMIMSG("Enalbe CLK of larb%d\n", i );
- larb_clock_on(i);
- }
-
- /* apply init setting after kernel boot */
- SMIMSG("Enter smi_common_init\n")
- initSetting();
-
- register_larb_monitor(&larb_monitor_handler);
-
- for( i = 0; i < SMI_LARB_NR; i++ ){
- larb_clock_off(i);
- }
-
- return 0;
-}
-
-static int smi_open( struct inode *inode, struct file *file ){
- file->private_data = kmalloc(SMI_BWC_SCEN_CNT * sizeof(unsigned int),
- GFP_ATOMIC);
-
- if( NULL == file->private_data ){
- SMIMSG("Not enough entry for DDP open operation\n");
- return -ENOMEM;
- }
-
- memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- return 0;
-}
-
-static int smi_release( struct inode *inode, struct file *file ){
-
-#if 0
- unsigned long u4Index = 0;
- unsigned long u4AssignCnt = 0;
- unsigned long * pu4Cnt = (unsigned long *)file->private_data;
- MTK_SMI_BWC_CONFIG config;
-
- for(; u4Index < SMI_BWC_SCEN_CNT; u4Index += 1)
- {
- if(pu4Cnt[u4Index])
- {
- SMIMSG("Process:%s does not turn off BWC properly , force turn off %d\n" , current->comm , u4Index);
- u4AssignCnt = pu4Cnt[u4Index];
- config.b_on_off = 0;
- config.scenario = (MTK_SMI_BWC_SCEN)u4Index;
- do
- {
- smi_bwc_config( &config , pu4Cnt);
- }
- while(0 < u4AssignCnt);
- }
- }
-#endif
-
- if( NULL != file->private_data ){
- kfree(file->private_data);
- file->private_data = NULL;
- }
-
- return 0;
-}
-/* GMP start */
-
-void smi_bwc_mm_info_set( int property_id, long val1, long val2 ){
-
- switch( property_id ){
- case SMI_BWC_INFO_CON_PROFILE:
- g_smi_bwc_mm_info.concurrent_profile = (int) val1;
- break;
- case SMI_BWC_INFO_SENSOR_SIZE:
- g_smi_bwc_mm_info.sensor_size[0] = val1;
- g_smi_bwc_mm_info.sensor_size[1] = val2;
- break;
- case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
- g_smi_bwc_mm_info.video_record_size[0] = val1;
- g_smi_bwc_mm_info.video_record_size[1] = val2;
- break;
- case SMI_BWC_INFO_DISP_SIZE:
- g_smi_bwc_mm_info.display_size[0] = val1;
- g_smi_bwc_mm_info.display_size[1] = val2;
- break;
- case SMI_BWC_INFO_TV_OUT_SIZE:
- g_smi_bwc_mm_info.tv_out_size[0] = val1;
- g_smi_bwc_mm_info.tv_out_size[1] = val2;
- break;
- case SMI_BWC_INFO_FPS:
- g_smi_bwc_mm_info.fps = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
- g_smi_bwc_mm_info.video_encode_codec = (int) val1;
- break;
- case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
- g_smi_bwc_mm_info.video_decode_codec = (int) val1;
- break;
- }
-}
-
-/* GMP end */
-
-static long smi_ioctl(
- struct file * pFile,
- unsigned int cmd,
- unsigned long param ){
- int ret = 0;
-
- // unsigned long * pu4Cnt = (unsigned long *)pFile->private_data;
-
- switch( cmd ){
-
- /* disable reg access ioctl by default for possible security holes */
- // TBD: check valid SMI register range
- case MTK_IOC_SMI_BWC_CONFIG: {
- MTK_SMI_BWC_CONFIG cfg;
- ret = copy_from_user(&cfg, (void*) param,
- sizeof(MTK_SMI_BWC_CONFIG));
- if( ret ){
- SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
- return -EFAULT;
- }
-
- ret = smi_bwc_config(&cfg, NULL);
-
- break;
- }
- /* GMP start */
- case MTK_IOC_SMI_BWC_INFO_SET: {
- MTK_SMI_BWC_INFO_SET cfg;
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... start");
- ret = copy_from_user(&cfg, (void *) param,
- sizeof(MTK_SMI_BWC_INFO_SET));
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- /* Set the address to the value assigned by user space program */
- smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish");
- break;
- }
- case MTK_IOC_SMI_BWC_INFO_GET: {
- ret = copy_to_user((void *) param, (void *) &g_smi_bwc_mm_info,
- sizeof(MTK_SMI_BWC_MM_INFO));
-
- if( ret ){
- SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n",
- ret);
- return -EFAULT;
- }
- //SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish");
- break;
- }
- /* GMP end */
-
- case MTK_IOC_SMI_DUMP_LARB: {
- unsigned int larb_index;
-
- ret = copy_from_user(&larb_index, (void*) param,
- sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpLarb(larb_index);
- }
- break;
-
- case MTK_IOC_SMI_DUMP_COMMON: {
- unsigned int arg;
-
- ret = copy_from_user(&arg, (void*) param, sizeof(unsigned int));
- if( ret ){
- return -EFAULT;
- }
-
- smi_dumpCommon();
- }
- break;
-
- default:
- return -1;
- }
-
- return ret;
-}
-
-static const struct file_operations smiFops =
-{
- .owner = THIS_MODULE,
- .open = smi_open,
- .release = smi_release,
- .unlocked_ioctl = smi_ioctl,
- .compat_ioctl = MTK_SMI_COMPAT_ioctl,
-};
-
-static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
-static inline int smi_register( void ){
- if( alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI") ){
- SMIERR("Allocate device No. failed");
- return -EAGAIN;
- }
- //Allocate driver
- pSmiDev = cdev_alloc();
-
- if( NULL == pSmiDev ){
- unregister_chrdev_region(smiDevNo, 1);
- SMIERR("Allocate mem for kobject failed");
- return -ENOMEM;
- }
-
- //Attatch file operation.
- cdev_init(pSmiDev, &smiFops);
- pSmiDev->owner = THIS_MODULE;
-
- //Add to system
- if( cdev_add(pSmiDev, smiDevNo, 1) ){
- SMIERR("Attatch file operation failed");
- unregister_chrdev_region(smiDevNo, 1);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-static struct class *pSmiClass = NULL;
-
-static int smi_probe( struct platform_device *pdev ){
-
- int i;
-
- static unsigned int smi_probe_cnt = 0;
- struct device* smiDevice = NULL;
- SMIMSG("Enter smi_probe\n");
- //Debug only
- if( smi_probe_cnt != 0 ){
- SMIERR("Onlye support 1 SMI driver probed\n");
- return 0;
- }
- smi_probe_cnt++;
- SMIMSG("Allocate smi_dev space\n");
- smi_dev = krealloc(smi_dev, sizeof(struct smi_device), GFP_KERNEL);
-
- if( smi_dev == NULL ){
- SMIERR("Unable to allocate memory for smi driver\n");
- return -ENOMEM;
- }
- if( NULL == pdev ){
- SMIERR("platform data missed\n");
- return -ENXIO;
- }
- // Keep the device structure
- smi_dev->dev = &pdev->dev;
-
- // Map registers
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- SMIMSG("Save region: %d\n", i);
- smi_dev->regs[i] = (void *) of_iomap(pdev->dev.of_node, i);
-
- if( !smi_dev->regs[i] ){
- SMIERR("Unable to ioremap registers, of_iomap fail, i=%d \n", i);
- return -ENOMEM;
- }
-
- // Record the register base in global variable
- gSMIBaseAddrs[i] = (unsigned long) (smi_dev->regs[i]);
- SMIMSG("DT, i=%d, region=%s, map_addr=0x%p, reg_pa=0x%lx\n", i,
- smi_get_region_name(i), smi_dev->regs[i], smi_reg_pa_base[i]);
- }
-
-#if defined(SMI_INTERNAL_CCF_SUPPORT)
- smi_dev->smi_common_clk = get_smi_clk("smi-common");
- smi_dev->smi_larb0_clk = get_smi_clk("smi-larb0");
- smi_dev->img_larb2_clk = get_smi_clk("img-larb2");
- smi_dev->vdec0_vdec_clk = get_smi_clk("vdec0-vdec");
- smi_dev->vdec1_larb_clk = get_smi_clk("vdec1-larb");
- smi_dev->venc_larb_clk = get_smi_clk("venc-larb");
-#endif
-
- SMIMSG("Execute smi_register\n");
- if( smi_register() ){
- dev_err(&pdev->dev, "register char failed\n");
- return -EAGAIN;
- }
-
- pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
- if(IS_ERR(pSmiClass)) {
- int ret = PTR_ERR(pSmiClass);
- SMIERR("Unable to create class, err = %d", ret);
- return ret;
- }
- SMIMSG("Create davice\n");
- smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
- smiDeviceUevent = smiDevice;
-
- SMIMSG("SMI probe done.\n");
-
- // To adapt the legacy codes
- smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
- smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
- smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
- smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
- smi_reg_base_barb3 = gSMIBaseAddrs[SMI_LARB3_REG_INDX];
-
- gLarbBaseAddr[0] = LARB0_BASE;
- gLarbBaseAddr[1] = LARB1_BASE;
- gLarbBaseAddr[2] = LARB2_BASE;
- gLarbBaseAddr[3] = LARB3_BASE;
-
- SMIMSG("Execute smi_common_init\n");
- smi_common_init();
-
- SMIMSG("Execute SMI_DBG_Init\n");
- SMI_DBG_Init();
- return 0;
-
-}
-
-char* smi_get_region_name( unsigned int region_indx ){
- switch( region_indx ){
- case SMI_COMMON_REG_INDX:
- return "smi_common";
- case SMI_LARB0_REG_INDX:
- return "larb0";
- case SMI_LARB1_REG_INDX:
- return "larb1";
- case SMI_LARB2_REG_INDX:
- return "larb2";
- case SMI_LARB3_REG_INDX:
- return "larb3";
- default:
- SMIMSG("invalid region id=%d", region_indx);
- return "unknown";
- }
-}
-
-void register_base_dump( void ){
- int i = 0;
- unsigned long pa_value = 0;
- unsigned long va_value = 0;
-
- for( i = 0; i < SMI_REG_REGION_MAX; i++ ){
- va_value = gSMIBaseAddrs[i];
- pa_value = virt_to_phys((void*) va_value);
- SMIMSG("REG BASE:%s-->VA=0x%lx,PA=0x%lx,SPEC=0x%lx\n",
- smi_get_region_name(i), va_value, pa_value, smi_reg_pa_base[i]);
- }
-}
-
-static int smi_remove( struct platform_device *pdev ){
- cdev_del(pSmiDev);
- unregister_chrdev_region(smiDevNo, 1);
- device_destroy(pSmiClass, smiDevNo);
- class_destroy( pSmiClass);
- return 0;
-}
-
-static int smi_suspend( struct platform_device *pdev, pm_message_t mesg ){
- return 0;
-}
-
-static int smi_resume( struct platform_device *pdev ){
- return 0;
-}
-
-
-static const struct of_device_id smi_of_ids[] ={
- { .compatible = "mediatek,SMI_COMMON",},
- {}
-};
-
-static struct platform_driver smiDrv ={
- .probe = smi_probe,
- .remove = smi_remove,
- .suspend= smi_suspend,
- .resume = smi_resume,
- .driver ={
- .name = "MTK_SMI",
- .owner = THIS_MODULE,
- .of_match_table = smi_of_ids,
-
- }
-};
-
-static int __init smi_init(void)
-{
- SMIMSG("smi_init enter\n");
- spin_lock_init(&g_SMIInfo.SMI_lock);
- /* MMDVFS init */
- mmdvfs_init(&g_smi_bwc_mm_info);
-
- memset(g_SMIInfo.pu4ConcurrencyTable , 0 , SMI_BWC_SCEN_CNT * sizeof(unsigned int));
-
- // Informs the kernel about the function to be called
- // if hardware matching MTK_SMI has been found
- SMIMSG("register platform driver\n");
- if (platform_driver_register(&smiDrv)){
- SMIERR("failed to register MAU driver");
- return -ENODEV;
- }
- SMIMSG("exit smi_init\n");
- return 0;
-}
-
-static void __exit smi_exit(void)
-{
- platform_driver_unregister(&smiDrv);
-
-}
-
-static void smi_dumpCommonDebugMsg( int output_gce_buffer ){
- unsigned long u4Base;
- int smiCommonClkEnabled = 0;
-
- smiCommonClkEnabled = clock_is_on(MT_CG_DISP0_SMI_COMMON);
- //SMI COMMON dump
- if( smi_debug_level == 0 && (!smiCommonClkEnabled) ){
- SMIMSG3(output_gce_buffer, "===SMI common clock is disabled===\n");
- return;
- }
-
- SMIMSG3(output_gce_buffer, "===SMI common reg dump, CLK: %d===\n", smiCommonClkEnabled);
-
- u4Base = SMI_COMMON_EXT_BASE;
- SMIMSG3(output_gce_buffer, "[0x100,0x104,0x108]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x100), M4U_ReadReg32(u4Base, 0x104),
- M4U_ReadReg32(u4Base, 0x108));
- SMIMSG3(output_gce_buffer, "[0x10C,0x110,0x114]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x10C), M4U_ReadReg32(u4Base, 0x110),
- M4U_ReadReg32(u4Base, 0x114));
- SMIMSG3(output_gce_buffer, "[0x220,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x220), M4U_ReadReg32(u4Base, 0x230),
- M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base, 0x238));
- SMIMSG3(output_gce_buffer, "[0x400,0x404,0x408]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x400), M4U_ReadReg32(u4Base, 0x404),
- M4U_ReadReg32(u4Base, 0x408));
- SMIMSG3(output_gce_buffer, "[0x40C,0x430,0x440]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x40C), M4U_ReadReg32(u4Base, 0x430),
- M4U_ReadReg32(u4Base, 0x440));
-
-}
-static int smi_larb_clock_is_on( unsigned int larb_index ){
-
- int result = 0;
-#if !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- switch( larb_index ){
- case 0:
- result = clock_is_on(MT_CG_DISP0_SMI_LARB0);
- break;
- case 1:
- result = clock_is_on(MT_CG_VDEC1_LARB);
- break;
- case 2:
- result = clock_is_on(MT_CG_IMAGE_LARB2_SMI);
- break;
- case 3:
- result = clock_is_on(MT_CG_VENC_VENC);
- break;
- default:
- result = 0;
- break;
- }
-#endif // !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING)
- return result;
-
-}
-static void smi_dumpLarbDebugMsg( unsigned int u4Index ){
- unsigned long u4Base = 0;
-
- int larbClkEnabled = 0;
-
- u4Base = get_larb_base_addr(u4Index);
-
- larbClkEnabled = smi_larb_clock_is_on(u4Index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG("Doesn't support reg dump for Larb%d\n", u4Index);
-
- return;
- }else if( (larbClkEnabled != 0) || smi_debug_level > 0 ){
- SMIMSG("===SMI LARB%d reg dump, CLK: %d===\n", u4Index, larbClkEnabled);
-
- // Staus Registers
- SMIMSG("[0x0,0x8,0x10]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
- M4U_ReadReg32(u4Base, 0x8), M4U_ReadReg32(u4Base, 0x10));
- SMIMSG("[0x24,0x50,0x60]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x24), M4U_ReadReg32(u4Base, 0x50),
- M4U_ReadReg32(u4Base, 0x60));
- SMIMSG("[0xa0,0xa4,0xa8]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xa0), M4U_ReadReg32(u4Base, 0xa4),
- M4U_ReadReg32(u4Base, 0xa8));
- SMIMSG("[0xac,0xb0,0xb4]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xac), M4U_ReadReg32(u4Base, 0xb0),
- M4U_ReadReg32(u4Base, 0xb4));
- SMIMSG("[0xb8,0xbc,0xc0]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0xb8), M4U_ReadReg32(u4Base, 0xbc),
- M4U_ReadReg32(u4Base, 0xc0));
- SMIMSG("[0xc8,0xcc]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xc8),
- M4U_ReadReg32(u4Base, 0xcc));
- // Settings
- SMIMSG("[0x200, 0x204, 0x208]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x200), M4U_ReadReg32(u4Base, 0x204),
- M4U_ReadReg32(u4Base, 0x208));
-
- SMIMSG("[0x20c, 0x210, 0x214]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x20c), M4U_ReadReg32(u4Base, 0x210),
- M4U_ReadReg32(u4Base, 0x214));
-
- SMIMSG("[0x218, 0x21c, 0x220]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x218), M4U_ReadReg32(u4Base, 0x21c),
- M4U_ReadReg32(u4Base, 0x220));
-
- SMIMSG("[0x224, 0x228, 0x22c]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x224), M4U_ReadReg32(u4Base, 0x228),
- M4U_ReadReg32(u4Base, 0x22c));
-
- SMIMSG("[0x230, 0x234, 0x238]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234),
- M4U_ReadReg32(u4Base, 0x238));
-
- SMIMSG("[0x23c, 0x240, 0x244]=[0x%x,0x%x,0x%x]\n",
- M4U_ReadReg32(u4Base, 0x23c), M4U_ReadReg32(u4Base, 0x240),
- M4U_ReadReg32(u4Base, 0x244));
-
- SMIMSG("[0x248, 0x24c]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x248),
- M4U_ReadReg32(u4Base, 0x24c));
- }else{
- SMIMSG("===SMI LARB%d clock is disabled===\n", u4Index);
- }
-
-}
-
-static void smi_dump_format(
- unsigned long base,
- unsigned int from,
- unsigned int to ){
- int i, j, left;
- unsigned int value[8];
-
- for( i = from; i <= to; i += 32 ){
- for( j = 0; j < 8; j++ ){
- value[j] = M4U_ReadReg32(base, i + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1],
- value[2], value[3], value[4], value[5], value[6], value[7]);
- }
-
- left = ((from - to) / 4 + 1) % 8;
-
- if( left ){
- memset(value, 0, 8 * sizeof(unsigned int));
-
- for( j = 0; j < left; j++ ){
- value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
- }
-
- SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0],
- value[1], value[2], value[3], value[4], value[5], value[6],
- value[7]);
- }
-}
-
-static void smi_dumpLarb( unsigned int index ){
- unsigned long u4Base;
-
- u4Base = get_larb_base_addr(index);
-
- if( u4Base == SMI_ERROR_ADDR ){
- SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
-
- return;
- }else{
- SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
-
- smi_dump_format(u4Base, 0, 0x434);
- smi_dump_format(u4Base, 0xF00, 0xF0C);
- }
-}
-
-static void smi_dumpCommon( void ){
- SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
-
- smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x444);
-}
-
-void smi_dumpDebugMsg( void ){
- unsigned int u4Index;
-
- // SMI COMMON dump, 0 stands for not pass log to CMDQ error dumping messages
- smi_dumpCommonDebugMsg(0);
-
- // dump all SMI LARB
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- smi_dumpLarbDebugMsg(u4Index);
- }
-}
-
-int smi_debug_bus_hanging_detect( unsigned int larbs, int show_dump ){
- return smi_debug_bus_hanging_detect_ext(larbs, show_dump, 0);
-}
-
-//output_gce_buffer = 1, write log into kernel log and CMDQ buffer. output_gce_buffer = 0, write log into kernel log only
-int smi_debug_bus_hanging_detect_ext( unsigned int larbs, int show_dump, int output_gce_buffer){
-
- int i = 0;
- int dump_time = 0;
- int is_smi_issue = 0;
- int status_code = 0;
- // Keep the dump result
- unsigned char smi_common_busy_count = 0;
- volatile unsigned int reg_temp = 0;
- unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
- unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };
- int smi_larb_clk_status[SMI_LARB_NR] = { 0 };
- // dump resister and save resgister status
- for( dump_time = 0; dump_time < 5; dump_time++ ){
- unsigned int u4Index = 0;
- reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x440);
- if( (reg_temp & (1 << 0)) == 0 ){
- // smi common is busy
- smi_common_busy_count++;
- }
- // Dump smi common regs
- if( show_dump != 0 ){
- smi_dumpCommonDebugMsg(output_gce_buffer);
- }
- for( u4Index = 0; u4Index < SMI_LARB_NR; u4Index++ ){
- unsigned long u4Base = get_larb_base_addr(u4Index);
- smi_larb_clk_status[u4Index] = smi_larb_clock_is_on(u4Index);
- //check larb clk is enable
- if( smi_larb_clk_status[u4Index] != 0){
- if( u4Base != SMI_ERROR_ADDR ){
- reg_temp = M4U_ReadReg32(u4Base, 0x0);
- if( reg_temp != 0 ){
- // Larb is busy
- smi_larb_busy_count[u4Index]++;
- }
- smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
- if( show_dump != 0 ){
- smi_dumpLarbDebugMsg(u4Index);
- }
- }
- }
- }
-
- }
-
- // Show the checked result
- for( i = 0; i < SMI_LARB_NR; i++ ){ // Check each larb
- if( SMI_DGB_LARB_SELECT(larbs, i) ){
- // larb i has been selected
- // Get status code
-
- //check busy count when larb clk is enable
- if( smi_larb_clk_status[i] != 0){
- if( smi_larb_busy_count[i] == 5 ){ // The larb is always busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 1;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 2;
- }else{
- status_code = 5; // smi common is sometimes busy and idle
- }
- }else if( smi_larb_busy_count[i] == 0 ){ // The larb is always idle
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 3;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 4;
- }else{
- status_code = 6; // smi common is sometimes busy and idle
- }
- }else{ //sometime the larb is busy
- if( smi_common_busy_count == 5 ){ // smi common is always busy
- status_code = 7;
- }else if( smi_common_busy_count == 0 ){ // smi common is always idle
- status_code = 8;
- }else{
- status_code = 9; // smi common is sometimes busy and idle
- }
- }
- }else{
- status_code = 10;
- }
-
- // Send the debug message according to the final result
- switch( status_code ){
- case 1:
- case 3:
- case 5:
- case 7:
- case 8:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- break;
- case 2:
- if( smi_larb_mmu_status[i] == 0 ){
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine state first\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- SMIMSG3(
- output_gce_buffer,
- "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
- i);
- }else{
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> MMU port config error\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- is_smi_issue = 1;
- }
- break;
- case 4:
- case 6:
- case 9:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- case 10:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d clk is disbable, status=%d ==> no need to check\n",
- i, status_code);
- break;
- default:
- SMIMSG3(
- output_gce_buffer,
- "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown\n",
- i, smi_larb_busy_count[i], smi_common_busy_count,
- status_code);
- break;
- }
- }
-
- }
-
- return is_smi_issue;
-}
-
-void smi_client_status_change_notify( int module, int mode ){
-
-}
-
-#if IS_ENABLED(CONFIG_COMPAT)
-// 32 bits process ioctl support:
-// This is prepared for the future extension since currently the sizes of 32 bits
-// and 64 bits smi parameters are the same.
-
-typedef struct
-{
- compat_int_t scenario;
- compat_int_t b_on_off; //0 : exit this scenario , 1 : enter this scenario
-}MTK_SMI_COMPAT_BWC_CONFIG;
-
-typedef struct
-{
- compat_int_t property;
- compat_int_t value1;
- compat_int_t value2;
-}MTK_SMI_COMPAT_BWC_INFO_SET;
-
-typedef struct
-{
- compat_uint_t flag; // Reserved
- compat_int_t concurrent_profile;
- compat_int_t sensor_size[2];
- compat_int_t video_record_size[2];
- compat_int_t display_size[2];
- compat_int_t tv_out_size[2];
- compat_int_t fps;
- compat_int_t video_encode_codec;
- compat_int_t video_decode_codec;
- compat_int_t hw_ovl_limit;
-}MTK_SMI_COMPAT_BWC_MM_INFO;
-
-#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_COMPAT_BWC_CONFIG)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_COMPAT_BWC_INFO_SET)
-#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_COMPAT_BWC_MM_INFO)
-
-static int compat_get_smi_bwc_config_struct(
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
- MTK_SMI_BWC_CONFIG __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->scenario));
- err |= put_user(i, &(data->scenario));
- err |= get_user(i, &(data32->b_on_off));
- err |= put_user(i, &(data->b_on_off));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_set_struct(
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
- MTK_SMI_BWC_INFO_SET __user *data){
-
- compat_int_t i;
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(i, &(data32->property));
- err |= put_user(i, &(data->property));
- err |= get_user(i, &(data32->value1));
- err |= put_user(i, &(data->value1));
- err |= get_user(i, &(data32->value2));
- err |= put_user(i, &(data->value2));
-
- return err;
-}
-
-static int compat_get_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data32->flag));
- err |= put_user(u, &(data->flag));
- err |= get_user(i, &(data32->concurrent_profile));
- err |= put_user(i, &(data->concurrent_profile));
- err |= copy_from_user(p, &(data32->sensor_size),sizeof(p));
- err |= copy_to_user(&(data->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->video_record_size),sizeof(p));
- err |= copy_to_user(&(data->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->display_size),sizeof(p));
- err |= copy_to_user(&(data->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data32->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data32->fps));
- err |= put_user(i, &(data->fps));
- err |= get_user(i, &(data32->video_encode_codec));
- err |= put_user(i, &(data->video_encode_codec));
- err |= get_user(i, &(data32->video_decode_codec));
- err |= put_user(i, &(data->video_decode_codec));
- err |= get_user(i, &(data32->hw_ovl_limit));
- err |= put_user(i, &(data->hw_ovl_limit));
-
-
- return err;
-}
-
-static int compat_put_smi_bwc_mm_info_struct(
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
- MTK_SMI_BWC_MM_INFO __user *data)
-{
-
- compat_uint_t u;
- compat_int_t i;
- compat_int_t p[2];
- int err;
-
- // since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here
- err = get_user(u, &(data->flag));
- err |= put_user(u, &(data32->flag));
- err |= get_user(i, &(data->concurrent_profile));
- err |= put_user(i, &(data32->concurrent_profile));
- err |= copy_from_user(p, &(data->sensor_size),sizeof(p));
- err |= copy_to_user(&(data32->sensor_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->video_record_size),sizeof(p));
- err |= copy_to_user(&(data32->video_record_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->display_size),sizeof(p));
- err |= copy_to_user(&(data32->display_size),p ,sizeof(p));
- err |= copy_from_user(p, &(data->tv_out_size),sizeof(p));
- err |= copy_to_user(&(data32->tv_out_size),p ,sizeof(p));
- err |= get_user(i, &(data->fps));
- err |= put_user(i, &(data32->fps));
- err |= get_user(i, &(data->video_encode_codec));
- err |= put_user(i, &(data32->video_encode_codec));
- err |= get_user(i, &(data->video_decode_codec));
- err |= put_user(i, &(data32->video_decode_codec));
- err |= get_user(i, &(data->hw_ovl_limit));
- err |= put_user(i, &(data32->hw_ovl_limit));
- return err;
-}
-
-long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- long ret;
-
- if (!filp->f_op || !filp->f_op->unlocked_ioctl)
- return -ENOTTY;
-
- switch (cmd){
- case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
- {
- if(COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_CONFIG");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
- MTK_SMI_BWC_CONFIG __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_config_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
- (unsigned long)data);
- return ret;
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET)
- {
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_SET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
-
- MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
- MTK_SMI_BWC_INFO_SET __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
- if (err)
- return err;
-
- return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
- (unsigned long)data);
- }
- }
-
- case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
- {
-
- if(COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET){
- SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_GET");
- return filp->f_op->unlocked_ioctl(filp, cmd,(unsigned long)compat_ptr(arg));
- } else{
- MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
- MTK_SMI_BWC_MM_INFO __user *data;
- int err;
-
- data32 = compat_ptr(arg);
- data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
-
- if (data == NULL)
- return -EFAULT;
-
- err = compat_get_smi_bwc_mm_info_struct(data32, data);
- if (err)
- return err;
-
- ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
- (unsigned long)data);
-
- err = compat_put_smi_bwc_mm_info_struct(data32, data);
-
- if (err)
- return err;
-
- return ret;
- }
- }
-
- case MTK_IOC_SMI_DUMP_LARB:
- case MTK_IOC_SMI_DUMP_COMMON:
-
- return filp->f_op->unlocked_ioctl(filp, cmd,
- (unsigned long)compat_ptr(arg));
- default:
- return -ENOIOCTLCMD;
- }
-
-}
-
-#endif
-
-module_init( smi_init);
-module_exit( smi_exit);
-
-module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
-module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
-module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
-
-MODULE_DESCRIPTION("MTK SMI driver");
-MODULE_AUTHOR("Kendrick Hsu<kendrick.hsu@mediatek.com>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_debug.c b/drivers/misc/mediatek/smi/mt6735/smi_debug.c
deleted file mode 100644
index bf06deae9..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_debug.c
+++ /dev/null
@@ -1,153 +0,0 @@
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/platform_device.h>
-#include <linux/cdev.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/earlysuspend.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/aee.h>
-#include <linux/timer.h>
-//#include <asm/system.h>
-#include <asm-generic/irq_regs.h>
-//#include <asm/mach/map.h>
-#include <mach/sync_write.h>
-#include <mach/irqs.h>
-#include <asm/cacheflush.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/fb.h>
-#include <linux/debugfs.h>
-#include <mach/mt_typedefs.h>
-#include <mach/m4u.h>
-#include <mach/mt_smi.h>
-
-#include "smi_common.h"
-
-#include <linux/xlog.h>
-
-#ifdef D1
- #include "smi_reg_d1.h"
-#elif defined D2
- #include "smi_reg_d2.h"
-#else
- #include "smi_reg_d3.h"
-#endif
-
-#define SMI_LOG_TAG "smi"
-
-static char debug_buffer[4096];
-
-static void process_dbg_opt(const char *opt)
-{
- if (0 == strncmp(opt, "set_reg:", 8 ))
- {
- unsigned long addr;
- unsigned int val;
- char *p = (char *)opt + 8;
-
- addr = (unsigned long) simple_strtoul(p, &p, 16);
- p++;
- val = (unsigned int) simple_strtoul(p, &p, 16);
-
- SMIMSG("set register: 0x%lx = 0x%x\n", addr, val);
-
- COM_WriteReg32(addr, val);
- }
- if (0 == strncmp(opt, "get_reg:", 8 ))
- {
- unsigned long addr;
- char *p = (char *)opt + 8;
-
- addr = (unsigned long) simple_strtoul(p, &p, 16);
-
- SMIMSG("get register: 0x%lx = 0x%x \n", addr, COM_ReadReg32(addr));
- }
-
-
-
- return;
-}
-
-
-static void process_dbg_cmd(char *cmd)
-{
- char *tok;
- while ((tok = strsep(&cmd, " ")) != NULL)
- {
- process_dbg_opt(tok);
- }
-}
-
-
-// ---------------------------------------------------------------------------
-// Debug FileSystem Routines
-// ---------------------------------------------------------------------------
-
-struct dentry *smi_dbgfs = NULL;
-
-
-static int debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t debug_read(struct file *file,
- char __user *ubuf, size_t count, loff_t *ppos)
-{
- int n = 0;
- return simple_read_from_buffer(ubuf, count, ppos, debug_buffer, n);
-}
-
-
-static ssize_t debug_write(struct file *file,
- const char __user *ubuf, size_t count, loff_t *ppos)
-{
- const int debug_bufmax = sizeof(debug_buffer) - 1;
- size_t ret;
-
- ret = count;
-
- if (count > debug_bufmax)
- count = debug_bufmax;
-
- if (copy_from_user(&debug_buffer, ubuf, count))
- return -EFAULT;
-
- debug_buffer[count] = 0;
-
- process_dbg_cmd(debug_buffer);
-
- return ret;
-}
-
-
-static struct file_operations debug_fops = {
- .read = debug_read,
- .write = debug_write,
- .open = debug_open,
-};
-
-
-void SMI_DBG_Init(void)
-{
- smi_dbgfs = debugfs_create_file("smi",
- S_IFREG|S_IRUGO, NULL, (void *)0, &debug_fops);
-}
-
-
-void SMI_DBG_Deinit(void)
-{
- debugfs_remove(smi_dbgfs);
-}
-
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_debug.h b/drivers/misc/mediatek/smi/mt6735/smi_debug.h
deleted file mode 100644
index 94512dc12..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_debug.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __MT6735_SMI_DEBUG_H__
-#define __MT6735_SMI_DEBUG_H__
-
-#define SMI_DBG_DISPSYS (1<<0)
-#define SMI_DBG_VDEC (1<<1)
-#define SMI_DBG_IMGSYS (1<<2)
-#define SMI_DBG_VENC (1<<3)
-#define SMI_DBG_MJC (1<<4)
-
-#define SMI_DGB_LARB_SELECT(smi_dbg_larb,n) ((smi_dbg_larb) & (1<<n))
-
-#ifndef CONFIG_MTK_SMI
- #define smi_debug_bus_hanging_detect(larbs, show_dump) {}
- #define smi_debug_bus_hanging_detect_ext(larbs, show_dump, output_gce_buffer) {}
-#else
- int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump);
- //output_gce_buffer = 1, pass log to CMDQ error dumping messages
- int smi_debug_bus_hanging_detect_ext( unsigned int larbs, int show_dump, int output_gce_buffer);
-
-#endif
-
-
-#endif //__MT6735_SMI_DEBUG_H__
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_reg_d1.h b/drivers/misc/mediatek/smi/mt6735/smi_reg_d1.h
deleted file mode 100644
index cceadc9f7..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_reg_d1.h
+++ /dev/null
@@ -1,467 +0,0 @@
-#ifndef _MT6735_SMI_REG_H__
-#define _MT6735_SMI_REG_H__
-
-#define SMI_COMMON_EXT_BASE (smi_reg_base_common_ext)
-#define LARB0_BASE (smi_reg_base_barb0)
-#define LARB1_BASE (smi_reg_base_barb1)
-#define LARB2_BASE (smi_reg_base_barb2)
-#define LARB3_BASE (smi_reg_base_barb3)
-
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
-#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
-#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
-
-
-//=====================================================
-//M4U register definition
-//=====================================================
-
-#define REG_MMUg_PT_BASE (0x0)
- #define F_MMUg_PT_VA_MSK 0xffff0000
-#define REG_MMUg_PT_BASE_SEC (0x4)
- #define F_MMUg_PT_VA_MSK_SEC 0xffff0000
-
-
-#define REG_MMU_PROG_EN 0x10
- #define F_MMU0_PROG_EN 1
- #define F_MMU1_PROG_EN 2
-#define REG_MMU_PROG_VA 0x14
- #define F_PROG_VA_LOCK_BIT (1<<11)
- #define F_PROG_VA_LAYER_BIT F_BIT_SET(9)
- #define F_PROG_VA_SIZE16X_BIT F_BIT_SET(8)
- #define F_PROG_VA_SECURE_BIT (1<<7)
- #define F_PROG_VA_MASK 0xfffff000
-
-#define REG_MMU_PROG_DSC 0x18
-
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
-
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
-
-
-#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
-#define REG_MMU_INVLD_EA_SEC (0x34)
-
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
-
-
-#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
- #define F_MMU_INV_SEC_INV_DONE (1<<2)
- #define F_MMU_INV_SEC_INV_INT_SET (1<<3)
- #define F_MMU_INV_SEC_INV_INT_CLR (1<<4)
- #define F_MMU_INV_SEC_DBG (1<<5)
-
-
-#define REG_MMU_SEC_ABORT_INFO (0x40)
-#define REG_MMU_STANDARD_AXI_MODE (0x48)
-
-#define REG_MMU_PRIORITY (0x4c)
-#define REG_MMU_DCM_DIS (0x50)
-#define REG_MMU_WR_LEN (0x54)
-#define REG_MMU_HW_DEBUG (0x58)
- #define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
- #define F_MMU_HW_DBG_PFQ_BRDCST F_BIT_SET(0)
-
-#define REG_MMU_NON_BLOCKING_DIS 0x5C
- #define F_MMU_NON_BLOCK_DISABLE_BIT 1
- #define F_MMU_NON_BLOCK_HALF_ENTRY_BIT 2
-
-#define REG_MMU_LEGACY_4KB_MODE (0x60)
-
-#define REG_MMU_PFH_DIST0 0x80
-#define REG_MMU_PFH_DIST1 0x84
-#define REG_MMU_PFH_DIST2 0x88
-#define REG_MMU_PFH_DIST3 0x8c
-#define REG_MMU_PFH_DIST4 0x90
-#define REG_MMU_PFH_DIST5 0x94
-#define REG_MMU_PFH_DIST6 0x98
-
-#define REG_MMU_PFH_DIST(port) (0x80+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
- #define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
-
-#define REG_MMU_PFH_DIR0 0xF0
-#define REG_MMU_PFH_DIR1 0xF4
-#define REG_MMU_PFH_DIR(port) (((port)<32) ? REG_MMU_PFH_DIR0: REG_MMU_PFH_DIR1)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
-
-
-#define REG_MMU_READ_ENTRY 0x100
- #define F_READ_ENTRY_EN F_BIT_SET(31)
- #define F_READ_ENTRY_MM1_MAIN F_BIT_SET(26)
- #define F_READ_ENTRY_MM0_MAIN F_BIT_SET(25)
- #define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(25+id)
- #define F_READ_ENTRY_PFH F_BIT_SET(24)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,21,16)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,11,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
- #define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
-
-#define REG_MMU_DES_RDATA 0x104
-
-#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu,tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(mmu)+1))
- #define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
- #define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
- #define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-
-
-// tag releated macro
- //#define MMU0_SET_ORDER 7
- //#define MMU1_SET_ORDER 6
- #define MMU_SET_ORDER(mmu) (7-(mmu))
- #define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
- #define MMU_SET_LSB_OFFSET 15
- #define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
-
- #define MMU_PAGE_PER_LINE 8
- #define MMU_WAY_NR 4
- #define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
-
-#define REG_MMU_CTRL_REG 0x110
- #define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
- #define F_MMU_CTRL_TLB_WALK_DIS(dis) F_BIT_VAL(dis, 1)
- #define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 2)
- #define F_MMU_CTRL_MONITOR_CLR(clr) F_BIT_VAL(clr, 3)
- #define F_MMU_CTRL_PFH_RT_RPL_MODE(mod) F_BIT_VAL(mod, 4)
- #define F_MMU_CTRL_TF_PROT_VAL(prot) F_VAL(prot, 6, 5)
- #define F_MMU_CTRL_TF_PROT_MSK F_MSK(6,5)
- #define F_MMU_CTRL_INT_HANG_en(en) F_BIT_VAL(en, 7)
- #define F_MMU_CTRL_COHERE_EN(en) F_BIT_VAL(en, 8)
- #define F_MMU_CTRL_IN_ORDER_WR(en) F_BIT_VAL(en, 9)
- #define F_MMU_CTRL_MAIN_TLB_SHARE_ALL(en) F_BIT_VAL(en, 10)
-
-
-#define REG_MMU_IVRP_PADDR 0x114
- #define F_MMU_IVRP_PA_SET(PA) (PA>>1)
- #define F_MMU_IVRP_8G_PA_SET(PA) ((PA>>1)|(1<<31))
-
-#define REG_MMU_INT_L2_CONTROL 0x120
- #define F_INT_L2_CLR_BIT (1<<12)
- #define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
- #define F_INT_L2_TABLE_WALK_FAULT F_BIT_SET(1)
- #define F_INT_L2_PFH_DMA_FIFO_OVERFLOW F_BIT_SET(2)
- #define F_INT_L2_MISS_DMA_FIFO_OVERFLOW F_BIT_SET(3)
- #define F_INT_L2_INVALD_DONE F_BIT_SET(4)
- #define F_INT_L2_PFH_IN_OUT_FIFO_ERROR F_BIT_SET(5)
- #define F_INT_L2_MISS_FIFO_ERR F_BIT_SET(6)
-
-#define REG_MMU_INT_MAIN_CONTROL 0x124
- #define F_INT_TRANSLATION_FAULT(MMU) F_BIT_SET(0+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
-
- #define F_INT_MAU(mmu, set) F_BIT_SET(14+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
-
- #define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
- #define F_INT_MMU1_MAIN_MSK F_MSK(13, 7)
- #define F_INT_MMU0_MAU_MSK F_MSK(17, 14)
- #define F_INT_MMU1_MAU_MSK F_MSK(21, 18)
-
-#define REG_MMU_CPE_DONE_SEC 0x128
-#define REG_MMU_CPE_DONE 0x12C
-
-#define REG_MMU_L2_FAULT_ST 0x130
- #define F_INT_L2_MISS_OUT_FIFO_ERROR F_BIT_SET(7)
- #define F_INT_L2_MISS_IN_FIFO_ERR F_BIT_SET(8)
-#define REG_MMU_MAIN_FAULT_ST 0x134
-
-#define REG_MMU_TBWALK_FAULT_VA 0x138
- #define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
-#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
- #define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
- #define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
-#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
-#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
-
-#define REG_MMU_PF_MSCNT 0x160
-#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
-#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
-#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
-
-#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
- #define F_SQ_VA_MASK F_MSK(31, 18)
- #define F_SQ_EN_BIT (1<<17)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
-#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
-
-
-#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
- #define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_PA_VALID(regval) F_MSK_SHIFT(regval, 1, 0)
-
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 21, 20)
- #define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
- #define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
- #define F_MAIN_TLB_LOCK_BIT (1<<11)
- #define F_MAIN_TLB_VALID_BIT (1<<10)
- #define F_MAIN_TLB_LAYER_BIT F_BIT_SET(9)
- #define F_MAIN_TLB_16X_BIT F_BIT_SET(8)
- #define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
- #define F_MAIN_TLB_INV_DES_BIT (1<<6)
- #define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
- #define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-
-
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
-
-#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
- #define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
-#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
-#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
- #define F_MAU_BIT_VAL(val, mau) F_BIT_VAL(val, mau)
-#define REG_MMU_MAU_RW(mmu) (0x98c+((mmu)*0xa0))
-#define REG_MMU_MAU_VA(mmu) (0x990+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ST(mmu) (0x994+((mmu)*0xa0))
-
-#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<4)) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
-
-
-
-//================================================================
-// SMI larb
-//================================================================
-
-#define SMI_ERROR_ADDR 0
-#define SMI_LARB_NR 4
-
-#define SMI_LARB0_PORT_NUM 7
-#define SMI_LARB1_PORT_NUM 7
-#define SMI_LARB2_PORT_NUM 21
-#define SMI_LARB3_PORT_NUM 13
-
-#define SMI_LARB_STAT (0x0 )
-#define SMI_LARB_IRQ_EN (0x4 )
-#define SMI_LARB_IRQ_STATUS (0x8 )
-#define SMI_LARB_SLP_CON (0xc )
-#define SMI_LARB_CON (0x10 )
-#define SMI_LARB_CON_SET (0x14 )
-#define SMI_LARB_CON_CLR (0x18 )
-#define SMI_LARB_VC_PRI_MODE (0x20 )
-#define SMI_LARB_CMD_THRT_CON (0x24 )
-#define SMI_LARB_STARV_CON (0x28 )
-#define SMI_LARB_EMI_CON (0x2C )
-#define SMI_LARB_SHARE_EN (0x30 )
-#define SMI_LARB_BWL_EN (0x50 )
-#define SMI_LARB_BWL_SOFT_EN (0x54 )
-#define SMI_LARB_BWL_CON (0x58 )
-#define SMI_LARB_OSTDL_EN (0x60 )
-#define SMI_LARB_OSTDL_SOFT_EN (0x64 )
-#define SMI_LARB_ULTRA_DIS (0x70 )
-#define SMI_LARB_PREULTRA_DIS (0x74 )
-#define SMI_LARB_FORCE_ULTRA (0x78 )
-#define SMI_LARB_FORCE_PREULTRA (0x7c )
-#define SMI_LARB_MST_GRP_SEL_L (0x80 )
-#define SMI_LARB_MST_GRP_SEL_H (0x84 )
-#define SMI_LARB_INT_PATH_SEL (0x90 )
-#define SMI_LARB_EXT_GREQ_VIO (0xa0 )
-#define SMI_LARB_INT_GREQ_VIO (0xa4 )
-#define SMI_LARB_OSTD_UDF_VIO (0xa8 )
-#define SMI_LARB_OSTD_CRS_VIO (0xac )
-#define SMI_LARB_FIFO_STAT (0xb0 )
-#define SMI_LARB_BUS_STAT (0xb4 )
-#define SMI_LARB_CMD_THRT_STAT (0xb8 )
-#define SMI_LARB_MON_REQ (0xbc )
-#define SMI_LARB_REQ_MASK (0xc0 )
-#define SMI_LARB_REQ_DET (0xc4 )
-#define SMI_LARB_EXT_ONGOING (0xc8 )
-#define SMI_LARB_INT_ONGOING (0xcc )
-#define SMI_LARB_MISC_MON0 (0xd0 )
-#define SMI_LARB_DBG_CON (0xf0 )
-#define SMI_LARB_TST_MODE (0xf4 )
-#define SMI_LARB_WRR_PORT (0x100 )
-#define SMI_LARB_BWL_PORT (0x180 )
-#define SMI_LARB_OSTDL_PORT (0x200 )
-#define SMI_LARB_OSTD_MON_PORT (0x280 )
-#define SMI_LARB_PINFO (0x300 )
-#define SMI_LARB_MON_EN (0x400 )
-#define SMI_LARB_MON_CLR (0x404 )
-#define SMI_LARB_MON_PORT (0x408 )
-#define SMI_LARB_MON_CON (0x40c )
-#define SMI_LARB_MON_ACT_CNT (0x410 )
-#define SMI_LARB_MON_REQ_CNT (0x414 )
-#define SMI_LARB_MON_BEAT_CNT (0x418 )
-#define SMI_LARB_MON_BYTE_CNT (0x41c )
-#define SMI_LARB_MON_CP_CNT (0x420 )
-#define SMI_LARB_MON_DP_CNT (0x424 )
-#define SMI_LARB_MON_OSTD_CNT (0x428 )
-#define SMI_LARB_MON_CP_MAX (0x430 )
-#define SMI_LARB_MON_COS_MAX (0x434 )
-#define SMI_LARB_MMU_EN (0xf00 )
- #define F_SMI_MMU_EN(port, en) ((en)<<((port)))
- #define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define REG_SMI_LARB_DOMN_OF_PORT(port) (((port)>15) ? 0xf0c : 0xf08)
- #define F_SMI_DOMN(port, domain) (((domain)&0x3)<<((((port)>15) ? (port-16) : port)<<1))
-
-
-
-
-/*
-#define SMI_SHARE_EN (0x210)
- #define F_SMI_SHARE_EN(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_ROUTE_SEL (0x220)
- #define F_SMI_ROUTE_SEL_EMI(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_MMULOCK_EN (0x230)
-*/
-
-
-/* ===============================================================
- * SMI COMMON
- * =============================================================== */
-
-#define REG_OFFSET_SMI_L1LEN (0x100)
-#define REG_OFFSET_SMI_L1ARB0 (0x104)
-#define REG_OFFSET_SMI_L1ARB1 (0x108)
-#define REG_OFFSET_SMI_L1ARB2 (0x10C)
-#define REG_OFFSET_SMI_L1ARB3 (0x110)
-#define REG_OFFSET_SMI_L1ARB4 (0x114)
-
-/*
-#define REG_SMI_MON_AXI_ENA (0x1a0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CLR (0x1a4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_TYPE (0x1ac+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CON (0x1b0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_ACT_CNT (0x1c0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_REQ_CNT (0x1c4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_OSTD_CNT (0x1c8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BEA_CNT (0x1cc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BYT_CNT (0x1d0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_CNT (0x1d4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_DP_CNT (0x1d8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_MAX (0x1dc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_COS_MAX (0x1e0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1LEN (0x200+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB0 (0x204+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB1 (0x208+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB2 (0x20C+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB3 (0x210+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB4 (0x214+SMI_COMMON_EXT_BASE)
-#define REG_SMI_BUS_SEL (0x220+SMI_COMMON_EXT_BASE)
- #define F_SMI_BUS_SEL_larb0(mmu_idx) F_VAL(mmu_idx, 1, 0)
- #define F_SMI_BUS_SEL_larb1(mmu_idx) F_VAL(mmu_idx, 3, 2)
- #define F_SMI_BUS_SEL_larb2(mmu_idx) F_VAL(mmu_idx, 5, 4)
- #define F_SMI_BUS_SEL_larb3(mmu_idx) F_VAL(mmu_idx, 7, 6)
- #define F_SMI_BUS_SEL_larb4(mmu_idx) F_VAL(mmu_idx, 9, 8)
-#define REG_SMI_WRR_REG0 (0x228+SMI_COMMON_EXT_BASE)
-#define REG_SMI_READ_FIFO_TH (0x230+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_M4U_TH (0x234+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_FIFO2_TH (0x238+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK0 (0x23c+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK1 (0x240+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DCM (0x300+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_ELA (0x304+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG0 (0x400+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG1 (0x404+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG2 (0x408+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DUMMY (0x418+SMI_COMMON_EXT_BASE)
-
-*/
-
-//=========================================================================
-// peripheral system
-//=========================================================================
-#define REG_PERIAXI_BUS_CTL3 (0x208+0xf0003000)
- #define F_PERI_MMU_EN(port, en) ((en)<<((port)))
-
-
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned long Offset)
-{
- unsigned int val;
- val = ioread32((void*)(M4uBase+Offset));
-
- //printk("read base=0x%x, reg=0x%x, val=0x%x\n",M4uBase,Offset,val );
- return val;
-}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned long Offset, unsigned int Val)
-{
- //unsigned int read;
- iowrite32(Val, (void*)(M4uBase+Offset));
- mb();
- /*
- read = M4U_ReadReg32(M4uBase, Offset);
- if(read != Val)
- {
- printk("error to write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
- else
- {
- printk("write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
-*/
-
-}
-
-static inline unsigned int COM_ReadReg32(unsigned long addr)
-{
- return ioread32((void *)addr);
-}
-
-static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- iowrite32(Val, (void *)addr);
- mb();
-}
-
-
-extern unsigned long smi_reg_base_common_ext;
-extern unsigned long smi_reg_base_barb0;
-extern unsigned long smi_reg_base_barb1;
-extern unsigned long smi_reg_base_barb2;
-extern unsigned long smi_reg_base_barb3;
-
-
-#endif //_MT6735_SMI_REG_H__
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_reg_d2.h b/drivers/misc/mediatek/smi/mt6735/smi_reg_d2.h
deleted file mode 100644
index e6a7bbe99..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_reg_d2.h
+++ /dev/null
@@ -1,464 +0,0 @@
-#ifndef _MT6735m_SMI_REG_H__
-#define _MT6735m_SMI_REG_H__
-
-#define SMI_COMMON_EXT_BASE (smi_reg_base_common_ext)
-#define LARB0_BASE (smi_reg_base_barb0)
-#define LARB1_BASE (smi_reg_base_barb1)
-#define LARB2_BASE (smi_reg_base_barb2)
-
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
-#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
-#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
-
-
-//=====================================================
-//M4U register definition
-//=====================================================
-
-#define REG_MMUg_PT_BASE (0x0)
- #define F_MMUg_PT_VA_MSK 0xffff0000
-#define REG_MMUg_PT_BASE_SEC (0x4)
- #define F_MMUg_PT_VA_MSK_SEC 0xffff0000
-
-
-#define REG_MMU_PROG_EN 0x10
- #define F_MMU0_PROG_EN 1
- #define F_MMU1_PROG_EN 2
-#define REG_MMU_PROG_VA 0x14
- #define F_PROG_VA_LOCK_BIT (1<<11)
- #define F_PROG_VA_LAYER_BIT F_BIT_SET(9)
- #define F_PROG_VA_SIZE16X_BIT F_BIT_SET(8)
- #define F_PROG_VA_SECURE_BIT (1<<7)
- #define F_PROG_VA_MASK 0xfffff000
-
-#define REG_MMU_PROG_DSC 0x18
-
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
-
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
-
-
-#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
-#define REG_MMU_INVLD_EA_SEC (0x34)
-
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
-
-
-#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
- #define F_MMU_INV_SEC_INV_DONE (1<<2)
- #define F_MMU_INV_SEC_INV_INT_SET (1<<3)
- #define F_MMU_INV_SEC_INV_INT_CLR (1<<4)
- #define F_MMU_INV_SEC_DBG (1<<5)
-
-
-#define REG_MMU_SEC_ABORT_INFO (0x40)
-#define REG_MMU_STANDARD_AXI_MODE (0x48)
-
-#define REG_MMU_PRIORITY (0x4c)
-#define REG_MMU_DCM_DIS (0x50)
-#define REG_MMU_WR_LEN (0x54)
-#define REG_MMU_HW_DEBUG (0x58)
- #define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
- #define F_MMU_HW_DBG_PFQ_BRDCST F_BIT_SET(0)
-
-#define REG_MMU_NON_BLOCKING_DIS 0x5C
- #define F_MMU_NON_BLOCK_DISABLE_BIT 1
- #define F_MMU_NON_BLOCK_HALF_ENTRY_BIT 2
-
-#define REG_MMU_LEGACY_4KB_MODE (0x60)
-
-#define REG_MMU_PFH_DIST0 0x80
-#define REG_MMU_PFH_DIST1 0x84
-#define REG_MMU_PFH_DIST2 0x88
-#define REG_MMU_PFH_DIST3 0x8c
-#define REG_MMU_PFH_DIST4 0x90
-#define REG_MMU_PFH_DIST5 0x94
-#define REG_MMU_PFH_DIST6 0x98
-
-#define REG_MMU_PFH_DIST(port) (0x80+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
- #define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
-
-#define REG_MMU_PFH_DIR0 0xF0
-#define REG_MMU_PFH_DIR1 0xF4
-#define REG_MMU_PFH_DIR(port) (((port)<32) ? REG_MMU_PFH_DIR0: REG_MMU_PFH_DIR1)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
-
-
-#define REG_MMU_READ_ENTRY 0x100
- #define F_READ_ENTRY_EN F_BIT_SET(31)
- #define F_READ_ENTRY_MM1_MAIN F_BIT_SET(26)
- #define F_READ_ENTRY_MM0_MAIN F_BIT_SET(25)
- #define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(25+id)
- #define F_READ_ENTRY_PFH F_BIT_SET(24)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,21,16)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,11,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
- #define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
-
-#define REG_MMU_DES_RDATA 0x104
-
-#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu,tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(mmu)+1))
- #define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
- #define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
- #define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-
-
-// tag releated macro
- //#define MMU0_SET_ORDER 7
- //#define MMU1_SET_ORDER 6
- #define MMU_SET_ORDER(mmu) (7-(mmu))
- #define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
- #define MMU_SET_LSB_OFFSET 15
- #define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
-
- #define MMU_PAGE_PER_LINE 8
- #define MMU_WAY_NR 4
- #define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
-
-#define REG_MMU_CTRL_REG 0x110
- #define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
- #define F_MMU_CTRL_TLB_WALK_DIS(dis) F_BIT_VAL(dis, 1)
- #define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 2)
- #define F_MMU_CTRL_MONITOR_CLR(clr) F_BIT_VAL(clr, 3)
- #define F_MMU_CTRL_PFH_RT_RPL_MODE(mod) F_BIT_VAL(mod, 4)
- #define F_MMU_CTRL_TF_PROT_VAL(prot) F_VAL(prot, 6, 5)
- #define F_MMU_CTRL_TF_PROT_MSK F_MSK(6,5)
- #define F_MMU_CTRL_INT_HANG_en(en) F_BIT_VAL(en, 7)
- #define F_MMU_CTRL_COHERE_EN(en) F_BIT_VAL(en, 8)
- #define F_MMU_CTRL_IN_ORDER_WR(en) F_BIT_VAL(en, 9)
- #define F_MMU_CTRL_MAIN_TLB_SHARE_ALL(en) F_BIT_VAL(en, 10)
-
-
-#define REG_MMU_IVRP_PADDR 0x114
- #define F_MMU_IVRP_PA_SET(PA) (PA>>1)
- #define F_MMU_IVRP_8G_PA_SET(PA) ((PA>>1)|(1<<31))
-
-#define REG_MMU_INT_L2_CONTROL 0x120
- #define F_INT_L2_CLR_BIT (1<<12)
- #define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
- #define F_INT_L2_TABLE_WALK_FAULT F_BIT_SET(1)
- #define F_INT_L2_PFH_DMA_FIFO_OVERFLOW F_BIT_SET(2)
- #define F_INT_L2_MISS_DMA_FIFO_OVERFLOW F_BIT_SET(3)
- #define F_INT_L2_INVALD_DONE F_BIT_SET(4)
- #define F_INT_L2_PFH_IN_OUT_FIFO_ERROR F_BIT_SET(5)
- #define F_INT_L2_MISS_FIFO_ERR F_BIT_SET(6)
-
-#define REG_MMU_INT_MAIN_CONTROL 0x124
- #define F_INT_TRANSLATION_FAULT(MMU) F_BIT_SET(0+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
-
- #define F_INT_MAU(mmu, set) F_BIT_SET(14+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
-
- #define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
- #define F_INT_MMU1_MAIN_MSK F_MSK(13, 7)
- #define F_INT_MMU0_MAU_MSK F_MSK(17, 14)
- #define F_INT_MMU1_MAU_MSK F_MSK(21, 18)
-
-#define REG_MMU_CPE_DONE_SEC 0x128
-#define REG_MMU_CPE_DONE 0x12C
-
-#define REG_MMU_L2_FAULT_ST 0x130
- #define F_INT_L2_MISS_OUT_FIFO_ERROR F_BIT_SET(7)
- #define F_INT_L2_MISS_IN_FIFO_ERR F_BIT_SET(8)
-#define REG_MMU_MAIN_FAULT_ST 0x134
-
-#define REG_MMU_TBWALK_FAULT_VA 0x138
- #define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
-#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
- #define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
- #define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
-#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
-#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
-
-#define REG_MMU_PF_MSCNT 0x160
-#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
-#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
-#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
-
-#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
- #define F_SQ_VA_MASK F_MSK(31, 18)
- #define F_SQ_EN_BIT (1<<17)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
-#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
-
-
-#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
- #define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_PA_VALID(regval) F_MSK_SHIFT(regval, 1, 0)
-
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 21, 20)
- #define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
- #define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
- #define F_MAIN_TLB_LOCK_BIT (1<<11)
- #define F_MAIN_TLB_VALID_BIT (1<<10)
- #define F_MAIN_TLB_LAYER_BIT F_BIT_SET(9)
- #define F_MAIN_TLB_16X_BIT F_BIT_SET(8)
- #define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
- #define F_MAIN_TLB_INV_DES_BIT (1<<6)
- #define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
- #define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-
-
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
-
-#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
- #define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
-#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
-#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
- #define F_MAU_BIT_VAL(val, mau) F_BIT_VAL(val, mau)
-#define REG_MMU_MAU_RW(mmu) (0x98c+((mmu)*0xa0))
-#define REG_MMU_MAU_VA(mmu) (0x990+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ST(mmu) (0x994+((mmu)*0xa0))
-
-#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<4)) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
-
-
-
-//================================================================
-// SMI larb
-//================================================================
-
-#define SMI_ERROR_ADDR 0
-#define SMI_LARB_NR 3
-
-#define SMI_LARB0_PORT_NUM 8
-#define SMI_LARB1_PORT_NUM 7
-#define SMI_LARB2_PORT_NUM 13
-
-#define SMI_LARB_STAT (0x0 )
-#define SMI_LARB_IRQ_EN (0x4 )
-#define SMI_LARB_IRQ_STATUS (0x8 )
-#define SMI_LARB_SLP_CON (0xc )
-#define SMI_LARB_CON (0x10 )
-#define SMI_LARB_CON_SET (0x14 )
-#define SMI_LARB_CON_CLR (0x18 )
-#define SMI_LARB_VC_PRI_MODE (0x20 )
-#define SMI_LARB_CMD_THRT_CON (0x24 )
-#define SMI_LARB_STARV_CON (0x28 )
-#define SMI_LARB_EMI_CON (0x2C )
-#define SMI_LARB_SHARE_EN (0x30 )
-#define SMI_LARB_BWL_EN (0x50 )
-#define SMI_LARB_BWL_SOFT_EN (0x54 )
-#define SMI_LARB_BWL_CON (0x58 )
-#define SMI_LARB_OSTDL_EN (0x60 )
-#define SMI_LARB_OSTDL_SOFT_EN (0x64 )
-#define SMI_LARB_ULTRA_DIS (0x70 )
-#define SMI_LARB_PREULTRA_DIS (0x74 )
-#define SMI_LARB_FORCE_ULTRA (0x78 )
-#define SMI_LARB_FORCE_PREULTRA (0x7c )
-#define SMI_LARB_MST_GRP_SEL_L (0x80 )
-#define SMI_LARB_MST_GRP_SEL_H (0x84 )
-#define SMI_LARB_INT_PATH_SEL (0x90 )
-#define SMI_LARB_EXT_GREQ_VIO (0xa0 )
-#define SMI_LARB_INT_GREQ_VIO (0xa4 )
-#define SMI_LARB_OSTD_UDF_VIO (0xa8 )
-#define SMI_LARB_OSTD_CRS_VIO (0xac )
-#define SMI_LARB_FIFO_STAT (0xb0 )
-#define SMI_LARB_BUS_STAT (0xb4 )
-#define SMI_LARB_CMD_THRT_STAT (0xb8 )
-#define SMI_LARB_MON_REQ (0xbc )
-#define SMI_LARB_REQ_MASK (0xc0 )
-#define SMI_LARB_REQ_DET (0xc4 )
-#define SMI_LARB_EXT_ONGOING (0xc8 )
-#define SMI_LARB_INT_ONGOING (0xcc )
-#define SMI_LARB_MISC_MON0 (0xd0 )
-#define SMI_LARB_DBG_CON (0xf0 )
-#define SMI_LARB_TST_MODE (0xf4 )
-#define SMI_LARB_WRR_PORT (0x100 )
-#define SMI_LARB_BWL_PORT (0x180 )
-#define SMI_LARB_OSTDL_PORT (0x200 )
-#define SMI_LARB_OSTD_MON_PORT (0x280 )
-#define SMI_LARB_PINFO (0x300 )
-#define SMI_LARB_MON_EN (0x400 )
-#define SMI_LARB_MON_CLR (0x404 )
-#define SMI_LARB_MON_PORT (0x408 )
-#define SMI_LARB_MON_CON (0x40c )
-#define SMI_LARB_MON_ACT_CNT (0x410 )
-#define SMI_LARB_MON_REQ_CNT (0x414 )
-#define SMI_LARB_MON_BEAT_CNT (0x418 )
-#define SMI_LARB_MON_BYTE_CNT (0x41c )
-#define SMI_LARB_MON_CP_CNT (0x420 )
-#define SMI_LARB_MON_DP_CNT (0x424 )
-#define SMI_LARB_MON_OSTD_CNT (0x428 )
-#define SMI_LARB_MON_CP_MAX (0x430 )
-#define SMI_LARB_MON_COS_MAX (0x434 )
-#define SMI_LARB_MMU_EN (0xf00 )
- #define F_SMI_MMU_EN(port, en) ((en)<<((port)))
- #define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define REG_SMI_LARB_DOMN_OF_PORT(port) (((port)>15) ? 0xf0c : 0xf08)
- #define F_SMI_DOMN(port, domain) (((domain)&0x3)<<((((port)>15) ? (port-16) : port)<<1))
-
-
-
-
-/*
-#define SMI_SHARE_EN (0x210)
- #define F_SMI_SHARE_EN(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_ROUTE_SEL (0x220)
- #define F_SMI_ROUTE_SEL_EMI(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_MMULOCK_EN (0x230)
-*/
-
-
-/* ===============================================================
- * SMI COMMON
- * =============================================================== */
-
-#define REG_OFFSET_SMI_L1LEN (0x100)
-#define REG_OFFSET_SMI_L1ARB0 (0x104)
-#define REG_OFFSET_SMI_L1ARB1 (0x108)
-#define REG_OFFSET_SMI_L1ARB2 (0x10C)
-#define REG_OFFSET_SMI_L1ARB3 (0x110)
-#define REG_OFFSET_SMI_L1ARB4 (0x114)
-
-/*
-#define REG_SMI_MON_AXI_ENA (0x1a0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CLR (0x1a4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_TYPE (0x1ac+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CON (0x1b0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_ACT_CNT (0x1c0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_REQ_CNT (0x1c4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_OSTD_CNT (0x1c8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BEA_CNT (0x1cc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BYT_CNT (0x1d0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_CNT (0x1d4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_DP_CNT (0x1d8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_MAX (0x1dc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_COS_MAX (0x1e0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1LEN (0x200+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB0 (0x204+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB1 (0x208+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB2 (0x20C+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB3 (0x210+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB4 (0x214+SMI_COMMON_EXT_BASE)
-#define REG_SMI_BUS_SEL (0x220+SMI_COMMON_EXT_BASE)
- #define F_SMI_BUS_SEL_larb0(mmu_idx) F_VAL(mmu_idx, 1, 0)
- #define F_SMI_BUS_SEL_larb1(mmu_idx) F_VAL(mmu_idx, 3, 2)
- #define F_SMI_BUS_SEL_larb2(mmu_idx) F_VAL(mmu_idx, 5, 4)
- #define F_SMI_BUS_SEL_larb3(mmu_idx) F_VAL(mmu_idx, 7, 6)
- #define F_SMI_BUS_SEL_larb4(mmu_idx) F_VAL(mmu_idx, 9, 8)
-#define REG_SMI_WRR_REG0 (0x228+SMI_COMMON_EXT_BASE)
-#define REG_SMI_READ_FIFO_TH (0x230+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_M4U_TH (0x234+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_FIFO2_TH (0x238+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK0 (0x23c+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK1 (0x240+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DCM (0x300+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_ELA (0x304+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG0 (0x400+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG1 (0x404+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG2 (0x408+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DUMMY (0x418+SMI_COMMON_EXT_BASE)
-
-*/
-
-//=========================================================================
-// peripheral system
-//=========================================================================
-#define REG_PERIAXI_BUS_CTL3 (0x208+0xf0003000)
- #define F_PERI_MMU_EN(port, en) ((en)<<((port)))
-
-
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned long Offset)
-{
- unsigned int val;
- val = ioread32((void*)(M4uBase+Offset));
-
- //printk("read base=0x%x, reg=0x%x, val=0x%x\n",M4uBase,Offset,val );
- return val;
-}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned long Offset, unsigned int Val)
-{
- //unsigned int read;
- iowrite32(Val, (void*)(M4uBase+Offset));
- mb();
- /*
- read = M4U_ReadReg32(M4uBase, Offset);
- if(read != Val)
- {
- printk("error to write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
- else
- {
- printk("write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
-*/
-
-}
-
-static inline unsigned int COM_ReadReg32(unsigned long addr)
-{
- return ioread32((void *)addr);
-}
-
-static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- iowrite32(Val, (void *)addr);
- mb();
-}
-
-
-extern unsigned long smi_reg_base_common_ext;
-extern unsigned long smi_reg_base_barb0;
-extern unsigned long smi_reg_base_barb1;
-extern unsigned long smi_reg_base_barb2;
-
-
-#endif //_MT6735m_SMI_REG_H__
-
diff --git a/drivers/misc/mediatek/smi/mt6735/smi_reg_d3.h b/drivers/misc/mediatek/smi/mt6735/smi_reg_d3.h
deleted file mode 100644
index 615b7c704..000000000
--- a/drivers/misc/mediatek/smi/mt6735/smi_reg_d3.h
+++ /dev/null
@@ -1,467 +0,0 @@
-#ifndef _MT6753_SMI_REG_H__
-#define _MT6753_SMI_REG_H__
-
-#define SMI_COMMON_EXT_BASE (smi_reg_base_common_ext)
-#define LARB0_BASE (smi_reg_base_barb0)
-#define LARB1_BASE (smi_reg_base_barb1)
-#define LARB2_BASE (smi_reg_base_barb2)
-#define LARB3_BASE (smi_reg_base_barb3)
-
-
-//=================================================
-//common macro definitions
-#define F_VAL(val,msb,lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
-#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
-#define F_BIT_SET(bit) (1<<(bit))
-#define F_BIT_VAL(val,bit) ((!!(val))<<(bit))
-#define F_MSK_SHIFT(regval,msb,lsb) (((regval)&F_MSK(msb,lsb))>>lsb)
-
-
-//=====================================================
-//M4U register definition
-//=====================================================
-
-#define REG_MMUg_PT_BASE (0x0)
- #define F_MMUg_PT_VA_MSK 0xffff0000
-#define REG_MMUg_PT_BASE_SEC (0x4)
- #define F_MMUg_PT_VA_MSK_SEC 0xffff0000
-
-
-#define REG_MMU_PROG_EN 0x10
- #define F_MMU0_PROG_EN 1
- #define F_MMU1_PROG_EN 2
-#define REG_MMU_PROG_VA 0x14
- #define F_PROG_VA_LOCK_BIT (1<<11)
- #define F_PROG_VA_LAYER_BIT F_BIT_SET(9)
- #define F_PROG_VA_SIZE16X_BIT F_BIT_SET(8)
- #define F_PROG_VA_SECURE_BIT (1<<7)
- #define F_PROG_VA_MASK 0xfffff000
-
-#define REG_MMU_PROG_DSC 0x18
-
-#define REG_MMU_INVLD (0x20)
- #define F_MMU_INV_ALL 0x2
- #define F_MMU_INV_RANGE 0x1
-
-#define REG_MMU_INVLD_SA (0x24)
-#define REG_MMU_INVLD_EA (0x28)
-
-
-#define REG_MMU_INVLD_SEC (0x2c)
- #define F_MMU_INV_SEC_ALL 0x2
- #define F_MMU_INV_SEC_RANGE 0x1
-
-#define REG_MMU_INVLD_SA_SEC (0x30)
-#define REG_MMU_INVLD_EA_SEC (0x34)
-
-#define REG_INVLID_SEL (0x38)
- #define F_MMU_INV_EN_L1 (1<<0)
- #define F_MMU_INV_EN_L2 (1<<1)
-
-
-#define REG_INVLID_SEL_SEC (0x3c)
- #define F_MMU_INV_SEC_EN_L1 (1<<0)
- #define F_MMU_INV_SEC_EN_L2 (1<<1)
- #define F_MMU_INV_SEC_INV_DONE (1<<2)
- #define F_MMU_INV_SEC_INV_INT_SET (1<<3)
- #define F_MMU_INV_SEC_INV_INT_CLR (1<<4)
- #define F_MMU_INV_SEC_DBG (1<<5)
-
-
-#define REG_MMU_SEC_ABORT_INFO (0x40)
-#define REG_MMU_STANDARD_AXI_MODE (0x48)
-
-#define REG_MMU_PRIORITY (0x4c)
-#define REG_MMU_DCM_DIS (0x50)
-#define REG_MMU_WR_LEN (0x54)
-#define REG_MMU_HW_DEBUG (0x58)
- #define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
- #define F_MMU_HW_DBG_PFQ_BRDCST F_BIT_SET(0)
-
-#define REG_MMU_NON_BLOCKING_DIS 0x5C
- #define F_MMU_NON_BLOCK_DISABLE_BIT 1
- #define F_MMU_NON_BLOCK_HALF_ENTRY_BIT 2
-
-#define REG_MMU_LEGACY_4KB_MODE (0x60)
-
-#define REG_MMU_PFH_DIST0 0x80
-#define REG_MMU_PFH_DIST1 0x84
-#define REG_MMU_PFH_DIST2 0x88
-#define REG_MMU_PFH_DIST3 0x8c
-#define REG_MMU_PFH_DIST4 0x90
-#define REG_MMU_PFH_DIST5 0x94
-#define REG_MMU_PFH_DIST6 0x98
-
-#define REG_MMU_PFH_DIST(port) (0x80+(((port)>>3)<<2))
- #define F_MMU_PFH_DIST_VAL(port,val) ((val&0xf)<<(((port)&0x7)<<2))
- #define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
-
-#define REG_MMU_PFH_DIR0 0xF0
-#define REG_MMU_PFH_DIR1 0xF4
-#define REG_MMU_PFH_DIR(port) (((port)<32) ? REG_MMU_PFH_DIR0: REG_MMU_PFH_DIR1)
-#define F_MMU_PFH_DIR(port,val) ((!!(val))<<((port)&0x1f))
-
-
-#define REG_MMU_READ_ENTRY 0x100
- #define F_READ_ENTRY_EN F_BIT_SET(31)
- #define F_READ_ENTRY_MM1_MAIN F_BIT_SET(26)
- #define F_READ_ENTRY_MM0_MAIN F_BIT_SET(25)
- #define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(25+id)
- #define F_READ_ENTRY_PFH F_BIT_SET(24)
- #define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx,21,16)
- #define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx,11,5)
- //#define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4)
- //#define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2)
- #define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
- #define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1,0)
-
-#define REG_MMU_DES_RDATA 0x104
-
-#define REG_MMU_PFH_TAG_RDATA 0x108
- #define F_PFH_TAG_VA_GET(mmu,tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(mmu)+1))
- #define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
- #define F_PFH_TAG_16X_BIT F_BIT_SET(2) //this bit is always 0 -- cost down.
- #define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
- #define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
-
-
-// tag releated macro
- //#define MMU0_SET_ORDER 7
- //#define MMU1_SET_ORDER 6
- #define MMU_SET_ORDER(mmu) (7-(mmu))
- #define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
- #define MMU_SET_LSB_OFFSET 15
- #define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
- #define MMU_PFH_VA_TO_SET(mmu,va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
-
- #define MMU_PAGE_PER_LINE 8
- #define MMU_WAY_NR 4
- #define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
-
-
-#define REG_MMU_CTRL_REG 0x110
- #define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
- #define F_MMU_CTRL_TLB_WALK_DIS(dis) F_BIT_VAL(dis, 1)
- #define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 2)
- #define F_MMU_CTRL_MONITOR_CLR(clr) F_BIT_VAL(clr, 3)
- #define F_MMU_CTRL_PFH_RT_RPL_MODE(mod) F_BIT_VAL(mod, 4)
- #define F_MMU_CTRL_TF_PROT_VAL(prot) F_VAL(prot, 6, 5)
- #define F_MMU_CTRL_TF_PROT_MSK F_MSK(6,5)
- #define F_MMU_CTRL_INT_HANG_en(en) F_BIT_VAL(en, 7)
- #define F_MMU_CTRL_COHERE_EN(en) F_BIT_VAL(en, 8)
- #define F_MMU_CTRL_IN_ORDER_WR(en) F_BIT_VAL(en, 9)
- #define F_MMU_CTRL_MAIN_TLB_SHARE_ALL(en) F_BIT_VAL(en, 10)
-
-
-#define REG_MMU_IVRP_PADDR 0x114
- #define F_MMU_IVRP_PA_SET(PA) (PA>>1)
- #define F_MMU_IVRP_8G_PA_SET(PA) ((PA>>1)|(1<<31))
-
-#define REG_MMU_INT_L2_CONTROL 0x120
- #define F_INT_L2_CLR_BIT (1<<12)
- #define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
- #define F_INT_L2_TABLE_WALK_FAULT F_BIT_SET(1)
- #define F_INT_L2_PFH_DMA_FIFO_OVERFLOW F_BIT_SET(2)
- #define F_INT_L2_MISS_DMA_FIFO_OVERFLOW F_BIT_SET(3)
- #define F_INT_L2_INVALD_DONE F_BIT_SET(4)
- #define F_INT_L2_PFH_IN_OUT_FIFO_ERROR F_BIT_SET(5)
- #define F_INT_L2_MISS_FIFO_ERR F_BIT_SET(6)
-
-#define REG_MMU_INT_MAIN_CONTROL 0x124
- #define F_INT_TRANSLATION_FAULT(MMU) F_BIT_SET(0+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
- #define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
-
- #define F_INT_MAU(mmu, set) F_BIT_SET(14+(set)+(mmu<<2)) //(14+(set)+(mmu*4))
-
- #define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
- #define F_INT_MMU1_MAIN_MSK F_MSK(13, 7)
- #define F_INT_MMU0_MAU_MSK F_MSK(17, 14)
- #define F_INT_MMU1_MAU_MSK F_MSK(21, 18)
-
-#define REG_MMU_CPE_DONE_SEC 0x128
-#define REG_MMU_CPE_DONE 0x12C
-
-#define REG_MMU_L2_FAULT_ST 0x130
- #define F_INT_L2_MISS_OUT_FIFO_ERROR F_BIT_SET(7)
- #define F_INT_L2_MISS_IN_FIFO_ERR F_BIT_SET(8)
-#define REG_MMU_MAIN_FAULT_ST 0x134
-
-#define REG_MMU_TBWALK_FAULT_VA 0x138
- #define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
-
-#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
- #define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
- #define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
- #define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
-
-#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
-#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
-
-#define REG_MMU_PF_MSCNT 0x160
-#define REG_MMU_PF_CNT 0x164
-#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) //(0x168+((mmu)*12)
-#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
-#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
-
-#define MMU01_SQ_OFFSET (0x600-0x300)
-#define REG_MMU_SQ_START(mmu,x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
- #define F_SQ_VA_MASK F_MSK(31, 18)
- #define F_SQ_EN_BIT (1<<17)
- //#define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13)
-#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
-
-
-#define MMU_TOTAL_RS_NR 8
-#define REG_MMU_RSx_VA(mmu,x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
- #define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
-
-#define REG_MMU_RSx_PA(mmu,x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
- #define F_MMU_RSx_PA_VALID(regval) F_MSK_SHIFT(regval, 1, 0)
-
-#define REG_MMU_RSx_2ND_BASE(mmu,x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
-
-#define REG_MMU_RSx_ST(mmu,x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 21, 20)
- #define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
- #define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
-
-#define REG_MMU_MAIN_TAG(mmu,x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
- #define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
- #define F_MAIN_TLB_LOCK_BIT (1<<11)
- #define F_MAIN_TLB_VALID_BIT (1<<10)
- #define F_MAIN_TLB_LAYER_BIT F_BIT_SET(9)
- #define F_MAIN_TLB_16X_BIT F_BIT_SET(8)
- #define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
- #define F_MAIN_TLB_INV_DES_BIT (1<<6)
- #define F_MAIN_TLB_SQ_EN_BIT (1<<5)
- #define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4,1)
- #define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
-
-
-#define REG_MMU_MAU_START(mmu,mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_START_BIT32(mmu,mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END(mmu,mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_END_BIT32(mmu,mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_PORT_EN(mmu,mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ID(mmu,mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR(mmu,mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
-#define REG_MMU_MAU_ADDR_BIT32(mmu,mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
-
-#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
- #define F_MAU_LARB_VAL(mau,larb) ((larb)<<(mau*8))
- #define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
-#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
-#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
- #define F_MAU_BIT_VAL(val, mau) F_BIT_VAL(val, mau)
-#define REG_MMU_MAU_RW(mmu) (0x98c+((mmu)*0xa0))
-#define REG_MMU_MAU_VA(mmu) (0x990+((mmu)*0xa0))
-#define REG_MMU_MAU_ASSERT_ST(mmu) (0x994+((mmu)*0xa0))
-
-#define REG_MMU_PFH_VLD_0 (0x180)
-#define REG_MMU_PFH_VLD(set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<4)) //+((set/32)*4)+(way*16)
- #define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) // set%32
-
-
-
-//================================================================
-// SMI larb
-//================================================================
-
-#define SMI_ERROR_ADDR 0
-#define SMI_LARB_NR 4
-
-#define SMI_LARB0_PORT_NUM 10
-#define SMI_LARB1_PORT_NUM 7
-#define SMI_LARB2_PORT_NUM 21
-#define SMI_LARB3_PORT_NUM 13
-
-#define SMI_LARB_STAT (0x0 )
-#define SMI_LARB_IRQ_EN (0x4 )
-#define SMI_LARB_IRQ_STATUS (0x8 )
-#define SMI_LARB_SLP_CON (0xc )
-#define SMI_LARB_CON (0x10 )
-#define SMI_LARB_CON_SET (0x14 )
-#define SMI_LARB_CON_CLR (0x18 )
-#define SMI_LARB_VC_PRI_MODE (0x20 )
-#define SMI_LARB_CMD_THRT_CON (0x24 )
-#define SMI_LARB_STARV_CON (0x28 )
-#define SMI_LARB_EMI_CON (0x2C )
-#define SMI_LARB_SHARE_EN (0x30 )
-#define SMI_LARB_BWL_EN (0x50 )
-#define SMI_LARB_BWL_SOFT_EN (0x54 )
-#define SMI_LARB_BWL_CON (0x58 )
-#define SMI_LARB_OSTDL_EN (0x60 )
-#define SMI_LARB_OSTDL_SOFT_EN (0x64 )
-#define SMI_LARB_ULTRA_DIS (0x70 )
-#define SMI_LARB_PREULTRA_DIS (0x74 )
-#define SMI_LARB_FORCE_ULTRA (0x78 )
-#define SMI_LARB_FORCE_PREULTRA (0x7c )
-#define SMI_LARB_MST_GRP_SEL_L (0x80 )
-#define SMI_LARB_MST_GRP_SEL_H (0x84 )
-#define SMI_LARB_INT_PATH_SEL (0x90 )
-#define SMI_LARB_EXT_GREQ_VIO (0xa0 )
-#define SMI_LARB_INT_GREQ_VIO (0xa4 )
-#define SMI_LARB_OSTD_UDF_VIO (0xa8 )
-#define SMI_LARB_OSTD_CRS_VIO (0xac )
-#define SMI_LARB_FIFO_STAT (0xb0 )
-#define SMI_LARB_BUS_STAT (0xb4 )
-#define SMI_LARB_CMD_THRT_STAT (0xb8 )
-#define SMI_LARB_MON_REQ (0xbc )
-#define SMI_LARB_REQ_MASK (0xc0 )
-#define SMI_LARB_REQ_DET (0xc4 )
-#define SMI_LARB_EXT_ONGOING (0xc8 )
-#define SMI_LARB_INT_ONGOING (0xcc )
-#define SMI_LARB_MISC_MON0 (0xd0 )
-#define SMI_LARB_DBG_CON (0xf0 )
-#define SMI_LARB_TST_MODE (0xf4 )
-#define SMI_LARB_WRR_PORT (0x100 )
-#define SMI_LARB_BWL_PORT (0x180 )
-#define SMI_LARB_OSTDL_PORT (0x200 )
-#define SMI_LARB_OSTD_MON_PORT (0x280 )
-#define SMI_LARB_PINFO (0x300 )
-#define SMI_LARB_MON_EN (0x400 )
-#define SMI_LARB_MON_CLR (0x404 )
-#define SMI_LARB_MON_PORT (0x408 )
-#define SMI_LARB_MON_CON (0x40c )
-#define SMI_LARB_MON_ACT_CNT (0x410 )
-#define SMI_LARB_MON_REQ_CNT (0x414 )
-#define SMI_LARB_MON_BEAT_CNT (0x418 )
-#define SMI_LARB_MON_BYTE_CNT (0x41c )
-#define SMI_LARB_MON_CP_CNT (0x420 )
-#define SMI_LARB_MON_DP_CNT (0x424 )
-#define SMI_LARB_MON_OSTD_CNT (0x428 )
-#define SMI_LARB_MON_CP_MAX (0x430 )
-#define SMI_LARB_MON_COS_MAX (0x434 )
-#define SMI_LARB_MMU_EN (0xf00 )
- #define F_SMI_MMU_EN(port, en) ((en)<<((port)))
- #define F_SMI_SEC_EN(port, en) ((en)<<((port)))
-#define REG_SMI_LARB_DOMN_OF_PORT(port) (((port)>15) ? 0xf0c : 0xf08)
- #define F_SMI_DOMN(port, domain) (((domain)&0x3)<<((((port)>15) ? (port-16) : port)<<1))
-
-
-
-
-/*
-#define SMI_SHARE_EN (0x210)
- #define F_SMI_SHARE_EN(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_ROUTE_SEL (0x220)
- #define F_SMI_ROUTE_SEL_EMI(port) F_BIT_SET(m4u_port_2_larb_port(port))
-#define SMI_MMULOCK_EN (0x230)
-*/
-
-
-/* ===============================================================
- * SMI COMMON
- * =============================================================== */
-
-#define REG_OFFSET_SMI_L1LEN (0x100)
-#define REG_OFFSET_SMI_L1ARB0 (0x104)
-#define REG_OFFSET_SMI_L1ARB1 (0x108)
-#define REG_OFFSET_SMI_L1ARB2 (0x10C)
-#define REG_OFFSET_SMI_L1ARB3 (0x110)
-#define REG_OFFSET_SMI_L1ARB4 (0x114)
-
-/*
-#define REG_SMI_MON_AXI_ENA (0x1a0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CLR (0x1a4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_TYPE (0x1ac+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CON (0x1b0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_ACT_CNT (0x1c0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_REQ_CNT (0x1c4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_OSTD_CNT (0x1c8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BEA_CNT (0x1cc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_BYT_CNT (0x1d0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_CNT (0x1d4+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_DP_CNT (0x1d8+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_CP_MAX (0x1dc+SMI_COMMON_EXT_BASE)
-#define REG_SMI_MON_AXI_COS_MAX (0x1e0+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1LEN (0x200+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB0 (0x204+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB1 (0x208+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB2 (0x20C+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB3 (0x210+SMI_COMMON_EXT_BASE)
-#define REG_SMI_L1ARB4 (0x214+SMI_COMMON_EXT_BASE)
-#define REG_SMI_BUS_SEL (0x220+SMI_COMMON_EXT_BASE)
- #define F_SMI_BUS_SEL_larb0(mmu_idx) F_VAL(mmu_idx, 1, 0)
- #define F_SMI_BUS_SEL_larb1(mmu_idx) F_VAL(mmu_idx, 3, 2)
- #define F_SMI_BUS_SEL_larb2(mmu_idx) F_VAL(mmu_idx, 5, 4)
- #define F_SMI_BUS_SEL_larb3(mmu_idx) F_VAL(mmu_idx, 7, 6)
- #define F_SMI_BUS_SEL_larb4(mmu_idx) F_VAL(mmu_idx, 9, 8)
-#define REG_SMI_WRR_REG0 (0x228+SMI_COMMON_EXT_BASE)
-#define REG_SMI_READ_FIFO_TH (0x230+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_M4U_TH (0x234+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_FIFO2_TH (0x238+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK0 (0x23c+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_PREULTRA_MASK1 (0x240+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DCM (0x300+SMI_COMMON_EXT_BASE)
-#define REG_SMI_SMI_ELA (0x304+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG0 (0x400+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG1 (0x404+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DEBUG2 (0x408+SMI_COMMON_EXT_BASE)
-#define REG_SMI_DUMMY (0x418+SMI_COMMON_EXT_BASE)
-
-*/
-
-//=========================================================================
-// peripheral system
-//=========================================================================
-#define REG_PERIAXI_BUS_CTL3 (0x208+0xf0003000)
- #define F_PERI_MMU_EN(port, en) ((en)<<((port)))
-
-
-static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned long Offset)
-{
- unsigned int val;
- val = ioread32((void*)(M4uBase+Offset));
-
- //printk("read base=0x%x, reg=0x%x, val=0x%x\n",M4uBase,Offset,val );
- return val;
-}
-static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned long Offset, unsigned int Val)
-{
- //unsigned int read;
- iowrite32(Val, (void*)(M4uBase+Offset));
- mb();
- /*
- read = M4U_ReadReg32(M4uBase, Offset);
- if(read != Val)
- {
- printk("error to write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
- else
- {
- printk("write base=0x%x, reg=0x%x, val=0x%x, read=0x%x\n",M4uBase,Offset, Val, read );
- }
-*/
-
-}
-
-static inline unsigned int COM_ReadReg32(unsigned long addr)
-{
- return ioread32((void *)addr);
-}
-
-static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
-{
- iowrite32(Val, (void *)addr);
- mb();
-}
-
-
-extern unsigned long smi_reg_base_common_ext;
-extern unsigned long smi_reg_base_barb0;
-extern unsigned long smi_reg_base_barb1;
-extern unsigned long smi_reg_base_barb2;
-extern unsigned long smi_reg_base_barb3;
-
-
-#endif //_MT6753_SMI_REG_H__
-
diff --git a/drivers/misc/mediatek/smi/smi_common.c b/drivers/misc/mediatek/smi/smi_common.c
new file mode 100644
index 000000000..7237b2638
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_common.c
@@ -0,0 +1,2003 @@
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kobject.h>
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/aee.h>
+
+/* Define SMI_INTERNAL_CCF_SUPPORT when CCF needs to be enabled */
+#if !defined(CONFIG_MTK_LEGACY)
+#define SMI_INTERNAL_CCF_SUPPORT
+#endif
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+#include <linux/clk.h>
+/* for ccf clk CB */
+#if defined(SMI_D1)
+#include "clk-mt6735-pg.h"
+#elif defined(SMI_J)
+#include "clk-mt6755-pg.h"
+#endif
+/* notify clk is enabled/disabled for m4u*/
+#include "m4u.h"
+#else
+#include <mach/mt_clkmgr.h>
+#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+#include <asm/io.h>
+
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#endif
+
+#include <mach/mt_smi.h>
+
+
+#include "smi_reg.h"
+#include "smi_common.h"
+#include "smi_debug.h"
+#include "smi_info_util.h"
+#include "smi_configuration.h"
+#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
+#include "mmdvfs_mgr.h"
+#endif
+#undef pr_fmt
+#define pr_fmt(fmt) "[SMI]" fmt
+
+#define SMI_LOG_TAG "SMI"
+
+#define LARB_BACKUP_REG_SIZE 128
+#define SMI_COMMON_BACKUP_REG_NUM 7
+
+#define SF_HWC_PIXEL_MAX_NORMAL (1920 * 1080 * 7)
+#define SF_HWC_PIXEL_MAX_VR (1920 * 1080 * 4 + 1036800) /* 4.5 FHD size */
+#define SF_HWC_PIXEL_MAX_VP (1920 * 1080 * 7)
+#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (1920 * 1080 * 1)
+
+/* debug level */
+static unsigned int smi_debug_level;
+
+#define SMIDBG(level, x...) \
+ do { \
+ if (smi_debug_level >= (level)) \
+ SMIMSG(x); \
+ } while (0)
+
+#define DEFINE_ATTR_RO(_name)\
+ static struct kobj_attribute _name##_attr = {\
+ .attr = {\
+ .name = #_name,\
+ .mode = 0444,\
+ },\
+ .show = _name##_show,\
+ }
+
+#define DEFINE_ATTR_RW(_name)\
+ static struct kobj_attribute _name##_attr = {\
+ .attr = {\
+ .name = #_name,\
+ .mode = 0644,\
+ },\
+ .show = _name##_show,\
+ .store = _name##_store,\
+ }
+
+#define __ATTR_OF(_name) (&_name##_attr.attr)
+
+struct SMI_struct {
+ spinlock_t SMI_lock;
+ unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT]; /* one bit represent one module */
+};
+
+static struct SMI_struct g_SMIInfo;
+
+/* LARB BASE ADDRESS */
+unsigned long gLarbBaseAddr[SMI_LARB_NR] = { 0 };
+
+/* DT porting */
+unsigned long smi_reg_base_common_ext = 0;
+unsigned long smi_reg_base_barb0 = 0;
+unsigned long smi_reg_base_barb1 = 0;
+unsigned long smi_reg_base_barb2 = 0;
+unsigned long smi_reg_base_barb3 = 0;
+
+
+
+
+char *smi_get_region_name(unsigned int region_indx);
+
+
+static struct smi_device *smi_dev;
+
+static struct device *smiDeviceUevent;
+
+static struct cdev *pSmiDev;
+
+#define SMI_COMMON_REG_INDX 0
+#define SMI_LARB0_REG_INDX 1
+#define SMI_LARB1_REG_INDX 2
+#define SMI_LARB2_REG_INDX 3
+#define SMI_LARB3_REG_INDX 4
+
+#if defined(SMI_D2)
+#define SMI_REG_REGION_MAX 4
+
+static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
+ SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM
+};
+
+static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1 };
+
+static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
+static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
+static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
+
+static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
+ larb0_port_backup, larb1_port_backup, larb2_port_backup
+};
+
+
+#elif defined(SMI_D1)
+#define SMI_REG_REGION_MAX 5
+
+static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
+ SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
+};
+
+static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 0, 1 };
+
+static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
+static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
+static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
+static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
+static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
+ larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup
+};
+
+
+#elif defined(SMI_D3)
+#define SMI_REG_REGION_MAX 5
+
+static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
+ SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
+};
+
+static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
+static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
+static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
+static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
+
+static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2, 1, 1 };
+
+static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
+ larb0_port_backup, larb1_port_backup, larb2_port_backup, larb3_port_backup
+};
+#elif defined(SMI_R)
+
+#define SMI_REG_REGION_MAX 3
+
+static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
+ SMI_LARB1_PORT_NUM
+};
+
+static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
+static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
+
+static unsigned char larb_vc_setting[SMI_LARB_NR] = { 0, 2 };
+
+static unsigned short int *larb_port_backup[SMI_LARB_NR] = {
+ larb0_port_backup, larb1_port_backup
+};
+
+#elif defined(SMI_J)
+#define SMI_REG_REGION_MAX 5
+
+
+static const unsigned int larb_port_num[SMI_LARB_NR] = { SMI_LARB0_PORT_NUM,
+ SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM
+};
+
+static unsigned short int larb0_port_backup[SMI_LARB0_PORT_NUM];
+static unsigned short int larb1_port_backup[SMI_LARB1_PORT_NUM];
+static unsigned short int larb2_port_backup[SMI_LARB2_PORT_NUM];
+static unsigned short int larb3_port_backup[SMI_LARB3_PORT_NUM];
+
+static unsigned char larb_vc_setting[SMI_LARB_NR] = { 1, 2, 1, 1 };
+
+static unsigned short int *larb_port_backup[SMI_LARB_NR] = { larb0_port_backup,
+ larb1_port_backup, larb2_port_backup, larb3_port_backup
+};
+#endif
+
+static unsigned long gSMIBaseAddrs[SMI_REG_REGION_MAX];
+
+/* SMI COMMON register list to be backed up */
+static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = { 0x100, 0x104,
+ 0x108, 0x10c, 0x110, 0x230, 0x234
+};
+
+static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
+struct smi_device {
+ struct device *dev;
+ void __iomem *regs[SMI_REG_REGION_MAX];
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+ struct clk *smi_common_clk;
+ struct clk *smi_larb0_clk;
+ struct clk *img_larb2_clk;
+ struct clk *vdec0_vdec_clk;
+ struct clk *vdec1_larb_clk;
+ struct clk *venc_larb_clk;
+ struct clk *venc_venc_clk;
+ struct clk *larb0_mtcmos;
+ struct clk *larb1_mtcmos;
+ struct clk *larb2_mtcmos;
+ struct clk *larb3_mtcmos;
+#endif
+};
+
+
+/* To keep the HW's init value */
+static int is_default_value_saved;
+static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
+
+static unsigned int wifi_disp_transaction;
+
+
+/* larb backuprestore */
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+static bool fglarbcallback;
+#endif
+/* tuning mode, 1 for register ioctl */
+static unsigned int smi_tuning_mode;
+#if defined(SMI_J)
+static unsigned int disable_freq_hopping;
+static unsigned int disable_freq_mux = 1;
+static unsigned int force_max_mmsys_clk;
+static unsigned int force_camera_hpm;
+#endif
+static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
+
+static unsigned int *pLarbRegBackUp[SMI_LARB_NR];
+static int g_bInited;
+
+MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = {
+ 0, 0, {0, 0}, {0, 0}, {0, 0}, {0, 0}, 0, 0, 0,
+ SF_HWC_PIXEL_MAX_NORMAL
+};
+
+char *smi_port_name[][21] = {
+ { /* 0 MMSYS */
+ "disp_ovl0", "disp_rdma0", "disp_rdma1", "disp_wdma0", "disp_ovl1",
+ "disp_rdma2", "disp_wdma1", "disp_od_r", "disp_od_w", "mdp_rdma0",
+ "mdp_rdma1", "mdp_wdma", "mdp_wrot0", "mdp_wrot1"},
+ { /* 1 VDEC */ "hw_vdec_mc_ext", "hw_vdec_pp_ext", "hw_vdec_ufo_ext", "hw_vdec_vld_ext",
+ "hw_vdec_vld2_ext", "hw_vdec_avc_mv_ext", "hw_vdec_pred_rd_ext",
+ "hw_vdec_pred_wr_ext", "hw_vdec_ppwrap_ext"},
+ { /* 2 ISP */ "imgo", "rrzo", "aao", "lcso", "esfko", "imgo_d", "lsci", "lsci_d", "bpci",
+ "bpci_d", "ufdi", "imgi", "img2o", "img3o", "vipi", "vip2i", "vip3i",
+ "lcei", "rb", "rp", "wr"},
+ { /* 3 VENC */ "venc_rcpu", "venc_rec", "venc_bsdma", "venc_sv_comv", "venc_rd_comv",
+ "jpgenc_bsdma", "remdc_sdma", "remdc_bsdma", "jpgenc_rdma", "jpgenc_sdma",
+ "jpgdec_wdma", "jpgdec_bsdma", "venc_cur_luma", "venc_cur_chroma",
+ "venc_ref_luma", "venc_ref_chroma", "remdc_wdma", "venc_nbm_rdma",
+ "venc_nbm_wdma"},
+ { /* 4 MJC */ "mjc_mv_rd", "mjc_mv_wr", "mjc_dma_rd", "mjc_dma_wr"}
+};
+
+
+
+static unsigned long get_register_base(int i);
+
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+static struct clk *get_smi_clk(char *smi_clk_name);
+#endif
+
+#if IS_ENABLED(CONFIG_COMPAT)
+static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+#define MTK_SMI_COMPAT_ioctl NULL
+#endif
+
+
+/* Return the ioremapped base address of a larb's register block, or
+ * SMI_ERROR_ADDR when larb_id is out of range, so callers can
+ * error-check instead of indexing gLarbBaseAddr[] blindly. */
+unsigned long get_larb_base_addr(int larb_id)
+{
+	if (larb_id >= SMI_LARB_NR || larb_id < 0)
+		return SMI_ERROR_ADDR;
+	else
+		return gLarbBaseAddr[larb_id];
+
+}
+
+/* 0 for common, 1 for larb0, 2 for larb1... */
+/* Return the ioremapped base of an SMI register region, or
+ * SMI_ERROR_ADDR when the index is out of range. */
+unsigned long get_smi_base_addr(int larb_id)
+{
+	/*
+	 * Region indices are shifted by one relative to larb ids (index 0 is
+	 * SMI common), so the last valid index is SMI_LARB_NR, not
+	 * SMI_LARB_NR - 1; gSMIBaseAddrs[] is sized SMI_REG_REGION_MAX
+	 * (= SMI_LARB_NR + 1 in every config above).  The previous bound of
+	 * ">= SMI_LARB_NR" wrongly rejected the highest larb's region.
+	 */
+	if (larb_id >= SMI_REG_REGION_MAX || larb_id < 0)
+		return SMI_ERROR_ADDR;
+	return gSMIBaseAddrs[larb_id];
+}
+EXPORT_SYMBOL(get_smi_base_addr);
+
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+/* Look up a named clock on the SMI platform device via devm_clk_get();
+ * returns NULL (after logging) instead of an ERR_PTR on failure. */
+struct clk *get_smi_clk(char *smi_clk_name)
+{
+	struct clk *clk = devm_clk_get(smi_dev->dev, smi_clk_name);
+
+	if (!IS_ERR(clk))
+		return clk;
+
+	SMIMSG("cannot get %s\n", smi_clk_name);
+	return NULL;
+}
+
+/* clk_prepare() wrapper that tolerates the NULL clks returned by
+ * get_smi_clk() on lookup failure; logs errors, never propagates them. */
+static void smi_prepare_clk(struct clk *smi_clk, char *name)
+{
+	int ret;
+
+	if (smi_clk == NULL) {
+		SMIMSG("clk_prepare error, smi_clk can't be NULL, %s\n", name);
+		return;
+	}
+
+	ret = clk_prepare(smi_clk);
+	if (ret)
+		SMIMSG("clk_prepare return error %d, %s\n", ret, name);
+}
+
+/* clk_enable() wrapper mirroring smi_prepare_clk(): NULL-tolerant,
+ * log-only error handling. */
+static void smi_enable_clk(struct clk *smi_clk, char *name)
+{
+	int ret;
+
+	if (smi_clk == NULL) {
+		SMIMSG("clk_enable error, smi_clk can't be NULL, %s\n", name);
+		return;
+	}
+
+	ret = clk_enable(smi_clk);
+	if (ret)
+		SMIMSG("clk_enable return error %d, %s\n", ret, name);
+}
+
+/* clk_unprepare() wrapper; NULL clk is logged and skipped. */
+static void smi_unprepare_clk(struct clk *smi_clk, char *name)
+{
+	if (smi_clk == NULL) {
+		SMIMSG("smi_unprepare error, smi_clk can't be NULL, %s\n", name);
+		return;
+	}
+
+	clk_unprepare(smi_clk);
+}
+
+/* clk_disable() wrapper; NULL clk is logged and skipped. */
+static void smi_disable_clk(struct clk *smi_clk, char *name)
+{
+	if (smi_clk == NULL) {
+		SMIMSG("smi_disable error, smi_clk can't be NULL, %s\n", name);
+		return;
+	}
+
+	clk_disable(smi_clk);
+}
+
+/* end MTCMOS*/
+#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+/*
+ * Enable the clocks feeding one larb plus SMI common.  Two build flavors:
+ * legacy clkmgr (enable_clock) vs CCF (smi_enable_clk on clks obtained in
+ * probe).  On CCF builds, enable_mtcmos additionally references the power
+ * domains; callers that run under g_SMIInfo.SMI_lock pass 0 and rely on a
+ * prior larb_clock_prepare() (clk_enable must not sleep).  Always returns 0;
+ * individual clk failures are only logged by the wrappers.
+ */
+static int larb_clock_enable(int larb_id, int enable_mtcmos)
+{
+#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING)
+	char name[30];
+
+	sprintf(name, "smi+%d", larb_id);
+
+
+	switch (larb_id) {
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+	case 0:
+		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
+		enable_clock(MT_CG_DISP0_SMI_LARB0, name);
+		break;
+	case 1:
+		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
+#if defined(SMI_R)
+		enable_clock(MT_CG_LARB1_SMI_CKPDN, name);
+#else
+		enable_clock(MT_CG_VDEC1_LARB, name);
+#endif
+		break;
+	case 2:
+#if !defined(SMI_R)
+		/* SMI_R has no larb2; this case is a no-op there */
+		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
+		enable_clock(MT_CG_IMAGE_LARB2_SMI, name);
+#endif
+		break;
+	case 3:
+		enable_clock(MT_CG_DISP0_SMI_COMMON, name);
+#if defined(SMI_D1)
+		enable_clock(MT_CG_VENC_LARB, name);
+#elif defined(SMI_D3)
+		enable_clock(MT_CG_VENC_VENC, name);
+#endif
+		break;
+#else
+	/* CCF path: larb0's MTCMOS hosts SMI common, so every larb
+	 * references larb0_mtcmos before its own domain. */
+	case 0:
+		if (enable_mtcmos)
+			smi_enable_clk(smi_dev->larb0_mtcmos, name);
+		smi_enable_clk(smi_dev->smi_common_clk, name);
+		smi_enable_clk(smi_dev->smi_larb0_clk, name);
+		break;
+	case 1:
+		if (enable_mtcmos) {
+			smi_enable_clk(smi_dev->larb0_mtcmos, name);
+			smi_enable_clk(smi_dev->larb1_mtcmos, name);
+		}
+		smi_enable_clk(smi_dev->smi_common_clk, name);
+		smi_enable_clk(smi_dev->vdec1_larb_clk, name);
+		break;
+	case 2:
+		if (enable_mtcmos) {
+			smi_enable_clk(smi_dev->larb0_mtcmos, name);
+			smi_enable_clk(smi_dev->larb2_mtcmos, name);
+		}
+		smi_enable_clk(smi_dev->smi_common_clk, name);
+		smi_enable_clk(smi_dev->img_larb2_clk, name);
+		break;
+	case 3:
+		if (enable_mtcmos) {
+			smi_enable_clk(smi_dev->larb0_mtcmos, name);
+			smi_enable_clk(smi_dev->larb3_mtcmos, name);
+		}
+		smi_enable_clk(smi_dev->smi_common_clk, name);
+#if defined(SMI_D1)
+		smi_enable_clk(smi_dev->venc_larb_clk, name);
+#elif defined(SMI_J)
+		smi_enable_clk(smi_dev->venc_venc_clk, name);
+#endif
+		break;
+#endif
+	default:
+		break;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * clk_prepare() the clocks for one larb (CCF builds only; a no-op on
+ * legacy-clkmgr or FPGA builds).  Kept separate from larb_clock_enable()
+ * because prepare may sleep and cannot run under SMI_lock (see
+ * smi_bwc_config).  Always returns 0.
+ */
+static int larb_clock_prepare(int larb_id, int enable_mtcmos)
+{
+#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING) && defined(SMI_INTERNAL_CCF_SUPPORT)
+	char name[30];
+
+	sprintf(name, "smi+%d", larb_id);
+
+	switch (larb_id) {
+	case 0:
+		/* must enable MTCMOS before clk */
+		/* common MTCMOS is called with larb0_MTCMOS */
+		if (enable_mtcmos)
+			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
+		smi_prepare_clk(smi_dev->smi_common_clk, name);
+		smi_prepare_clk(smi_dev->smi_larb0_clk, name);
+		break;
+	case 1:
+		if (enable_mtcmos) {
+			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
+			smi_prepare_clk(smi_dev->larb1_mtcmos, name);
+		}
+		smi_prepare_clk(smi_dev->smi_common_clk, name);
+		smi_prepare_clk(smi_dev->vdec1_larb_clk, name);
+		break;
+	case 2:
+		if (enable_mtcmos) {
+			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
+			smi_prepare_clk(smi_dev->larb2_mtcmos, name);
+		}
+		smi_prepare_clk(smi_dev->smi_common_clk, name);
+		smi_prepare_clk(smi_dev->img_larb2_clk, name);
+		break;
+	case 3:
+		if (enable_mtcmos) {
+			smi_prepare_clk(smi_dev->larb0_mtcmos, name);
+			smi_prepare_clk(smi_dev->larb3_mtcmos, name);
+		}
+		smi_prepare_clk(smi_dev->smi_common_clk, name);
+#if defined(SMI_D1)
+		smi_prepare_clk(smi_dev->venc_larb_clk, name);
+#elif defined(SMI_J)
+		smi_prepare_clk(smi_dev->venc_venc_clk, name);
+#endif
+		break;
+	default:
+		break;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Disable the clocks of one larb, in the reverse order of
+ * larb_clock_enable(): larb clk first, then SMI common, then (CCF builds,
+ * when enable_mtcmos) the MTCMOS domains with larb0's last.  Always
+ * returns 0.
+ */
+static int larb_clock_disable(int larb_id, int enable_mtcmos)
+{
+#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING)
+	char name[30];
+
+	sprintf(name, "smi+%d", larb_id);
+
+	switch (larb_id) {
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+	case 0:
+		disable_clock(MT_CG_DISP0_SMI_LARB0, name);
+		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
+		break;
+	case 1:
+#if defined(SMI_R)
+		disable_clock(MT_CG_LARB1_SMI_CKPDN, name);
+#else
+		disable_clock(MT_CG_VDEC1_LARB, name);
+#endif
+		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
+		break;
+	case 2:
+#if !defined(SMI_R)
+		/* SMI_R has no larb2; this case is a no-op there */
+		disable_clock(MT_CG_IMAGE_LARB2_SMI, name);
+		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
+#endif
+		break;
+	case 3:
+#if defined(SMI_D1)
+		disable_clock(MT_CG_VENC_LARB, name);
+#elif defined(SMI_D3)
+		disable_clock(MT_CG_VENC_VENC, name);
+#endif
+		disable_clock(MT_CG_DISP0_SMI_COMMON, name);
+		break;
+#else
+	case 0:
+		smi_disable_clk(smi_dev->smi_larb0_clk, name);
+		smi_disable_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos)
+			smi_disable_clk(smi_dev->larb0_mtcmos, name);
+		break;
+	case 1:
+		smi_disable_clk(smi_dev->vdec1_larb_clk, name);
+		smi_disable_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_disable_clk(smi_dev->larb1_mtcmos, name);
+			smi_disable_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+	case 2:
+		smi_disable_clk(smi_dev->img_larb2_clk, name);
+		smi_disable_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_disable_clk(smi_dev->larb2_mtcmos, name);
+			smi_disable_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+	case 3:
+#if defined(SMI_D1)
+		smi_disable_clk(smi_dev->venc_larb_clk, name);
+#elif defined(SMI_J)
+		smi_disable_clk(smi_dev->venc_venc_clk, name);
+#endif
+		smi_disable_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_disable_clk(smi_dev->larb3_mtcmos, name);
+			smi_disable_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+#endif
+	default:
+		break;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * clk_unprepare() counterpart of larb_clock_prepare() (CCF builds only;
+ * no-op otherwise).  May sleep, so it is called outside SMI_lock.
+ * Always returns 0.
+ */
+static int larb_clock_unprepare(int larb_id, int enable_mtcmos)
+{
+#if !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING) && defined(SMI_INTERNAL_CCF_SUPPORT)
+	char name[30];
+
+	sprintf(name, "smi+%d", larb_id);
+
+	switch (larb_id) {
+	case 0:
+		/* reverse of prepare: clks first, then the MTCMOS domains */
+		smi_unprepare_clk(smi_dev->smi_larb0_clk, name);
+		smi_unprepare_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos)
+			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
+		break;
+	case 1:
+		smi_unprepare_clk(smi_dev->vdec1_larb_clk, name);
+		smi_unprepare_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_unprepare_clk(smi_dev->larb1_mtcmos, name);
+			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+	case 2:
+		smi_unprepare_clk(smi_dev->img_larb2_clk, name);
+		smi_unprepare_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_unprepare_clk(smi_dev->larb2_mtcmos, name);
+			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+	case 3:
+#if defined(SMI_D1)
+		smi_unprepare_clk(smi_dev->venc_larb_clk, name);
+#elif defined(SMI_J)
+		smi_unprepare_clk(smi_dev->venc_venc_clk, name);
+#endif
+		smi_unprepare_clk(smi_dev->smi_common_clk, name);
+		if (enable_mtcmos) {
+			smi_unprepare_clk(smi_dev->larb3_mtcmos, name);
+			smi_unprepare_clk(smi_dev->larb0_mtcmos, name);
+		}
+		break;
+
+	default:
+		break;
+	}
+#endif
+	return 0;
+}
+
+/* Snapshot the SMI common registers listed in
+ * g_smi_common_backup_reg_offset[] into g_smi_common_backup[]. */
+static void backup_smi_common(void)
+{
+	int idx;
+
+	for (idx = 0; idx < SMI_COMMON_BACKUP_REG_NUM; idx++)
+		g_smi_common_backup[idx] =
+		    M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+				  (unsigned long)g_smi_common_backup_reg_offset[idx]);
+}
+
+/* Write the values saved by backup_smi_common() back to SMI common. */
+static void restore_smi_common(void)
+{
+	int idx;
+
+	for (idx = 0; idx < SMI_COMMON_BACKUP_REG_NUM; idx++)
+		M4U_WriteReg32(SMI_COMMON_EXT_BASE,
+			       (unsigned long)g_smi_common_backup_reg_offset[idx],
+			       g_smi_common_backup[idx]);
+}
+
+/*
+ * Save one larb's per-port registers (starting at offset 0x200, one
+ * 32-bit register per port; presumably the OSTD values — TODO confirm
+ * against the larb register map) into larb_port_backup[index].  Only the
+ * low 16 bits of each register are kept.  Larb0's backup also covers
+ * SMI common, whose clock is guaranteed on while any larb is processed.
+ */
+static void backup_larb_smi(int index)
+{
+	int port_index = 0;
+	unsigned short int *backup_ptr = NULL;
+	unsigned long larb_base = 0;
+	unsigned long larb_offset = 0x200;
+	int total_port_num = 0;
+
+	/* boundary check for larb_port_num and larb_port_backup access */
+	if (index < 0 || index >= SMI_LARB_NR)
+		return;
+
+	larb_base = gLarbBaseAddr[index];
+	total_port_num = larb_port_num[index];
+	backup_ptr = larb_port_backup[index];
+
+	/* boundary check for port value access */
+	if (total_port_num <= 0 || backup_ptr == NULL)
+		return;
+
+	for (port_index = 0; port_index < total_port_num; port_index++) {
+		*backup_ptr = (unsigned short int)(M4U_ReadReg32(larb_base, larb_offset));
+		backup_ptr++;
+		larb_offset += 4;
+	}
+
+	/* backup smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
+	if (index == 0)
+		backup_smi_common();
+
+}
+
+/*
+ * Restore one larb's per-port registers from larb_port_backup[index]
+ * (written by backup_larb_smi) and re-apply the two fixed settings that
+ * are never backed up: the VC setting at 0x20 and reg 0x2c = 4.  Larb0's
+ * restore also restores SMI common, before the ports.
+ */
+static void restore_larb_smi(int index)
+{
+	int port_index = 0;
+	unsigned short int *backup_ptr = NULL;
+	unsigned long larb_base = 0;
+	unsigned long larb_offset = 0x200;
+	unsigned int backup_value = 0;
+	int total_port_num = 0;
+
+	/* boundary check for larb_port_num and larb_port_backup access */
+	if (index < 0 || index >= SMI_LARB_NR)
+		return;
+
+	larb_base = gLarbBaseAddr[index];
+	total_port_num = larb_port_num[index];
+	backup_ptr = larb_port_backup[index];
+
+	/* boundary check for port value access */
+	if (total_port_num <= 0 || backup_ptr == NULL)
+		return;
+
+	/* restore smi common along with larb0, smi common clk is guaranteed to be on when processing larbs */
+	if (index == 0)
+		restore_smi_common();
+
+	for (port_index = 0; port_index < total_port_num; port_index++) {
+		backup_value = *backup_ptr;
+		M4U_WriteReg32(larb_base, larb_offset, backup_value);
+		backup_ptr++;
+		larb_offset += 4;
+	}
+
+	/* we do not backup 0x20 because it is a fixed setting */
+	M4U_WriteReg32(larb_base, 0x20, larb_vc_setting[index]);
+
+	/* turn off EMI empty OSTD double, fixed setting */
+	M4U_WriteReg32(larb_base, 0x2c, 4);
+
+}
+
+/*
+ * Backup one larb before its power/clock goes down: save SMI_LARB_CON
+ * into pLarbRegBackUp[larb], then the per-port registers via
+ * backup_larb_smi().  Clearing g_bInited on larb0 marks the whole SMI
+ * state as needing re-init.  NOTE(review): no bounds check on 'larb'
+ * here, unlike larb_reg_restore() — callers must pass a valid index.
+ * Always returns 0.
+ */
+static int larb_reg_backup(int larb)
+{
+	unsigned int *pReg = pLarbRegBackUp[larb];
+	unsigned long larb_base = gLarbBaseAddr[larb];
+
+	*(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
+
+	backup_larb_smi(larb);
+
+	if (0 == larb)
+		g_bInited = 0;
+
+	return 0;
+}
+
+/*
+ * Re-initialize one larb after power-on: enable OSTD soft mode when the
+ * larb is idle (SMI_LARB_STAT == 0), complain loudly when it is busy,
+ * then restore the backed-up per-port settings.  Always returns 0.
+ */
+static int smi_larb_init(unsigned int larb)
+{
+	unsigned int regval = 0;
+	unsigned int regval1 = 0;
+	unsigned int regval2 = 0;
+	unsigned long larb_base = get_larb_base_addr(larb);
+
+	/* Clock manager enable LARB clock before call back restore already,
+	 * it will be disabled after restore call back returns
+	 * Got to enable OSTD before engine starts */
+	regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
+
+	/* TODO: FIX ME */
+	/* regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0); */
+	/* regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1); */
+
+	if (0 == regval) {
+		SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
+		M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
+	} else {
+		/* regval1/regval2 are always 0 here while the reads above
+		 * stay commented out */
+		SMIMSG("Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n",
+		       larb_base, regval, regval1, regval2);
+		if (smi_debug_level >= 1) {
+			SMIERR("DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
+			       larb_base, regval, regval1, regval2);
+		} else {
+			dump_stack();
+		}
+	}
+
+	restore_larb_smi(larb);
+
+	return 0;
+}
+
+/*
+ * Restore one larb after power-on: re-apply the saved SMI_LARB_CON (via
+ * the SET/CLR mirror registers, since LARB_CON itself is set/clr
+ * controlled) and then run smi_larb_init().  Invalid larb ids are logged
+ * and ignored.  Always returns 0.
+ */
+int larb_reg_restore(int larb)
+{
+	unsigned long larb_base = SMI_ERROR_ADDR;
+	unsigned int regval = 0;
+	unsigned int *pReg = NULL;
+
+	larb_base = get_larb_base_addr(larb);
+
+	/* The larb assign doesn't exist */
+	if (larb_base == SMI_ERROR_ADDR) {
+		SMIMSG("Can't find the base address for Larb%d\n", larb);
+		return 0;
+	}
+
+	if (larb >= SMI_LARB_NR || larb < 0) {
+		SMIMSG("Can't find the backup register value for Larb%d\n", larb);
+		return 0;
+	}
+
+	pReg = pLarbRegBackUp[larb];
+
+	SMIDBG(1, "+larb_reg_restore(), larb_idx=%d\n", larb);
+	SMIDBG(1, "m4u part restore, larb_idx=%d\n", larb);
+	/* warning: larb_con is controlled by set/clr */
+	regval = *(pReg++);
+	M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
+	M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
+
+	smi_larb_init(larb);
+
+	return 0;
+}
+
+/* callback after larb clock is enabled */
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+/* Legacy clkmgr hook: clock is already on, reprogram the larb. */
+void on_larb_power_on(struct larb_monitor *h, int larb_idx)
+{
+	larb_reg_restore(larb_idx);
+}
+
+/* callback before larb clock is disabled */
+/* Legacy clkmgr hook: clock still on, snapshot the larb registers. */
+void on_larb_power_off(struct larb_monitor *h, int larb_idx)
+{
+	larb_reg_backup(larb_idx);
+}
+#endif /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+/* CCF power-domain hook (after power on): the MTCMOS is already up, so
+ * only prepare/enable the clks (enable_mtcmos = 0) around the register
+ * restore, then release them. */
+void on_larb_power_on_with_ccf(int larb_idx)
+{
+	/* MTCMOS has already enable, only enable clk here to set register value */
+	larb_clock_prepare(larb_idx, 0);
+	larb_clock_enable(larb_idx, 0);
+	larb_reg_restore(larb_idx);
+	larb_clock_disable(larb_idx, 0);
+	larb_clock_unprepare(larb_idx, 0);
+}
+
+/* CCF power-domain hook (before power off): enable clks briefly so the
+ * register snapshot reads live values. */
+void on_larb_power_off_with_ccf(int larb_idx)
+{
+	/* enable clk here for get correct register value */
+	larb_clock_prepare(larb_idx, 0);
+	larb_clock_enable(larb_idx, 0);
+	larb_reg_backup(larb_idx);
+	larb_clock_disable(larb_idx, 0);
+	larb_clock_unprepare(larb_idx, 0);
+}
+#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+#if defined(SMI_J)
+/* Enable dynamic clock management on SMI common (reg 0x300) and on each
+ * larb (reg 0x14).  The bit-field encodings are taken as-is from the
+ * 3.18 backport; presumably enable bit + idle-count thresholds — TODO
+ * confirm against the SMI_J register manual. */
+static void DCM_enable(void)
+{
+	M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x300, 0x1 + (0x78 << 1) + (0x4 << 8));
+	M4U_WriteReg32(LARB0_BASE, 0x14, (0x7 << 8) + (0xf << 4));
+	M4U_WriteReg32(LARB1_BASE, 0x14, (0x7 << 8) + (0xf << 4));
+	M4U_WriteReg32(LARB2_BASE, 0x14, (0x7 << 8) + (0xf << 4));
+	M4U_WriteReg32(LARB3_BASE, 0x14, (0x7 << 8) + (0xf << 4));
+
+}
+#endif
+
+
+/*
+ * Fake-mode check (e.g. WFD): fake scenarios only toggle a flag and are
+ * not real SMI profiles.  Returns 1 when the request was consumed here,
+ * 0 when normal smi_bwc_config processing should continue.
+ * pu4LocalCnt is unused for fake modes but kept for a uniform signature.
+ */
+static int fake_mode_handling(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
+{
+	if (p_conf->scenario != SMI_BWC_SCEN_WFD)
+		return 0;
+
+	wifi_disp_transaction = p_conf->b_on_off ? 1 : 0;
+	if (wifi_disp_transaction)
+		SMIMSG("Enable WFD in profile: %d\n", smi_profile);
+	else
+		SMIMSG("Disable WFD in profile: %d\n", smi_profile);
+
+	return 1;
+}
+
+/*
+ * Send a uevent notifying userspace of the new OVL pixel limit for the
+ * given BWC scenario (SCEN=/HWOVL= environment strings).  Returns the
+ * kobject_uevent_env() result, or 0 when the uevent device is not ready.
+ */
+static int ovl_limit_uevent(int bwc_scenario, int ovl_pixel_limit)
+{
+	int err = 0;
+	char *envp[3];
+	char scenario_buf[32] = "";
+	char ovl_limit_buf[32] = "";
+
+	snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
+	snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
+
+	envp[0] = scenario_buf;
+	envp[1] = ovl_limit_buf;
+	envp[2] = NULL;
+
+	/*
+	 * Test the pointer we actually dereference: the original checked
+	 * pSmiDev but then used smiDeviceUevent->kobj, which would oops if
+	 * only the cdev had been set up.
+	 */
+	if (smiDeviceUevent != NULL) {
+		err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
+		SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit, bwc_scenario);
+	}
+
+	/* KERN_INFO dropped: SMIMSG already applies pr_fmt/level, and the
+	 * level prefix would otherwise be embedded mid-message. */
+	if (err < 0)
+		SMIMSG("[%s] kobject_uevent_env error = %d\n", __func__, err);
+
+	return err;
+}
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+/* Map a power-domain subsys id onto its SMI larb index; SMI_LARB_NR
+ * means "no larb for this subsys". */
+static unsigned int smiclk_subsys_2_larb(enum subsys_id sys)
+{
+	switch (sys) {
+	case SYS_DIS:
+		return 0;	/* 0&4 is disp */
+	case SYS_VDE:
+		return 1;
+	case SYS_ISP:
+		return 2;
+	case SYS_VEN:
+		return 3;
+	default:
+		return SMI_LARB_NR;
+	}
+}
+
+/*
+ * pg_callbacks.after_on: a power domain just came up — restore the
+ * matching larb's registers.  fglarbcallback gates this until
+ * smi_common_init() has run.
+ */
+static void smiclk_subsys_after_on(enum subsys_id sys)
+{
+	unsigned int i4larbid = smiclk_subsys_2_larb(sys);
+
+	if (!fglarbcallback) {
+		SMIDBG(1, "don't need restore incb\n");
+		return;
+	}
+
+	if (i4larbid < SMI_LARB_NR) {
+		on_larb_power_on_with_ccf(i4larbid);
+#if defined(SMI_D1)
+		/* inform m4u to restore register value */
+		/* NOTE(review): the comment says "restore" but this calls
+		 * m4u_larb_backup(), while before_off calls
+		 * m4u_larb_restore() — the m4u API naming looks inverted
+		 * relative to intent; confirm against m4u.h. */
+		m4u_larb_backup((int)i4larbid);
+#endif
+	} else {
+		SMIDBG(1, "subsys id don't backup sys %d larb %u\n", sys, i4larbid);
+	}
+}
+
+/*
+ * pg_callbacks.before_off: a power domain is about to go down — snapshot
+ * the matching larb's registers while clocks are still available.
+ */
+static void smiclk_subsys_before_off(enum subsys_id sys)
+{
+	unsigned int i4larbid = smiclk_subsys_2_larb(sys);
+
+	if (!fglarbcallback) {
+		SMIDBG(1, "don't need backup incb\n");
+		return;
+	}
+
+	if (i4larbid < SMI_LARB_NR) {
+		on_larb_power_off_with_ccf(i4larbid);
+#if defined(SMI_D1)
+		/* inform m4u to backup register value */
+		/* NOTE(review): comment says "backup" but this calls
+		 * m4u_larb_restore(); see the matching note in
+		 * smiclk_subsys_after_on — verify against m4u.h. */
+		m4u_larb_restore((int)i4larbid);
+#endif
+	} else {
+		SMIDBG(1, "subsys id don't restore sys %d larb %d\n", sys, i4larbid);
+	}
+
+}
+
+/* Registered with the power-gating core in smi_common_init() so larb
+ * registers are saved/restored around MTCMOS transitions. */
+struct pg_callbacks smi_clk_subsys_handle = {
+	.before_off = smiclk_subsys_before_off,
+	.after_on = smiclk_subsys_after_on
+};
+#endif
+
+/*
+ * Core bandwidth-control entry: account a scenario on/off request in the
+ * global (and optional per-process pu4LocalCnt) concurrency table, pick
+ * the highest-priority active scenario, and reprogram the SMI bus
+ * registers for it.  Clock prepare/unprepare happens outside SMI_lock
+ * (it may sleep); enable/disable happens inside.  Returns 0 on success
+ * or no-op, -1 on an out-of-range scenario.
+ */
+static int smi_bwc_config(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
+{
+	int i;
+	int result = 0;
+	unsigned int u4Concurrency = 0;
+	MTK_SMI_BWC_SCEN eFinalScen;
+	static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
+
+	if (smi_tuning_mode == 1) {
+		SMIMSG("Doesn't change profile in tunning mode");
+		return 0;
+	}
+
+
+	if ((SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario)) {
+		SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n", p_conf->scenario);
+		return -1;
+	}
+#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
+	if (p_conf->b_on_off) {
+		/* set mmdvfs step according to certain scenarios */
+		mmdvfs_notify_scenario_enter(p_conf->scenario);
+	} else {
+		/* set mmdvfs step to default after the scenario exits */
+		mmdvfs_notify_scenario_exit(p_conf->scenario);
+	}
+#endif
+
+	spin_lock(&g_SMIInfo.SMI_lock);
+	result = fake_mode_handling(p_conf, pu4LocalCnt);
+	spin_unlock(&g_SMIInfo.SMI_lock);
+
+	/* Fake mode is not a real SMI profile, so we need to return here */
+	if (result == 1)
+		return 0;
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+	/* prepare larb clk because prepare cannot in spinlock */
+	for (i = 0; i < SMI_LARB_NR; i++)
+		larb_clock_prepare(i, 1);
+#endif
+	spin_lock(&g_SMIInfo.SMI_lock);
+
+	if (p_conf->b_on_off) {
+		/* turn on certain scenario */
+		g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
+
+		if (NULL != pu4LocalCnt)
+			pu4LocalCnt[p_conf->scenario] += 1;
+
+	} else {
+		/* turn off certain scenario */
+		if (0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]) {
+			SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
+			       p_conf->scenario, g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
+		} else {
+			g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
+		}
+
+		if (NULL != pu4LocalCnt) {
+			if (0 == pu4LocalCnt[p_conf->scenario]) {
+				SMIMSG
+				    ("Process : %s did too many turning off for local SMI profile:%d,%d\n",
+				     current->comm, p_conf->scenario,
+				     pu4LocalCnt[p_conf->scenario]);
+			} else {
+				pu4LocalCnt[p_conf->scenario] -= 1;
+			}
+		}
+	}
+
+	/* collapse the per-scenario refcounts into one active-scenario bitmask */
+	for (i = 0; i < SMI_BWC_SCEN_CNT; i++) {
+		if (g_SMIInfo.pu4ConcurrencyTable[i])
+			u4Concurrency |= (1 << i);
+	}
+#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
+	/* notify mmdvfs concurrency */
+	mmdvfs_notify_scenario_concurrency(u4Concurrency);
+#endif
+
+	/* fixed priority order: GPU > ICFP > VSS > VR_SLOW > VR > VP >
+	 * SWDEC_VP > VENC > NORMAL */
+	if ((1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_MM_GPU;
+	else if ((1 << SMI_BWC_SCEN_ICFP) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_ICFP;
+	else if ((1 << SMI_BWC_SCEN_VSS) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_VSS;
+	else if ((1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_VR_SLOW;
+	else if ((1 << SMI_BWC_SCEN_VR) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_VR;
+	else if ((1 << SMI_BWC_SCEN_VP) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_VP;
+	else if ((1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
+	else if ((1 << SMI_BWC_SCEN_VENC) & u4Concurrency)
+		eFinalScen = SMI_BWC_SCEN_VENC;
+	else
+		eFinalScen = SMI_BWC_SCEN_NORMAL;
+
+	if (ePreviousFinalScen != eFinalScen) {
+		ePreviousFinalScen = eFinalScen;
+	} else {
+		SMIMSG("Scen equal%d,don't change\n", eFinalScen);
+		spin_unlock(&g_SMIInfo.SMI_lock);
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+		/* unprepare larb clock */
+		for (i = 0; i < SMI_LARB_NR; i++)
+			larb_clock_unprepare(i, 1);
+#endif
+		return 0;
+	}
+
+	/* enable larb clock */
+	for (i = 0; i < SMI_LARB_NR; i++)
+		larb_clock_enable(i, 1);
+
+	smi_profile = eFinalScen;
+
+	smi_bus_regs_setting(smi_profile,
+			     smi_profile_config[smi_profile].setting);
+
+	/* Bandwidth Limiter */
+	switch (eFinalScen) {
+	case SMI_BWC_SCEN_VP:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
+		break;
+
+	case SMI_BWC_SCEN_SWDEC_VP:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
+		break;
+
+	case SMI_BWC_SCEN_ICFP:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_ICFP");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
+		break;
+	case SMI_BWC_SCEN_VR:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
+		break;
+
+	case SMI_BWC_SCEN_VR_SLOW:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+		break;
+
+	case SMI_BWC_SCEN_VENC:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+		break;
+
+	case SMI_BWC_SCEN_NORMAL:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+		break;
+
+	case SMI_BWC_SCEN_MM_GPU:
+		SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+		break;
+
+	default:
+		SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
+		g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+		break;
+	}
+
+	/* disable larb clock */
+	for (i = 0; i < SMI_LARB_NR; i++)
+		larb_clock_disable(i, 1);
+
+	spin_unlock(&g_SMIInfo.SMI_lock);
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+	/* unprepare larb clock */
+	for (i = 0; i < SMI_LARB_NR; i++)
+		larb_clock_unprepare(i, 1);
+#endif
+	ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
+
+	/* force 30 fps in VR slow motion, because disp driver set fps apis got mutex,
+	 * call these APIs only when necessary */
+	{
+		static unsigned int current_fps;
+
+		if ((eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30)) {
+			/* force 30 fps in VR slow motion profile */
+			primary_display_force_set_vsync_fps(30);
+			current_fps = 30;
+			SMIMSG("[SMI_PROFILE] set 30 fps\n");
+		} else if ((eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30)) {
+			/* back to normal fps */
+			current_fps = primary_display_get_fps();
+			primary_display_force_set_vsync_fps(current_fps);
+			SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
+		}
+	}
+
+	SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
+	       (p_conf->b_on_off ? "on" : "off"), eFinalScen,
+	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
+	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
+	       g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
+
+	return 0;
+}
+
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+/* Non-CCF builds: hook larb power transitions to back up / restore registers. */
+struct larb_monitor larb_monitor_handler = {
+	.level = LARB_MONITOR_LEVEL_HIGH,
+	.backup = on_larb_power_off,
+	.restore = on_larb_power_on
+};
+#endif /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+/*
+ * One-time SMI setup: allocate larb register backup buffers, power all larbs
+ * on, snapshot default register values, apply the NORMAL profile, then
+ * register the power on/off callback (CCF or legacy larb monitor).
+ * Returns 0 (allocation failures are logged but not fatal here).
+ */
+int smi_common_init(void)
+{
+	int i;
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+	struct pg_callbacks *pold = NULL;	/* was 0; NULL for pointers */
+#endif
+
+#if defined(SMI_J)
+	/* enable DCM */
+	DCM_enable();
+#endif
+	SMIMSG("Enter smi_common_init\n");
+	for (i = 0; i < SMI_LARB_NR; i++) {
+		/* kzalloc == kmalloc(GFP_KERNEL | __GFP_ZERO), clearer intent */
+		pLarbRegBackUp[i] = kzalloc(LARB_BACKUP_REG_SIZE, GFP_KERNEL);
+		if (pLarbRegBackUp[i] == NULL)
+			SMIERR("pLarbRegBackUp kmalloc fail %d\n", i);
+	}
+
+	/*
+	 * make sure all larb power is on before we register callback func.
+	 * then, when larb power is first off, default register value will be backed up.
+	 */
+	for (i = 0; i < SMI_LARB_NR; i++) {
+		larb_clock_prepare(i, 1);
+		larb_clock_enable(i, 1);
+	}
+	/* keep default HW value */
+	save_default_common_val(&is_default_value_saved, default_val_smi_l1arb);
+	/* set nonconstant variables */
+	smi_set_nonconstant_variable();
+	/* apply init setting after kernel boot (log was a duplicated "Enter" message) */
+	SMIMSG("Apply init bus setting\n");
+	smi_bus_regs_setting(SMI_BWC_SCEN_NORMAL, smi_profile_config[SMI_BWC_SCEN_NORMAL].setting);
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+	fglarbcallback = true;
+
+	pold = register_pg_callback(&smi_clk_subsys_handle);
+	if (pold)
+		SMIERR("smi reg clk cb call fail\n");
+	else
+		SMIMSG("smi reg clk cb call success\n");
+
+#else /* !defined(SMI_INTERNAL_CCF_SUPPORT) */
+	register_larb_monitor(&larb_monitor_handler);
+#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+	/* drop the references taken above; backup callbacks are in place now */
+	for (i = 0; i < SMI_LARB_NR; i++) {
+		larb_clock_disable(i, 1);
+		larb_clock_unprepare(i, 1);
+	}
+
+	return 0;
+}
+
+/*
+ * Char-device open: allocate a zeroed per-fd array of BWC scenario
+ * reference counters in file->private_data. Returns 0 or -ENOMEM.
+ */
+static int smi_open(struct inode *inode, struct file *file)
+{
+	/* kcalloc == overflow-checked, zeroed kmalloc_array + memset */
+	file->private_data = kcalloc(SMI_BWC_SCEN_CNT, sizeof(unsigned int), GFP_ATOMIC);
+	if (file->private_data == NULL) {
+		/* message said "DDP" (copy-paste); this is the SMI device */
+		SMIMSG("Not enough memory for SMI open operation\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Char-device release: free the per-fd scenario counter array.
+ * (A long-dead "#if 0" force-off loop was removed; kfree(NULL) is a no-op,
+ * so no NULL guard is needed either.)
+ */
+static int smi_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	file->private_data = NULL;
+
+	return 0;
+}
+
+/*
+ * Main ioctl dispatcher: BWC profile configuration, GMP MM-info get/set,
+ * larb/common register dumps, and (on SMI_D1/D2/D3) MMDVFS commands.
+ * Returns 0 on success, -EFAULT on bad user pointers, -ENOTTY for
+ * unknown commands.
+ */
+static long smi_ioctl(struct file *pFile, unsigned int cmd, unsigned long param)
+{
+	int ret = 0;
+
+	switch (cmd) {
+
+	/* disable reg access ioctl by default for possible security holes */
+	/* TBD: check valid SMI register range */
+	case MTK_IOC_SMI_BWC_CONFIG:{
+			MTK_SMI_BWC_CONFIG cfg;
+
+			ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_CONFIG));
+			if (ret) {
+				SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
+				return -EFAULT;
+			}
+			ret = smi_bwc_config(&cfg, NULL);
+
+			break;
+		}
+	/* GMP start */
+	case MTK_IOC_SMI_BWC_INFO_SET:{
+			smi_set_mm_info_ioctl_wrapper(pFile, cmd, param);
+			break;
+		}
+	case MTK_IOC_SMI_BWC_INFO_GET:{
+			smi_get_mm_info_ioctl_wrapper(pFile, cmd, param);
+			break;
+		}
+	/* GMP end */
+
+	case MTK_IOC_SMI_DUMP_LARB:{
+			unsigned int larb_index;
+
+			ret = copy_from_user(&larb_index, (void *)param, sizeof(unsigned int));
+			if (ret)
+				return -EFAULT;
+
+			smi_dumpLarb(larb_index);
+		}
+		break;
+
+	case MTK_IOC_SMI_DUMP_COMMON:{
+			unsigned int arg;
+
+			ret = copy_from_user(&arg, (void *)param, sizeof(unsigned int));
+			if (ret)
+				return -EFAULT;
+
+			smi_dumpCommon();
+		}
+		break;
+#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
+	case MTK_IOC_MMDVFS_CMD:
+		{
+			MTK_MMDVFS_CMD mmdvfs_cmd;
+
+			if (copy_from_user(&mmdvfs_cmd, (void *)param, sizeof(MTK_MMDVFS_CMD)))
+				return -EFAULT;
+
+			mmdvfs_handle_cmd(&mmdvfs_cmd);
+
+			if (copy_to_user((void *)param, (void *)&mmdvfs_cmd,
+					 sizeof(MTK_MMDVFS_CMD)))
+				return -EFAULT;
+		}
+		break;
+#endif
+	default:
+		/*
+		 * was "return -1", which userspace sees as -EPERM;
+		 * -ENOTTY is the conventional "unknown ioctl" error.
+		 */
+		return -ENOTTY;
+	}
+
+	return ret;
+}
+
+static const struct file_operations smiFops = {
+ .owner = THIS_MODULE,
+ .open = smi_open,
+ .release = smi_release,
+ .unlocked_ioctl = smi_ioctl,
+ .compat_ioctl = MTK_SMI_COMPAT_ioctl,
+};
+
+#if defined(SMI_J)
+/*
+static int smi_sel;
+
+static ssize_t smi_sel_show(struct kobject *kobj, struct kobj_attribute *attr,
+char *buf)
+{
+ char *p = buf;
+
+ p += sprintf(p, "%d\n", smi_sel);
+
+ return p - buf;
+}
+
+static ssize_t smi_sel_store(struct kobject *kobj, struct kobj_attribute *attr,
+const char *buf, size_t count)
+{
+ int val;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EPERM;
+
+ smi_sel = val;
+
+ return count;
+}
+
+static ssize_t smi_dbg_show(struct kobject *kobj, struct kobj_attribute *attr,
+char *buf)
+{
+ if (smi_sel >= 0 && smi_sel < SMI_LARB_NR)
+ smi_dumpLarb(smi_sel);
+ else if (smi_sel == 999)
+ smi_dumpCommon();
+
+ return 0;
+}
+DEFINE_ATTR_RW(smi_sel);
+DEFINE_ATTR_RO(smi_dbg);
+
+static struct attribute *smi_attrs[] = {__ATTR_OF(smi_sel), __ATTR_OF(smi_dbg),
+NULL, };
+
+static struct attribute_group smi_attr_group = {.name = "smi", .attrs =
+smi_attrs, };
+*/
+#endif
+static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
+/*
+ * Register the SMI char device: allocate a dev_t region, a cdev, and add
+ * it to the system. Returns 0, -EAGAIN or -ENOMEM; all partially acquired
+ * resources are released on failure.
+ */
+static inline int smi_register(void)
+{
+	if (alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI")) {
+		SMIERR("Allocate device No. failed");
+		return -EAGAIN;
+	}
+	/* Allocate driver */
+	pSmiDev = cdev_alloc();
+	if (pSmiDev == NULL) {
+		unregister_chrdev_region(smiDevNo, 1);
+		SMIERR("Allocate mem for kobject failed");
+		return -ENOMEM;
+	}
+	/* Attach file operations */
+	cdev_init(pSmiDev, &smiFops);
+	pSmiDev->owner = THIS_MODULE;
+
+	/* Add to system */
+	if (cdev_add(pSmiDev, smiDevNo, 1)) {
+		SMIERR("Attach file operation failed");
+		cdev_del(pSmiDev);	/* was leaking the cdev_alloc()ed cdev */
+		unregister_chrdev_region(smiDevNo, 1);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/*
+ * Return a "physical" address for register region i, for debug logs only.
+ * NOTE(review): gSMIBaseAddrs[] holds of_iomap() return values (vmalloc
+ * space); virt_to_phys() is only defined for linearly-mapped kernel
+ * addresses, so this value is not a reliable bus address -- confirm
+ * before using it for anything beyond logging.
+ */
+static unsigned long get_register_base(int i)
+{
+	unsigned long pa_value = 0;
+	unsigned long va_value = 0;
+
+	va_value = gSMIBaseAddrs[i];
+	pa_value = virt_to_phys((void *)va_value);
+
+	return pa_value;
+}
+
+/* Log the VA and (debug-only) PA of every mapped SMI register region. */
+void register_base_dump(void)
+{
+	int i = 0;
+
+	for (i = 0; i < SMI_REG_REGION_MAX; i++) {
+		SMIMSG("REG BASE:%s-->VA=0x%lx,PA=0x%lx\n",
+		       smi_get_region_name(i), gSMIBaseAddrs[i], get_register_base(i));
+	}
+}
+
+static struct class *pSmiClass;
+
+/*
+ * Platform probe: allocate the smi_device, iomap all register regions from
+ * DT, fetch clocks (CCF builds), register the char device and class, seed
+ * the per-SoC legacy base-address globals, then run smi_common_init().
+ * Only one probed instance is supported.
+ *
+ * Fixes vs. original: pdev is checked before anything is allocated, and
+ * smi_dev (plus the cdev/class where applicable) is released on every
+ * error path instead of being leaked.
+ */
+static int smi_probe(struct platform_device *pdev)
+{
+	int i;
+	static unsigned int smi_probe_cnt;
+	struct device *smiDevice = NULL;
+
+	SMIMSG("Enter smi_probe\n");
+	/* Debug only: a second probe is rejected, not supported */
+	if (smi_probe_cnt != 0) {
+		SMIERR("Only support 1 SMI driver probed\n");
+		return 0;
+	}
+	smi_probe_cnt++;
+
+	/* validate pdev before allocating anything (was checked after kmalloc) */
+	if (pdev == NULL) {
+		SMIERR("platform data missed\n");
+		return -ENXIO;
+	}
+
+	SMIMSG("Allocate smi_dev space\n");
+	smi_dev = kmalloc(sizeof(struct smi_device), GFP_KERNEL);
+	if (smi_dev == NULL) {
+		SMIERR("Unable to allocate memory for smi driver\n");
+		return -ENOMEM;
+	}
+	/* Keep the device structure */
+	smi_dev->dev = &pdev->dev;
+
+	/* Map registers */
+	for (i = 0; i < SMI_REG_REGION_MAX; i++) {
+		SMIMSG("Save region: %d\n", i);
+		smi_dev->regs[i] = (void *)of_iomap(pdev->dev.of_node, i);
+		if (!smi_dev->regs[i]) {
+			SMIERR("Unable to ioremap registers, of_iomap fail, i=%d\n", i);
+			kfree(smi_dev);	/* was leaked */
+			smi_dev = NULL;
+			return -ENOMEM;
+		}
+		/* Record the register base in global variable */
+		gSMIBaseAddrs[i] = (unsigned long)(smi_dev->regs[i]);
+		SMIMSG("DT, i=%d, region=%s, map_addr=0x%p, reg_pa=0x%lx\n", i,
+		       smi_get_region_name(i), smi_dev->regs[i], get_register_base(i));
+	}
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+	smi_dev->smi_common_clk = get_smi_clk("smi-common");
+	smi_dev->smi_larb0_clk = get_smi_clk("smi-larb0");
+	smi_dev->img_larb2_clk = get_smi_clk("img-larb2");
+#if defined(SMI_D1)
+	smi_dev->vdec0_vdec_clk = get_smi_clk("vdec0-vdec");
+#endif
+	smi_dev->vdec1_larb_clk = get_smi_clk("vdec1-larb");
+	smi_dev->venc_larb_clk = get_smi_clk("venc-larb");
+#if defined(SMI_J)
+	smi_dev->venc_venc_clk = get_smi_clk("venc-venc");
+#endif
+	/* MTCMOS */
+	smi_dev->larb1_mtcmos = get_smi_clk("mtcmos-vde");
+	smi_dev->larb3_mtcmos = get_smi_clk("mtcmos-ven");
+	smi_dev->larb2_mtcmos = get_smi_clk("mtcmos-isp");
+	smi_dev->larb0_mtcmos = get_smi_clk("mtcmos-dis");
+#endif
+
+	SMIMSG("Execute smi_register\n");
+	if (smi_register()) {
+		dev_err(&pdev->dev, "register char failed\n");
+		kfree(smi_dev);	/* was leaked */
+		smi_dev = NULL;
+		return -EAGAIN;
+	}
+
+	pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
+	if (IS_ERR(pSmiClass)) {
+		int ret = PTR_ERR(pSmiClass);
+
+		SMIERR("Unable to create class, err = %d", ret);
+		/* undo smi_register() and the allocation (all were leaked) */
+		cdev_del(pSmiDev);
+		unregister_chrdev_region(smiDevNo, 1);
+		kfree(smi_dev);
+		smi_dev = NULL;
+		return ret;
+	}
+	SMIMSG("Create device\n");
+	smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
+	if (IS_ERR(smiDevice))
+		SMIERR("device_create failed\n");	/* was unchecked */
+	smiDeviceUevent = smiDevice;
+
+	SMIMSG("SMI probe done.\n");
+#if defined(SMI_D2)
+	/* To adapt the legacy codes */
+	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
+	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
+	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
+	smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
+	/* smi_reg_base_barb4 = gSMIBaseAddrs[SMI_LARB4_REG_INDX]; */
+
+	gLarbBaseAddr[0] = LARB0_BASE;
+	gLarbBaseAddr[1] = LARB1_BASE;
+	gLarbBaseAddr[2] = LARB2_BASE;
+#elif defined(SMI_D1) || defined(SMI_D3)
+	/* To adapt the legacy codes */
+	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
+	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
+	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
+	smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
+	smi_reg_base_barb3 = gSMIBaseAddrs[SMI_LARB3_REG_INDX];
+
+	gLarbBaseAddr[0] = LARB0_BASE;
+	gLarbBaseAddr[1] = LARB1_BASE;
+	gLarbBaseAddr[2] = LARB2_BASE;
+	gLarbBaseAddr[3] = LARB3_BASE;
+
+#elif defined(SMI_J)
+	/* To adapt the legacy codes */
+	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
+	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
+	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
+	smi_reg_base_barb2 = gSMIBaseAddrs[SMI_LARB2_REG_INDX];
+	smi_reg_base_barb3 = gSMIBaseAddrs[SMI_LARB3_REG_INDX];
+
+	gLarbBaseAddr[0] = LARB0_BASE;
+	gLarbBaseAddr[1] = LARB1_BASE;
+	gLarbBaseAddr[2] = LARB2_BASE;
+	gLarbBaseAddr[3] = LARB3_BASE;
+#else
+	smi_reg_base_common_ext = gSMIBaseAddrs[SMI_COMMON_REG_INDX];
+	smi_reg_base_barb0 = gSMIBaseAddrs[SMI_LARB0_REG_INDX];
+	smi_reg_base_barb1 = gSMIBaseAddrs[SMI_LARB1_REG_INDX];
+
+	gLarbBaseAddr[0] = LARB0_BASE;
+	gLarbBaseAddr[1] = LARB1_BASE;
+#endif
+
+	SMIMSG("Execute smi_common_init\n");
+	smi_common_init();
+
+	return 0;
+}
+
+/* Map a register-region index to a human-readable name for logging. */
+char *smi_get_region_name(unsigned int region_indx)
+{
+	switch (region_indx) {
+	case SMI_COMMON_REG_INDX:
+		return "smi_common";
+	case SMI_LARB0_REG_INDX:
+		return "larb0";
+	case SMI_LARB1_REG_INDX:
+		return "larb1";
+	case SMI_LARB2_REG_INDX:
+		return "larb2";
+	case SMI_LARB3_REG_INDX:
+		return "larb3";
+	default:
+		SMIMSG("invalid region id=%u", region_indx);	/* %u: arg is unsigned (was %d) */
+		return "unknown";
+	}
+}
+
+/*
+ * Platform remove: tear down in reverse order of creation --
+ * device and class first, then the cdev and the chrdev region
+ * (the original deleted the cdev before destroying the device node).
+ */
+static int smi_remove(struct platform_device *pdev)
+{
+	device_destroy(pSmiClass, smiDevNo);
+	class_destroy(pSmiClass);
+	cdev_del(pSmiDev);
+	unregister_chrdev_region(smiDevNo, 1);
+	return 0;
+}
+
+/* No-op suspend hook: nothing is saved here. */
+static int smi_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	return 0;
+}
+
+/* No-op resume hook: nothing is restored here. */
+static int smi_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/* Device-tree match table: binds this driver to "mediatek,smi_common" nodes. */
+static const struct of_device_id smi_of_ids[] = {
+	{.compatible = "mediatek,smi_common",},
+	{}
+};
+
+/* Platform-driver glue for the MTK_SMI device. */
+static struct platform_driver smiDrv = {
+	.probe = smi_probe,
+	.remove = smi_remove,
+	.suspend = smi_suspend,
+	.resume = smi_resume,
+	.driver = {
+		   .name = "MTK_SMI",
+		   .owner = THIS_MODULE,
+		   .of_match_table = smi_of_ids,
+		   }
+};
+
+/*
+ * Module init: initialize the BWC lock and concurrency table, init MMDVFS
+ * on the SoCs that have it, and register the platform driver.
+ */
+static int __init smi_init(void)
+{
+	SMIMSG("smi_init enter\n");
+	spin_lock_init(&g_SMIInfo.SMI_lock);
+#if defined(SMI_D1) || defined(SMI_D2) || defined(SMI_D3)
+	/* MMDVFS init */
+	mmdvfs_init(&g_smi_bwc_mm_info);
+#endif
+	memset(g_SMIInfo.pu4ConcurrencyTable, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
+
+	/* Informs the kernel about the function to be called */
+	/* if hardware matching MTK_SMI has been found */
+	SMIMSG("register platform driver\n");
+	if (platform_driver_register(&smiDrv)) {
+		/* message said "MAU" -- a copy-paste from the M4U driver */
+		SMIERR("failed to register SMI driver");
+		return -ENODEV;
+	}
+	SMIMSG("exit smi_init\n");
+	return 0;
+}
+
+/* Module unload: unregister the platform driver. */
+static void __exit smi_exit(void)
+{
+	platform_driver_unregister(&smiDrv);
+
+}
+
+
+
+
+
+
+
+/* Empty stub: client status-change notifications are ignored; both parameters unused. */
+void smi_client_status_change_notify(int module, int mode)
+{
+
+}
+
+#if defined(SMI_J)
+/* Return the most recently applied BWC profile (global smi_profile). */
+MTK_SMI_BWC_SCEN smi_get_current_profile(void)
+{
+	return (MTK_SMI_BWC_SCEN) smi_profile;
+}
+#endif
+#if IS_ENABLED(CONFIG_COMPAT)
+/* 32 bits process ioctl support: */
+/* This is prepared for the future extension since currently the sizes of 32 bits */
+/* and 64 bits smi parameters are the same. */
+
+/* 32-bit userspace layouts of the BWC ioctl payloads (compat_int_t fields). */
+struct MTK_SMI_COMPAT_BWC_CONFIG {
+	compat_int_t scenario;
+	compat_int_t b_on_off;	/* 0 : exit this scenario , 1 : enter this scenario */
+};
+
+struct MTK_SMI_COMPAT_BWC_INFO_SET {
+	compat_int_t property;
+	compat_int_t value1;
+	compat_int_t value2;
+};
+
+struct MTK_SMI_COMPAT_BWC_MM_INFO {
+	compat_uint_t flag;	/* Reserved */
+	compat_int_t concurrent_profile;
+	compat_int_t sensor_size[2];
+	compat_int_t video_record_size[2];
+	compat_int_t display_size[2];
+	compat_int_t tv_out_size[2];
+	compat_int_t fps;
+	compat_int_t video_encode_codec;
+	compat_int_t video_decode_codec;
+	compat_int_t hw_ovl_limit;
+};
+
+/* Compat ioctl numbers; today the 32/64-bit layouts happen to match. */
+#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, struct MTK_SMI_COMPAT_BWC_CONFIG)
+#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, struct MTK_SMI_COMPAT_BWC_INFO_SET)
+#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, struct MTK_SMI_COMPAT_BWC_MM_INFO)
+
+/* Copy a 32-bit BWC_CONFIG into the native layout, field by field (both __user). */
+static int compat_get_smi_bwc_config_struct(struct MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
+					    MTK_SMI_BWC_CONFIG __user *data)
+{
+
+	compat_int_t i;
+	int err;
+
+	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+	err = get_user(i, &(data32->scenario));
+	err |= put_user(i, &(data->scenario));
+	err |= get_user(i, &(data32->b_on_off));
+	err |= put_user(i, &(data->b_on_off));
+
+	return err;
+}
+
+/* Copy a 32-bit BWC_INFO_SET into the native layout, field by field (both __user). */
+static int compat_get_smi_bwc_mm_info_set_struct(struct MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
+						 MTK_SMI_BWC_INFO_SET __user *data)
+{
+
+	compat_int_t i;
+	int err;
+
+	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+	err = get_user(i, &(data32->property));
+	err |= put_user(i, &(data->property));
+	err |= get_user(i, &(data32->value1));
+	err |= put_user(i, &(data->value1));
+	err |= get_user(i, &(data32->value2));
+	err |= put_user(i, &(data->value2));
+
+	return err;
+}
+
+/*
+ * Copy a 32-bit BWC_MM_INFO into the native layout (both __user).
+ * Note: get_user/put_user return -EFAULT while copy_from_user/copy_to_user
+ * return an uncopied-byte count; OR-ing them only yields a nonzero
+ * "something failed" flag, which is all the caller checks.
+ */
+static int compat_get_smi_bwc_mm_info_struct(struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
+					     MTK_SMI_BWC_MM_INFO __user *data)
+{
+	compat_uint_t u;
+	compat_int_t i;
+	compat_int_t p[2];
+	int err;
+
+	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+	err = get_user(u, &(data32->flag));
+	err |= put_user(u, &(data->flag));
+	err |= get_user(i, &(data32->concurrent_profile));
+	err |= put_user(i, &(data->concurrent_profile));
+	err |= copy_from_user(p, &(data32->sensor_size), sizeof(p));
+	err |= copy_to_user(&(data->sensor_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data32->video_record_size), sizeof(p));
+	err |= copy_to_user(&(data->video_record_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data32->display_size), sizeof(p));
+	err |= copy_to_user(&(data->display_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data32->tv_out_size), sizeof(p));
+	err |= copy_to_user(&(data->tv_out_size), p, sizeof(p));
+	err |= get_user(i, &(data32->fps));
+	err |= put_user(i, &(data->fps));
+	err |= get_user(i, &(data32->video_encode_codec));
+	err |= put_user(i, &(data->video_encode_codec));
+	err |= get_user(i, &(data32->video_decode_codec));
+	err |= put_user(i, &(data->video_decode_codec));
+	err |= get_user(i, &(data32->hw_ovl_limit));
+	err |= put_user(i, &(data->hw_ovl_limit));
+
+	return err;
+}
+
+/*
+ * Copy a native BWC_MM_INFO back out to the 32-bit layout (both __user);
+ * mirror of compat_get_smi_bwc_mm_info_struct with the direction reversed.
+ */
+static int compat_put_smi_bwc_mm_info_struct(struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
+					     MTK_SMI_BWC_MM_INFO __user *data)
+{
+
+	compat_uint_t u;
+	compat_int_t i;
+	compat_int_t p[2];
+	int err;
+
+	/* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+	err = get_user(u, &(data->flag));
+	err |= put_user(u, &(data32->flag));
+	err |= get_user(i, &(data->concurrent_profile));
+	err |= put_user(i, &(data32->concurrent_profile));
+	err |= copy_from_user(p, &(data->sensor_size), sizeof(p));
+	err |= copy_to_user(&(data32->sensor_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data->video_record_size), sizeof(p));
+	err |= copy_to_user(&(data32->video_record_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data->display_size), sizeof(p));
+	err |= copy_to_user(&(data32->display_size), p, sizeof(p));
+	err |= copy_from_user(p, &(data->tv_out_size), sizeof(p));
+	err |= copy_to_user(&(data32->tv_out_size), p, sizeof(p));
+	err |= get_user(i, &(data->fps));
+	err |= put_user(i, &(data32->fps));
+	err |= get_user(i, &(data->video_encode_codec));
+	err |= put_user(i, &(data32->video_encode_codec));
+	err |= get_user(i, &(data->video_decode_codec));
+	err |= put_user(i, &(data32->video_decode_codec));
+	err |= get_user(i, &(data->hw_ovl_limit));
+	err |= put_user(i, &(data32->hw_ovl_limit));
+	return err;
+}
+
+/*
+ * 32-bit ioctl entry point: translates compat payloads into native ones via
+ * compat_alloc_user_space() and forwards to the unlocked_ioctl handler.
+ * When a compat command number equals the native one, the layouts match and
+ * the request is forwarded directly with a compat_ptr() argument.
+ */
+static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
+		{
+			if (COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG) {
+				return filp->f_op->unlocked_ioctl(filp, cmd,
+								  (unsigned long)compat_ptr(arg));
+			} else {
+
+				struct MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
+				MTK_SMI_BWC_CONFIG __user *data;
+				int err;
+
+				data32 = compat_ptr(arg);
+				data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
+
+				if (data == NULL)
+					return -EFAULT;
+
+				err = compat_get_smi_bwc_config_struct(data32, data);
+				if (err)
+					return err;
+
+				ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
+								 (unsigned long)data);
+				return ret;
+			}
+		}
+
+	case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
+		{
+
+			if (COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET) {
+				return filp->f_op->unlocked_ioctl(filp, cmd,
+								  (unsigned long)compat_ptr(arg));
+			} else {
+
+				struct MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
+				MTK_SMI_BWC_INFO_SET __user *data;
+				int err;
+
+				data32 = compat_ptr(arg);
+				data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
+				if (data == NULL)
+					return -EFAULT;
+
+				err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
+				if (err)
+					return err;
+
+				return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
+								  (unsigned long)data);
+			}
+		}
+	/* no fall-through here: both branches above return (old comment was wrong) */
+	case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
+		{
+
+			if (COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET) {
+				return filp->f_op->unlocked_ioctl(filp, cmd,
+								  (unsigned long)compat_ptr(arg));
+			} else {
+				struct MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
+				MTK_SMI_BWC_MM_INFO __user *data;
+				int err;
+
+				data32 = compat_ptr(arg);
+				data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
+
+				if (data == NULL)
+					return -EFAULT;
+
+				err = compat_get_smi_bwc_mm_info_struct(data32, data);
+				if (err)
+					return err;
+
+				ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
+								 (unsigned long)data);
+
+				/* write the (possibly updated) info back to 32-bit userspace */
+				err = compat_put_smi_bwc_mm_info_struct(data32, data);
+
+				if (err)
+					return err;
+
+				return ret;
+			}
+		}
+
+	/* these take plain ints / identical layouts: forward unchanged */
+	case MTK_IOC_SMI_DUMP_LARB:
+	case MTK_IOC_SMI_DUMP_COMMON:
+	case MTK_IOC_MMDVFS_CMD:
+
+		return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+}
+
+#endif
+
+#if defined(SMI_J)
+/* Simple accessors for the SMI_J module parameters declared below. */
+int is_mmdvfs_freq_hopping_disabled(void)
+{
+	return disable_freq_hopping;
+}
+
+int is_mmdvfs_freq_mux_disabled(void)
+{
+	return disable_freq_mux;
+}
+
+int is_force_max_mmsys_clk(void)
+{
+	return force_max_mmsys_clk;
+}
+
+int is_force_camera_hpm(void)
+{
+	return force_camera_hpm;
+}
+/*
+ * NOTE(review): with SMI_J defined, both subsys_initcall(smi_init) here and
+ * module_init(smi_init) below are emitted for the same function -- this
+ * double registration looks unintended; confirm which one should remain.
+ */
+subsys_initcall(smi_init);
+module_param_named(disable_freq_hopping, disable_freq_hopping, uint, S_IRUGO | S_IWUSR);
+module_param_named(disable_freq_mux, disable_freq_mux, uint, S_IRUGO | S_IWUSR);
+module_param_named(force_max_mmsys_clk, force_max_mmsys_clk, uint, S_IRUGO | S_IWUSR);
+module_param_named(force_camera_hpm, force_camera_hpm, uint, S_IRUGO | S_IWUSR);
+#endif
+module_init(smi_init);
+module_exit(smi_exit);
+
+module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
+module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
+
+MODULE_DESCRIPTION("MTK SMI driver");
+MODULE_AUTHOR("Kendrick Hsu<kendrick.hsu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mediatek/smi/smi_common.h b/drivers/misc/mediatek/smi/smi_common.h
new file mode 100644
index 000000000..0202bf3bd
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_common.h
@@ -0,0 +1,70 @@
+#ifndef __SMI_COMMON_H__
+#define __SMI_COMMON_H__
+
+#include <linux/aee.h>
+#include "smi_configuration.h"
+#ifdef CONFIG_MTK_CMDQ
+#include "cmdq_core.h"
+#endif
+
+#define SMI_CLIENT_DISP 0
+#define SMI_CLIENT_WFD 1
+#define SMI_EVENT_DIRECT_LINK (0x1 << 0)
+#define SMI_EVENT_DECOUPLE (0x1 << 1)
+#define SMI_EVENT_OVL_CASCADE (0x1 << 2)
+#define SMI_EVENT_OVL1_EXTERNAL (0x1 << 3)
+
+#define SMIMSG(string, args...) pr_debug("[pid=%d]" string, current->tgid, ##args)
+#define SMIMSG2(string, args...) pr_debug(string, ##args)
+#ifdef CONFIG_MTK_CMDQ
+#define SMIMSG3(onoff, string, args...)\
+	do {\
+		if (onoff == 1)\
+			cmdq_core_save_first_dump(string, ##args);\
+		SMIMSG(string, ##args);\
+	} while (0)
+#else
+#define SMIMSG3(onoff, string, args...) SMIMSG(string, ##args)
+#endif
+#define SMITMP(string, args...) pr_debug("[pid=%d]"string, current->tgid, ##args)
+
+#define SMIERR(string, args...) pr_debug("error: " string, ##args)
+#define smi_aee_print(string, args...)\
+ do {\
+ char smi_name[100];\
+ snprintf(smi_name, 100, "[" SMI_LOG_TAG "]" string, ##args); \
+ } while (0)
+
+/*
+#define SMIERR(string, args...)\
+ do {\
+ pr_debug("error: " string, ##args); \
+ aee_kernel_warning(SMI_LOG_TAG, "error: "string, ##args); \
+ } while (0)
+#define smi_aee_print(string, args...)\
+ do {\
+ char smi_name[100];\
+ snprintf(smi_name, 100, "[" SMI_LOG_TAG "]" string, ##args); \
+ aee_kernel_warning(smi_name, "["SMI_LOG_TAG"]error:"string, ##args); \
+ } while (0)
+*/
+/* Please use the function to instead gLarbBaseAddr to prevent the NULL pointer access error */
+/* when the corrosponding larb is not exist */
+/* extern unsigned int gLarbBaseAddr[SMI_LARB_NR]; */
+extern unsigned long get_larb_base_addr(int larb_id);
+
+/* extern char *smi_port_name[][21]; */
+/* for slow motion force 30 fps */
+extern int primary_display_force_set_vsync_fps(unsigned int fps);
+extern unsigned int primary_display_get_fps(void);
+extern void smi_client_status_change_notify(int module, int mode);
+extern void smi_dumpLarb(unsigned int index);
+extern void smi_dumpCommon(void);
+/* void register_base_dump(void); */
+
+extern struct SMI_PROFILE_CONFIG smi_profile_config[SMI_PROFILE_CONFIG_NUM];
+extern void smi_set_nonconstant_variable(void);
+extern void save_default_common_val(int *is_default_value_saved, unsigned int *default_val_smi_array);
+extern int smi_bus_regs_setting(int profile, struct SMI_SETTING *settings);
+
+#endif
diff --git a/drivers/misc/mediatek/smi/smi_config_util.c b/drivers/misc/mediatek/smi/smi_config_util.c
new file mode 100644
index 000000000..2683adf45
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_config_util.c
@@ -0,0 +1,49 @@
+#include <asm/io.h>
+#include <linux/string.h>
+#include "smi_reg.h"
+#include <mach/mt_smi.h>
+#include "smi_common.h"
+#include "smi_configuration.h"
+#include "smi_config_util.h"
+
+/* Program the SMI common and per-larb registers for a BWC profile. */
+int smi_bus_regs_setting(int profile, struct SMI_SETTING *settings)
+{
+	int i = 0;
+	int j = 0;
+
+	if (!settings || profile < 0 || profile >= SMI_BWC_SCEN_CNT)
+		return -1;
+
+	if (settings->smi_common_reg_num == 0)
+		return -1;
+
+	/* set regs of common */
+	SMIMSG("Current Scen:%d\n", profile);	/* \n was missing */
+	for (i = 0 ; i < settings->smi_common_reg_num ; ++i) {
+		M4U_WriteReg32(SMI_COMMON_EXT_BASE,
+			settings->smi_common_setting_vals[i].offset,
+			settings->smi_common_setting_vals[i].value);
+	}
+
+	/* set regs of larbs */
+	for (i = 0 ; i < SMI_LARB_NR ; ++i)
+		for (j = 0 ; j < settings->smi_larb_reg_num[i] ; ++j) {
+			M4U_WriteReg32(gLarbBaseAddr[i],
+				settings->smi_larb_setting_vals[i][j].offset,
+				settings->smi_larb_setting_vals[i][j].value);
+		}
+	return 0;
+}
+
+/* Snapshot the SMI common L1ARB defaults exactly once (idempotent). */
+void save_default_common_val(int *is_default_value_saved, unsigned int *default_val_smi_array)
+{
+	int idx;
+
+	if (*is_default_value_saved)
+		return;
+
+	SMIMSG("Save default config:\n");
+	for (idx = 0 ; idx < SMI_LARB_NR ; ++idx)
+		default_val_smi_array[idx] = M4U_ReadReg32(SMI_COMMON_EXT_BASE, smi_common_l1arb_offset[idx]);
+	*is_default_value_saved = 1;
+}
diff --git a/drivers/misc/mediatek/smi/smi_config_util.h b/drivers/misc/mediatek/smi/smi_config_util.h
new file mode 100644
index 000000000..8ef3934a9
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_config_util.h
@@ -0,0 +1,8 @@
+#ifndef _SMI_CONFIG_UTIL_H_
+#define _SMI_CONFIG_UTIL_H_
+
+extern unsigned long smi_common_l1arb_offset[SMI_LARB_NR];
+extern unsigned long gLarbBaseAddr[SMI_LARB_NR];
+
+
+#endif
diff --git a/drivers/misc/mediatek/smi/smi_configuration.c b/drivers/misc/mediatek/smi/smi_configuration.c
new file mode 100644
index 000000000..af1228ad7
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_configuration.c
@@ -0,0 +1,1307 @@
+#include <asm/io.h>
+#include <linux/string.h>
+#include <mach/mt_smi.h>
+#include "smi_configuration.h"
+#include "smi_common.h"
+#include "smi_reg.h"
+
+/* add static after all platform setting parameters moved to here */
+int is_default_value_saved; /* nonzero once the default l1arb values below have been captured (presumably via save_default_common_val -- confirm) */
+unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 }; /* snapshot of the SMI common l1arb registers' power-on values */
+
+#define SMI_LARB_NUM_MAX 8
+
+#if defined(SMI_D1)
+unsigned int smi_dbg_disp_mask = 1;
+unsigned int smi_dbg_vdec_mask = 2;
+unsigned int smi_dbg_imgsys_mask = 4;
+unsigned int smi_dbg_venc_mask = 8;
+unsigned int smi_dbg_mjc_mask = 0;
+
+unsigned long smi_common_l1arb_offset[SMI_LARB_NR] = {
+ REG_OFFSET_SMI_L1ARB0, REG_OFFSET_SMI_L1ARB1, REG_OFFSET_SMI_L1ARB2
+};
+
+unsigned long smi_larb0_debug_offset[SMI_LARB0_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb1_debug_offset[SMI_LARB1_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb2_debug_offset[SMI_LARB2_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb3_debug_offset[SMI_LARB3_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM] = {
+ 0x100, 0x104, 0x108, 0x10C, 0x110, 0x114, 0x220, 0x230, 0x234, 0x238, 0x400, 0x404, 0x408,
+ 0x40C, 0x430, 0x440
+};
+
+int smi_larb_debug_offset_num[SMI_LARB_NR] = {
+ SMI_LARB0_DEBUG_OFFSET_NUM, SMI_LARB1_DEBUG_OFFSET_NUM, SMI_LARB2_DEBUG_OFFSET_NUM,
+ SMI_LARB3_DEBUG_OFFSET_NUM
+};
+
+unsigned long *smi_larb_debug_offset[SMI_LARB_NR] = {
+ smi_larb0_debug_offset, smi_larb1_debug_offset, smi_larb2_debug_offset,
+ smi_larb3_debug_offset
+};
+
+#define SMI_PROFILE_SETTING_COMMON_INIT_NUM 7
+#define SMI_VC_SETTING_NUM SMI_LARB_NR
+
+
+/* vc setting */
+struct SMI_SETTING_VALUE smi_vc_setting[SMI_VC_SETTING_NUM] = {
+ {0x20, 0}, {0x20, 2}, {0x20, 1}, {0x20, 1}
+};
+
+/* init_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_init[SMI_PROFILE_SETTING_COMMON_INIT_NUM] = {
+ {0, 0}, {0, 0x1000}, {0, 0x1000}, {0, 0x1000},
+ {0x100, 0x1b},
+ {0x234, (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
+ + (0x4 << 10) + (0x4 << 5) + 0x5},
+ {0x230, 0x1f + (0x8 << 4) + (0x7 << 9)}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_init[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0x1f}, {0x204, 4}, {0x208, 6}, {0x20c, 0x1f}, {0x210, 4}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_init[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_init[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_init[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING init_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VR_NUM SMI_LARB_NR
+/* vr_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vr[SMI_PROFILE_SETTING_COMMON_VR_NUM] = {
+ {0, 0x11F1}, {0, 0x1000}, {0, 0x120A}, {0, 0x11F3}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vr[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 2}, {0x218, 4}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vr[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vr[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 4}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 4}, {0x22c, 1}, {0x230, 2}, {0x234, 2}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vr[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 3}, {0x230, 2}
+};
+
+struct SMI_SETTING vr_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VP_NUM SMI_LARB_NR
+/* vp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vp[SMI_PROFILE_SETTING_COMMON_VP_NUM] = {
+ {0, 0x1262}, {0, 0x11E9}, {0, 0x1000}, {0, 0x123D}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 1}, {0x208, 2}, {0x20c, 1}, {0x210, 3}, {0x214, 1}, {0x218, 4}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 0xb}, {0x204, 0xe}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vp[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 3}, {0x230, 2}
+};
+
+struct SMI_SETTING vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+/* vr series */
+struct SMI_SETTING icfp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING vr_slow_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING venc_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+/* vp series */
+struct SMI_SETTING vpwfd_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+struct SMI_SETTING swdec_vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+/* init series */
+struct SMI_SETTING mm_gpu_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+struct SMI_SETTING ui_idle_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi4k_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING vss_setting_config = { 0, NULL, {0}, {0} };
+
+#elif defined(SMI_D3)
+unsigned int smi_dbg_disp_mask = 1;
+unsigned int smi_dbg_vdec_mask = 2;
+unsigned int smi_dbg_imgsys_mask = 4;
+unsigned int smi_dbg_venc_mask = 8;
+unsigned int smi_dbg_mjc_mask = 0;
+
+unsigned long smi_common_l1arb_offset[SMI_LARB_NR] = {
+ REG_OFFSET_SMI_L1ARB0, REG_OFFSET_SMI_L1ARB1, REG_OFFSET_SMI_L1ARB2, REG_OFFSET_SMI_L1ARB3
+};
+
+static unsigned long smi_larb0_debug_offset[SMI_LARB0_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+static unsigned long smi_larb1_debug_offset[SMI_LARB1_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+static unsigned long smi_larb2_debug_offset[SMI_LARB2_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+static unsigned long smi_larb3_debug_offset[SMI_LARB3_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM] = {
+ 0x100, 0x104, 0x108, 0x10C, 0x110, 0x114, 0x220, 0x230, 0x234, 0x238, 0x400, 0x404, 0x408,
+ 0x40C, 0x430, 0x440
+};
+
+int smi_larb_debug_offset_num[SMI_LARB_NR] = {
+ SMI_LARB0_DEBUG_OFFSET_NUM, SMI_LARB1_DEBUG_OFFSET_NUM, SMI_LARB2_DEBUG_OFFSET_NUM,
+ SMI_LARB3_DEBUG_OFFSET_NUM
+};
+
+unsigned long *smi_larb_debug_offset[SMI_LARB_NR] = {
+ smi_larb0_debug_offset, smi_larb1_debug_offset, smi_larb2_debug_offset,
+ smi_larb3_debug_offset
+};
+
+
+#define SMI_PROFILE_SETTING_COMMON_INIT_NUM 7
+#define SMI_VC_SETTING_NUM SMI_LARB_NR
+
+
+/* vc setting */
+struct SMI_SETTING_VALUE smi_vc_setting[SMI_VC_SETTING_NUM] = {
+ {0x20, 0}, {0x20, 2}, {0x20, 1}, {0x20, 1}
+};
+
+/* init_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_init[SMI_PROFILE_SETTING_COMMON_INIT_NUM] = {
+ {0, 0}, {0, 0x1000}, {0, 0x1000}, {0, 0x1000},
+ {0x100, 0xb},
+ {0x234, (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
+ + (0x4 << 10) + (0x4 << 5) + 0x5},
+ {0x230, 0xf + (0x8 << 4) + (0x7 << 9)}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_init[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0x1f}, {0x204, 8}, {0x208, 6}, {0x20c, 0x1f}, {0x210, 4}, {0x214, 1}, {0x218, 0},
+ {0x21c, 2},
+ {0x220, 1}, {0x224, 3}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_init[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_init[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_init[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING init_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VR_NUM SMI_LARB_NR
+/* vr_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vr[SMI_PROFILE_SETTING_COMMON_VR_NUM] = {
+ {0, 0x1417}, {0, 0x1000}, {0, 0x11D0}, {0, 0x11F8}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vr[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xa}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1},
+ {0x21c, 4},
+ {0x220, 1}, {0x224, 6}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vr[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vr[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 2}, {0x21c,
+ 1},
+ {0x220, 2}, {0x224, 1}, {0x228, 1}, {0x22c, 8}, {0x230, 1}, {0x234, 1}, {0x238, 2}, {0x23c,
+ 2},
+ {0x240, 2}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vr[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 3}, {0x230, 2}
+};
+
+struct SMI_SETTING vr_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VP_NUM SMI_LARB_NR
+/* vp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vp[SMI_PROFILE_SETTING_COMMON_VP_NUM] = {
+ {0, 0x1262}, {0, 0x11E9}, {0, 0x1000}, {0, 0x123D}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 1}, {0x208, 2}, {0x20c, 1}, {0x210, 3}, {0x214, 1}, {0x218, 4}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 0xb}, {0x204, 0xe}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vp[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 3}, {0x230, 2}
+};
+
+struct SMI_SETTING vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VPWFD_NUM SMI_LARB_NR
+/* vpwfd_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vpwfd[SMI_PROFILE_SETTING_COMMON_VPWFD_NUM] = {
+ {0, 0x14B6}, {0, 0x11EE}, {0, 0x1000}, {0, 0x11F2}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vpwfd[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xc}, {0x204, 8}, {0x208, 6}, {0x20c, 0xc}, {0x210, 4}, {0x214, 1}, {0x218, 1},
+ {0x21c, 3},
+ {0x220, 2}, {0x224, 5}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vpwfd[SMI_LARB1_PORT_NUM] = {
+ {0x200, 0xb}, {0x204, 0xe}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vpwfd[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vpwfd[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 3}, {0x230, 2}
+};
+
+struct SMI_SETTING vpwfd_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VPWFD_NUM, smi_profile_setting_common_vpwfd,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vpwfd, smi_profile_setting_larb1_vpwfd,
+ smi_profile_setting_larb2_vpwfd, smi_profile_setting_larb3_vpwfd}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_ICFP_NUM SMI_LARB_NR
+/* icfp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_icfp[SMI_PROFILE_SETTING_COMMON_ICFP_NUM] = {
+ {0, 0x14E2}, {0, 0x1000}, {0, 0x1310}, {0, 0x106F}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_icfp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xe}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1},
+ {0x21c, 2},
+ {0x220, 2}, {0x224, 3}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_icfp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_icfp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 0xc}, {0x204, 4}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1},
+ {0x21c, 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 3}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_icfp[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING icfp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_ICFP_NUM, smi_profile_setting_common_icfp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_icfp, smi_profile_setting_larb1_icfp,
+ smi_profile_setting_larb2_icfp, smi_profile_setting_larb3_icfp}
+};
+
+/* vr series */
+struct SMI_SETTING vr_slow_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING venc_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+/* vp series */
+struct SMI_SETTING swdec_vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+/* init series */
+struct SMI_SETTING mm_gpu_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+struct SMI_SETTING vss_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING ui_idle_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi4k_setting_config = { 0, NULL, {0}, {0} };
+
+#elif defined(SMI_J)
+unsigned int smi_dbg_disp_mask = 1;
+unsigned int smi_dbg_vdec_mask = 2;
+unsigned int smi_dbg_imgsys_mask = 4;
+unsigned int smi_dbg_venc_mask = 8;
+unsigned int smi_dbg_mjc_mask = 0;
+
+unsigned long smi_common_l1arb_offset[SMI_LARB_NR] = {
+ REG_OFFSET_SMI_L1ARB0, REG_OFFSET_SMI_L1ARB1, REG_OFFSET_SMI_L1ARB2, REG_OFFSET_SMI_L1ARB3
+};
+
+unsigned long smi_larb0_debug_offset[SMI_LARB0_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb1_debug_offset[SMI_LARB1_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb2_debug_offset[SMI_LARB2_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb3_debug_offset[SMI_LARB3_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM] = {
+ 0x100, 0x104, 0x108, 0x10C, 0x110, 0x114, 0x220, 0x230, 0x234, 0x238, 0x400, 0x404, 0x408,
+ 0x40C, 0x430, 0x440
+};
+
+int smi_larb_debug_offset_num[SMI_LARB_NR] = {
+ SMI_LARB0_DEBUG_OFFSET_NUM, SMI_LARB1_DEBUG_OFFSET_NUM, SMI_LARB2_DEBUG_OFFSET_NUM,
+ SMI_LARB3_DEBUG_OFFSET_NUM
+};
+
+unsigned long *smi_larb_debug_offset[SMI_LARB_NR] = {
+ smi_larb0_debug_offset, smi_larb1_debug_offset, smi_larb2_debug_offset,
+ smi_larb3_debug_offset
+};
+
+#define SMI_PROFILE_SETTING_COMMON_INIT_NUM 7
+#define SMI_VC_SETTING_NUM SMI_LARB_NR
+#define SMI_INITSETTING_LARB0_NUM (SMI_LARB0_PORT_NUM + 4)
+
+
+/* vc setting */
+struct SMI_SETTING_VALUE smi_vc_setting[SMI_VC_SETTING_NUM] = {
+ {0x20, 0}, {0x20, 2}, {0x20, 1}, {0x20, 1}
+};
+
+/* ISP HRT setting */
+struct SMI_SETTING_VALUE smi_isp_hrt_setting[SMI_LARB_NR] = {
+ {0x24, 0}, {0x24, 0}, {0x24, 0}, {0x24, 0}
+};
+
+/* init_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_init[SMI_PROFILE_SETTING_COMMON_INIT_NUM] = {
+ {0, 0x15AE}, {0, 0x1000}, {0, 0x1000}, {0, 0x1000},
+ {0x100, 0xb},
+ {0x234, ((0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
+ + (0x4 << 10) + (0x4 << 5) + 0x5)},
+ {0x230, 0xf + (0x8 << 4) + (0x7 << 9)}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_init[SMI_INITSETTING_LARB0_NUM] = {
+ {0x200, 31}, {0x204, 8}, {0x208, 6}, {0x20c, 31}, {0x210, 4}, {0x214, 1}, {0x218, 31},
+ {0x21c, 31},
+ {0x220, 2}, {0x224, 1}, {0x228, 3}, {0x100, 5}, {0x10c, 5}, {0x118, 5}, {0x11c, 5}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_init[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_init[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 4}, {0x208, 2}, {0x20c, 2}, {0x210, 2}, {0x214, 1}, {0x218, 2}, {0x21c,
+ 2},
+ {0x220, 2}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 2}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_init[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING init_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_INITSETTING_LARB0_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VR_NUM SMI_LARB_NR
+/* vr_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vr[SMI_PROFILE_SETTING_COMMON_VR_NUM] = {
+ {0, 0x1393}, {0, 0x1000}, {0, 0x1205}, {0, 0x11D4}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vr[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xe}, {0x204, 8}, {0x208, 4}, {0x20c, 0xe}, {0x210, 4}, {0x214, 1}, {0x218, 0xe},
+ {0x21c, 0xe},
+ {0x220, 2}, {0x224, 1}, {0x228, 2}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vr[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vr[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 4}, {0x208, 2}, {0x20c, 2}, {0x210, 2}, {0x214, 1}, {0x218, 2}, {0x21c,
+ 2},
+ {0x220, 2}, {0x224, 1}, {0x228, 1}, {0x22c, 2}, {0x230, 1}, {0x234, 2}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vr[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 1}, {0x230, 4}
+};
+
+struct SMI_SETTING vr_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VP_NUM SMI_LARB_NR
+/* vp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vp[SMI_PROFILE_SETTING_COMMON_VP_NUM] = {
+ {0, 0x1510}, {0, 0x1169}, {0, 0x1000}, {0, 0x11CE}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xc}, {0x204, 8}, {0x208, 4}, {0x20c, 0xc}, {0x210, 4}, {0x214, 2}, {0x218, 0xc},
+ {0x21c, 0xc},
+ {0x220, 2}, {0x224, 1}, {0x228, 3}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 5}, {0x204, 2}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}, {0x234, 1}, {0x238, 1}, {0x23c,
+ 1},
+ {0x240, 1}, {0x244, 1}, {0x248, 1}, {0x24c, 1}, {0x250, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb3_vp[SMI_LARB3_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 2}, {0x228, 1}, {0x22c, 1}, {0x230, 4}
+};
+
+struct SMI_SETTING vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+/* vr series */
+struct SMI_SETTING icfp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING vr_slow_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING vss_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+struct SMI_SETTING venc_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr,
+ smi_profile_setting_larb3_vr}
+};
+
+/* vp series */
+struct SMI_SETTING vpwfd_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+struct SMI_SETTING swdec_vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp,
+ smi_profile_setting_larb3_vp}
+};
+
+/* init series */
+struct SMI_SETTING mm_gpu_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_INITSETTING_LARB0_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM, SMI_LARB3_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init, smi_profile_setting_larb3_init}
+};
+
+struct SMI_SETTING ui_idle_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi4k_setting_config = { 0, NULL, {0}, {0} };
+
+#elif defined(SMI_D2)
+
+unsigned long smi_common_l1arb_offset[SMI_LARB_NR] = {
+ REG_OFFSET_SMI_L1ARB0, REG_OFFSET_SMI_L1ARB1, REG_OFFSET_SMI_L1ARB2
+};
+
+unsigned int smi_dbg_disp_mask = 1;
+unsigned int smi_dbg_vdec_mask = 2;
+unsigned int smi_dbg_imgsys_mask = 4;
+unsigned int smi_dbg_venc_mask = 4;
+unsigned int smi_dbg_mjc_mask = 0;
+
+unsigned long smi_larb0_debug_offset[SMI_LARB0_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb1_debug_offset[SMI_LARB1_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb2_debug_offset[SMI_LARB2_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM] = {
+ 0x100, 0x104, 0x108, 0x10C, 0x110, 0x114, 0x220, 0x230, 0x234, 0x238, 0x400, 0x404, 0x408,
+ 0x40C, 0x430, 0x440
+};
+
+int smi_larb_debug_offset_num[SMI_LARB_NR] = {
+ SMI_LARB0_DEBUG_OFFSET_NUM, SMI_LARB1_DEBUG_OFFSET_NUM, SMI_LARB2_DEBUG_OFFSET_NUM
+};
+
+unsigned long *smi_larb_debug_offset[SMI_LARB_NR] = {
+ smi_larb0_debug_offset, smi_larb1_debug_offset, smi_larb2_debug_offset
+};
+
+#define SMI_VC_SETTING_NUM SMI_LARB_NR
+struct SMI_SETTING_VALUE smi_vc_setting[SMI_VC_SETTING_NUM] = {
+ {0x20, 0}, {0x20, 2}, {0x20, 1}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_INIT_NUM 6
+/* init_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_init[SMI_PROFILE_SETTING_COMMON_INIT_NUM] = {
+ {0, 0}, {0, 0}, {0, 0},
+ {0x100, 0xb},
+ {0x234, (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
+ + (0x4 << 10) + (0x4 << 5) + 0x5},
+ {0x230, (0x7 + (0x8 << 3) + (0x7 << 8))}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_init[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0x1f}, {0x204, 0x1f}, {0x208, 4}, {0x20c, 6}, {0x210, 4}, {0x214, 1}, {0x218, 1},
+ {0x21c, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_init[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_init[SMI_LARB2_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING init_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_ICFP_NUM SMI_LARB_NR
+/* icfp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_icfp[SMI_PROFILE_SETTING_COMMON_ICFP_NUM] = {
+ {0, 0x11da}, {0, 0x1000}, {0, 0x1318}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_icfp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 6}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_icfp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_icfp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 2}, {0x214, 4}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING icfp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_ICFP_NUM, smi_profile_setting_common_icfp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_icfp, smi_profile_setting_larb1_icfp,
+ smi_profile_setting_larb2_icfp}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VR_NUM SMI_LARB_NR
+/* vr_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vr[SMI_PROFILE_SETTING_COMMON_VR_NUM] = {
+ {0, 0x11ff}, {0, 0x1000}, {0, 0x1361}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vr[SMI_LARB0_PORT_NUM] = {
+ {0x200, 6}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vr[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vr[SMI_LARB2_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 4}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 2}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING vr_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VP_NUM SMI_LARB_NR
+/* vp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vp[SMI_PROFILE_SETTING_COMMON_VP_NUM] = {
+ {0, 0x11ff}, {0, 0}, {0, 0x1361}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 8}, {0x208, 1}, {0x20c, 1}, {0x210, 3}, {0x214, 1}, {0x218, 4}, {0x21c,
+ 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 0xb}, {0x204, 0xe}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vp[SMI_LARB2_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 4}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 2}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VPWFD_NUM SMI_LARB_NR
+/* vpwfd_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vpwfd[SMI_PROFILE_SETTING_COMMON_VPWFD_NUM] = {
+ {0, 0x11ff}, {0, 0}, {0, 0x1361}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vpwfd[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 8}, {0x208, 1}, {0x20c, 1}, {0x210, 3}, {0x214, 1}, {0x218, 4}, {0x21c,
+ 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vpwfd[SMI_LARB1_PORT_NUM] = {
+ {0x200, 0xb}, {0x204, 0xe}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb2_vpwfd[SMI_LARB2_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 4}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 2}, {0x22c, 1}, {0x230, 1}
+};
+
+struct SMI_SETTING vpwfd_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VPWFD_NUM, smi_profile_setting_common_vpwfd,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vpwfd, smi_profile_setting_larb1_vpwfd,
+ smi_profile_setting_larb2_vpwfd}
+};
+
+/* vp series */
+struct SMI_SETTING swdec_vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp, smi_profile_setting_larb2_vp}
+};
+
+/* vr series */
+struct SMI_SETTING vr_slow_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr}
+};
+
+struct SMI_SETTING venc_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr, smi_profile_setting_larb2_vr}
+};
+
+/* init series */
+struct SMI_SETTING mm_gpu_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init,
+ smi_profile_setting_larb2_init}
+};
+struct SMI_SETTING vss_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING ui_idle_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi4k_setting_config = { 0, NULL, {0}, {0} };
+
+#elif defined(SMI_R)
+unsigned int smi_dbg_disp_mask = 1;
+unsigned int smi_dbg_vdec_mask = 0;
+unsigned int smi_dbg_imgsys_mask = 2;
+unsigned int smi_dbg_venc_mask = 2;
+unsigned int smi_dbg_mjc_mask = 0;
+
+unsigned long smi_common_l1arb_offset[SMI_LARB_NR] = {
+ REG_OFFSET_SMI_L1ARB0, REG_OFFSET_SMI_L1ARB1
+};
+
+unsigned long smi_larb0_debug_offset[SMI_LARB0_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_larb1_debug_offset[SMI_LARB1_DEBUG_OFFSET_NUM] = {
+ 0x0, 0x8, 0x10, 0x24, 0x50, 0x60, 0xa0, 0xa4, 0xa8, 0xac, 0xb0, 0xb4, 0xb8, 0xbc, 0xc0,
+ 0xc8,
+ 0xcc, 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x218, 0x21c, 0x220, 0x224, 0x228, 0x22c,
+ 0x230,
+ 0x234, 0x238, 0x23c, 0x240, 0x244, 0x248, 0x24c, 0x280, 0x284, 0x288, 0x28c, 0x290, 0x294,
+ 0x298,
+ 0x29c, 0x2a0, 0x2a4, 0x2a8, 0x2ac, 0x2b0, 0x2b4, 0x2b8, 0x2bc, 0x2c0, 0x2c4, 0x2c8, 0x2cc,
+ 0x2d0,
+ 0x2d4, 0x2d8, 0x2dc, 0x2e0, 0x2e4, 0x2e8, 0x2ec, 0x2f0, 0x2f4, 0x2f8, 0x2fc
+};
+
+unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM] = {
+ 0x100, 0x104, 0x108, 0x10C, 0x110, 0x114, 0x220, 0x230, 0x234, 0x238, 0x400, 0x404, 0x408,
+ 0x40C, 0x430, 0x440
+};
+
+int smi_larb_debug_offset_num[SMI_LARB_NR] = {
+ SMI_LARB0_DEBUG_OFFSET_NUM, SMI_LARB1_DEBUG_OFFSET_NUM
+};
+
+unsigned long *smi_larb_debug_offset[SMI_LARB_NR] = {
+ smi_larb0_debug_offset, smi_larb1_debug_offset
+};
+
+#define SMI_VC_SETTING_NUM SMI_LARB_NR
+struct SMI_SETTING_VALUE smi_vc_setting[SMI_VC_SETTING_NUM] = {
+ {0x20, 0}, {0x20, 2}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_INIT_NUM 5
+/* init_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_init[SMI_PROFILE_SETTING_COMMON_INIT_NUM] = {
+ {0, 0x14cb}, {0, 0x1001},
+ {0x100, 0xb},
+ {0x234,
+ (0x1 << 31) + (0x1d << 26) + (0x1f << 21) + (0x0 << 20) + (0x3 << 15)
+ + (0x4 << 10) + (0x4 << 5) + 0x5},
+ {0x230, (0x3 + (0x8 << 2) + (0x7 << 7))}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_init[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0x1c}, {0x204, 4}, {0x208, 6}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_init[SMI_LARB1_PORT_NUM] = {
+ {0x200, 1}, {0x204, 1}, {0x208, 1}, {0x20c, 1}, {0x210, 1}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 1}, {0x224, 1}, {0x228, 1}
+};
+
+struct SMI_SETTING init_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VR_NUM SMI_LARB_NR
+/* vr_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vr[SMI_PROFILE_SETTING_COMMON_VR_NUM] = {
+ {0, 0x122b}, {0, 0x142c}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vr[SMI_LARB0_PORT_NUM] = {
+ {0x200, 0xa}, {0x204, 1}, {0x208, 1}, {0x20c, 4}, {0x210, 2}, {0x214, 2}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vr[SMI_LARB1_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 4}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 3}, {0x224, 2}, {0x228, 2}
+};
+
+struct SMI_SETTING vr_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr}
+};
+
+#define SMI_PROFILE_SETTING_COMMON_VP_NUM SMI_LARB_NR
+/* vp_setting */
+struct SMI_SETTING_VALUE smi_profile_setting_common_vp[SMI_PROFILE_SETTING_COMMON_VP_NUM] = {
+ {0, 0x11ff}, {0, 0}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb0_vp[SMI_LARB0_PORT_NUM] = {
+ {0x200, 8}, {0x204, 1}, {0x208, 1}, {0x20c, 3}, {0x210, 1}, {0x214, 4}, {0x218, 1}
+};
+
+struct SMI_SETTING_VALUE smi_profile_setting_larb1_vp[SMI_LARB1_PORT_NUM] = {
+ {0x200, 8}, {0x204, 6}, {0x208, 1}, {0x20c, 1}, {0x210, 4}, {0x214, 1}, {0x218, 1}, {0x21c,
+ 1},
+ {0x220, 3}, {0x224, 2}, {0x228, 2}
+};
+
+struct SMI_SETTING vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp}
+};
+
+/* vp series */
+struct SMI_SETTING swdec_vp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VP_NUM, smi_profile_setting_common_vp,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vp, smi_profile_setting_larb1_vp}
+};
+
+/* vr series */
+struct SMI_SETTING vr_slow_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr}
+};
+
+struct SMI_SETTING icfp_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr}
+};
+
+struct SMI_SETTING venc_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_VR_NUM, smi_profile_setting_common_vr,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_vr, smi_profile_setting_larb1_vr}
+};
+
+/* init series */
+struct SMI_SETTING mm_gpu_setting_config = {
+ SMI_PROFILE_SETTING_COMMON_INIT_NUM, smi_profile_setting_common_init,
+ {SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM},
+ {smi_profile_setting_larb0_init, smi_profile_setting_larb1_init}
+};
+
+struct SMI_SETTING vss_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING vpwfd_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING ui_idle_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi_setting_config = { 0, NULL, {0}, {0} };
+struct SMI_SETTING hdmi4k_setting_config = { 0, NULL, {0}, {0} };
+#endif
+
+struct SMI_PROFILE_CONFIG smi_profile_config[SMI_PROFILE_CONFIG_NUM] = {
+ {SMI_BWC_SCEN_NORMAL, &init_setting_config},
+ {SMI_BWC_SCEN_VR, &vr_setting_config},
+ {SMI_BWC_SCEN_SWDEC_VP, &swdec_vp_setting_config},
+ {SMI_BWC_SCEN_VP, &vp_setting_config},
+ {SMI_BWC_SCEN_VR_SLOW, &vr_slow_setting_config},
+ {SMI_BWC_SCEN_MM_GPU, &mm_gpu_setting_config},
+ {SMI_BWC_SCEN_WFD, &vpwfd_setting_config},
+ {SMI_BWC_SCEN_VENC, &venc_setting_config},
+ {SMI_BWC_SCEN_ICFP, &icfp_setting_config},
+ {SMI_BWC_SCEN_UI_IDLE, &ui_idle_setting_config},
+ {SMI_BWC_SCEN_VSS, &vss_setting_config},
+ {SMI_BWC_SCEN_FORCE_MMDVFS, &init_setting_config},
+ {SMI_BWC_SCEN_HDMI, &hdmi_setting_config},
+ {SMI_BWC_SCEN_HDMI4K, &hdmi4k_setting_config}
+};
+
+void smi_set_nonconstant_variable(void)
+{
+#if defined(SMI_D2)
+ int i = 0;
+
+ for (i = 0; i < SMI_LARB_NR; ++i) {
+ smi_profile_setting_common_init[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_init[i].value = default_val_smi_l1arb[i];
+ smi_profile_setting_common_icfp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vr[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vpwfd[i].offset = smi_common_l1arb_offset[i];
+ }
+
+ smi_profile_setting_common_vp[1].value = default_val_smi_l1arb[1];
+ smi_profile_setting_common_vpwfd[1].value = default_val_smi_l1arb[1];
+
+#elif defined(SMI_D1)
+ int i = 0;
+
+ M4U_WriteReg32(LARB2_BASE, 0x24, (M4U_ReadReg32(LARB2_BASE, 0x24) & 0xf7ffffff));
+ for (i = 0; i < SMI_LARB_NR; ++i) {
+ smi_profile_setting_common_vr[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_init[i].offset = smi_common_l1arb_offset[i];
+ }
+ smi_profile_setting_common_init[0].value = default_val_smi_l1arb[0];
+
+#elif defined(SMI_D3)
+ int i = 0;
+
+ M4U_WriteReg32(LARB2_BASE, 0x24, (M4U_ReadReg32(LARB2_BASE, 0x24) & 0xf7ffffff));
+ for (i = 0; i < SMI_LARB_NR; ++i) {
+ smi_profile_setting_common_vr[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_icfp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vpwfd[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_init[i].offset = smi_common_l1arb_offset[i];
+ }
+ smi_profile_setting_common_init[0].value = default_val_smi_l1arb[0];
+
+#elif defined(SMI_J)
+ unsigned int smi_val = 0;
+ int i = 0;
+
+ smi_val = (M4U_ReadReg32(LARB0_BASE, 0x24) & 0xf7ffffff);
+ for (i = 0; i < SMI_LARB_NR; ++i)
+ smi_isp_hrt_setting[i].value = smi_val;
+
+ for (i = 0; i < SMI_LARB_NR; ++i) {
+ smi_profile_setting_common_vr[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_init[i].offset = smi_common_l1arb_offset[i];
+ }
+#elif defined(SMI_R)
+ int i = 0;
+
+ for (i = 0; i < SMI_LARB_NR; ++i) {
+ smi_profile_setting_common_vr[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_vp[i].offset = smi_common_l1arb_offset[i];
+ smi_profile_setting_common_init[i].offset = smi_common_l1arb_offset[i];
+ }
+ smi_profile_setting_common_vp[1].offset = smi_common_l1arb_offset[1];
+#endif
+}
diff --git a/drivers/misc/mediatek/smi/smi_configuration.h b/drivers/misc/mediatek/smi/smi_configuration.h
new file mode 100644
index 000000000..92579476b
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_configuration.h
@@ -0,0 +1,53 @@
+#ifndef _SMI_CONFIGURATION_H_
+#define _SMI_CONFIGURATION_H_
+
+#include "smi_reg.h"
+#include <mach/mt_smi.h>
+/* ***********debug parameters*********** */
+
+#define SMI_COMMON_DEBUG_OFFSET_NUM 16
+#define SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM 69
+
+
+#if defined(SMI_D1) || defined(SMI_D3) || defined(SMI_J)
+#define SMI_LARB0_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB1_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB2_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB3_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+
+#elif defined(SMI_D2)
+#define SMI_LARB0_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB1_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB2_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+
+#elif defined(SMI_R)
+#define SMI_LARB0_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+#define SMI_LARB1_DEBUG_OFFSET_NUM SMI_LARB_DEFAULT_DEBUG_OFFSET_NUM
+
+#endif
+
+
+struct SMI_SETTING_VALUE {
+ unsigned int offset;
+ int value;
+};
+
+struct SMI_SETTING {
+ unsigned int smi_common_reg_num;
+ struct SMI_SETTING_VALUE *smi_common_setting_vals;
+ unsigned int smi_larb_reg_num[SMI_LARB_NR];
+ struct SMI_SETTING_VALUE *smi_larb_setting_vals[SMI_LARB_NR];
+};
+
+struct SMI_PROFILE_CONFIG {
+ int smi_profile;
+ struct SMI_SETTING *setting;
+};
+
+#define SMI_PROFILE_CONFIG_NUM SMI_BWC_SCEN_CNT
+
+extern unsigned long smi_common_debug_offset[SMI_COMMON_DEBUG_OFFSET_NUM];
+extern int smi_larb_debug_offset_num[SMI_LARB_NR];
+extern unsigned long *smi_larb_debug_offset[SMI_LARB_NR];
+
+#endif
diff --git a/drivers/misc/mediatek/smi/smi_debug.c b/drivers/misc/mediatek/smi/smi_debug.c
new file mode 100644
index 000000000..6931b758f
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_debug.c
@@ -0,0 +1,348 @@
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/aee.h>
+#include <linux/timer.h>
+/* #include <asm/system.h> */
+#include <asm-generic/irq_regs.h>
+/* #include <asm/mach/map.h> */
+#include <mach/sync_write.h>
+/*#include <mach/irqs.h>*/
+#include <asm/cacheflush.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/fb.h>
+#include <linux/debugfs.h>
+#include <m4u.h>
+#include <mach/mt_smi.h>
+
+#include "smi_common.h"
+#include "smi_reg.h"
+#include "smi_debug.h"
+#include "smi_configuration.h"
+
+#define SMI_LOG_TAG "smi"
+
+#if !defined(CONFIG_MTK_LEGACY)
+#define SMI_INTERNAL_CCF_SUPPORT
+#endif
+
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+#include <mach/mt_clkmgr.h>
+#endif
+
+
+/* Debug Function */
+static void smi_dump_format(unsigned long base, unsigned int from, unsigned int to);
+static void smi_dumpper(int output_gce_buffer, unsigned long *offset, unsigned long base, int reg_number)
+{
+ int num_of_set = 3;
+ int remain_runtimes = 0;
+ int runtimes = 0;
+ int i = 0;
+
+ remain_runtimes = reg_number % num_of_set;
+ runtimes = reg_number / num_of_set;
+ runtimes = runtimes * 3;
+
+ do {
+ SMIMSG3(output_gce_buffer, "[0x%lx,0x%lx,0x%lx]=[0x%x,0x%x,0x%x]\n",
+ offset[i], offset[i + 1], offset[i + 2],
+ M4U_ReadReg32(base, offset[i]), M4U_ReadReg32(base, offset[i + 1]),
+ M4U_ReadReg32(base, offset[i + 2]));
+ i += 3;
+ } while (i < runtimes);
+
+ switch (remain_runtimes) {
+ case 2:
+ SMIMSG3(output_gce_buffer, "[0x%lx,0x%lx]=[0x%x,0x%x]\n",
+ offset[i], offset[i + 1],
+ M4U_ReadReg32(base, offset[i]), M4U_ReadReg32(base, offset[i + 1]));
+ break;
+ case 1:
+ SMIMSG3(output_gce_buffer, "[0x%lx]=[0x%x]\n",
+ offset[i], M4U_ReadReg32(base, offset[i]));
+ break;
+ default:
+ break;
+ }
+}
+
+void smi_dumpCommonDebugMsg(int output_gce_buffer)
+{
+ unsigned long u4Base;
+
+ /* No verify API in CCF, assume clk is always on */
+ int smiCommonClkEnabled = 1;
+
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+ smiCommonClkEnabled = clock_is_on(MT_CG_DISP0_SMI_COMMON);
+#endif /* !defined (SMI_INTERNAL_CCF_SUPPORT) */
+
+ /* SMI COMMON dump */
+ if ((!smiCommonClkEnabled)) {
+ SMIMSG3(output_gce_buffer, "===SMI common clock is disabled===\n");
+ return;
+ }
+
+ SMIMSG3(output_gce_buffer, "===SMI common reg dump, CLK: %d===\n", smiCommonClkEnabled);
+
+ u4Base = SMI_COMMON_EXT_BASE;
+ smi_dumpper(output_gce_buffer, smi_common_debug_offset, u4Base,
+ SMI_COMMON_DEBUG_OFFSET_NUM);
+}
+
+void smi_dumpLarbDebugMsg(unsigned int u4Index, int output_gce_buffer)
+{
+ unsigned long u4Base = 0;
+ /* No verify API in CCF, assume clk is always on */
+ int larbClkEnabled = 1;
+
+ u4Base = get_larb_base_addr(u4Index);
+#if !defined(SMI_INTERNAL_CCF_SUPPORT)
+ larbClkEnabled = smi_larb_clock_is_on(u4Index);
+#endif
+
+ if (u4Base == SMI_ERROR_ADDR) {
+ SMIMSG3(output_gce_buffer, "Doesn't support reg dump for Larb%d\n", u4Index);
+ return;
+ } else if ((larbClkEnabled != 0)) {
+ SMIMSG3(output_gce_buffer, "===SMI LARB%d reg dump, CLK: %d===\n", u4Index,
+ larbClkEnabled);
+
+ smi_dumpper(output_gce_buffer, smi_larb_debug_offset[u4Index], u4Base,
+ smi_larb_debug_offset_num[u4Index]);
+
+ } else {
+ SMIMSG3(output_gce_buffer, "===SMI LARB%d clock is disabled===\n", u4Index);
+ }
+
+}
+
+void smi_dumpLarb(unsigned int index)
+{
+ unsigned long u4Base;
+
+ u4Base = get_larb_base_addr(index);
+
+ if (u4Base == SMI_ERROR_ADDR) {
+ SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
+ } else {
+ SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
+ smi_dump_format(u4Base, 0, 0x434);
+ smi_dump_format(u4Base, 0xF00, 0xF0C);
+ }
+}
+
+void smi_dumpCommon(void)
+{
+ SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
+
+ smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x444);
+}
+
+static void smi_dump_format(unsigned long base, unsigned int from, unsigned int to)
+{
+ int i, j, left;
+ unsigned int value[8];
+
+ for (i = from; i <= to; i += 32) {
+ for (j = 0; j < 8; j++)
+ value[j] = M4U_ReadReg32(base, i + j * 4);
+
+ SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1],
+ value[2], value[3], value[4], value[5], value[6], value[7]);
+ }
+
+ left = ((from - to) / 4 + 1) % 8;
+
+ if (left) {
+ memset(value, 0, 8 * sizeof(unsigned int));
+
+ for (j = 0; j < left; j++)
+ value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
+
+ SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0],
+ value[1], value[2], value[3], value[4], value[5], value[6], value[7]);
+ }
+}
+
+void smi_dumpDebugMsg(void)
+{
+ unsigned int u4Index;
+
+ /* SMI COMMON dump, 0 stands for not pass log to CMDQ error dumping messages */
+ smi_dumpCommonDebugMsg(0);
+
+ /* dump all SMI LARB */
+ /* SMI Larb dump, 0 stands for not pass log to CMDQ error dumping messages */
+ for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++)
+ smi_dumpLarbDebugMsg(u4Index, 0);
+}
+
+int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump)
+{
+ return smi_debug_bus_hanging_detect_ext(larbs, show_dump, 0);
+}
+
+static int get_status_code(int smi_larb_clk_status, int smi_larb_busy_count,
+ int smi_common_busy_count)
+{
+ int status_code = 0;
+
+ if (smi_larb_clk_status != 0) {
+ if (smi_larb_busy_count == 5) { /* The larb is always busy */
+ if (smi_common_busy_count == 5) /* smi common is always busy */
+ status_code = 1;
+ else if (smi_common_busy_count == 0) /* smi common is always idle */
+ status_code = 2;
+ else
+ status_code = 5; /* smi common is sometimes busy and idle */
+ } else if (smi_larb_busy_count == 0) { /* The larb is always idle */
+ if (smi_common_busy_count == 5) /* smi common is always busy */
+ status_code = 3;
+ else if (smi_common_busy_count == 0) /* smi common is always idle */
+ status_code = 4;
+ else
+ status_code = 6; /* smi common is sometimes busy and idle */
+ } else { /* sometime the larb is busy */
+ if (smi_common_busy_count == 5) /* smi common is always busy */
+ status_code = 7;
+ else if (smi_common_busy_count == 0) /* smi common is always idle */
+ status_code = 8;
+ else
+ status_code = 9; /* smi common is sometimes busy and idle */
+ }
+ } else {
+ status_code = 10;
+ }
+ return status_code;
+}
+
+int smi_debug_bus_hanging_detect_ext(unsigned int larbs, int show_dump, int output_gce_buffer)
+{
+/* output_gce_buffer = 1, write log into kernel log and CMDQ buffer. */
+/* dual_buffer = 0, write log into kernel log only */
+ int i = 0;
+ int dump_time = 0;
+ int is_smi_issue = 0;
+ int status_code = 0;
+ /* Keep the dump result */
+ unsigned char smi_common_busy_count = 0;
+ unsigned int u4Index = 0;
+ unsigned long u4Base = 0;
+
+ volatile unsigned int reg_temp = 0;
+ unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
+ unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };
+ int smi_larb_clk_status[SMI_LARB_NR] = { 0 };
+ /* dump register and save register status */
+ for (dump_time = 0; dump_time < 5; dump_time++) {
+ reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x440);
+ if ((reg_temp & (1 << 0)) == 0) {
+ /* smi common is busy */
+ smi_common_busy_count++;
+ }
+ /* Dump smi common regs */
+ if (show_dump != 0)
+ smi_dumpCommonDebugMsg(output_gce_buffer);
+
+ for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++) {
+ u4Base = get_larb_base_addr(u4Index);
+
+ smi_larb_clk_status[u4Index] = smi_larb_clock_is_on(u4Index);
+ /* check larb clk is enable */
+ if (smi_larb_clk_status[u4Index] != 0) {
+ if (u4Base != SMI_ERROR_ADDR) {
+ reg_temp = M4U_ReadReg32(u4Base, 0x0);
+ if (reg_temp != 0) {
+ /* Larb is busy */
+ smi_larb_busy_count[u4Index]++;
+ }
+ smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
+ if (show_dump != 0)
+ smi_dumpLarbDebugMsg(u4Index, output_gce_buffer);
+ }
+ }
+
+ }
+
+ /* Show the checked result */
+ for (i = 0; i < SMI_LARB_NR; i++) { /* Check each larb */
+ if (SMI_DGB_LARB_SELECT(larbs, i)) {
+ /* larb i has been selected */
+ /* Get status code */
+ status_code = get_status_code(smi_larb_clk_status[i], smi_larb_busy_count[i],
+ smi_common_busy_count);
+
+ /* Send the debug message according to the final result */
+ switch (status_code) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ case 8:
+ SMIMSG3(output_gce_buffer,
+ "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first\n",
+ i, smi_larb_busy_count[i], smi_common_busy_count,
+ status_code);
+ SMIMSG3(output_gce_buffer,
+ "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
+ i);
+ break;
+ case 2:
+ if (smi_larb_mmu_status[i] == 0) {
+ SMIMSG3(output_gce_buffer,
+ "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine state first\n",
+ i, smi_larb_busy_count[i],
+ smi_common_busy_count, status_code);
+ SMIMSG3(output_gce_buffer,
+ "If the engine is waiting for Larb%ds' response, it needs SMI HW's check\n",
+ i);
+ } else {
+ SMIMSG3(output_gce_buffer,
+ "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> MMU port config error\n",
+ i, smi_larb_busy_count[i],
+ smi_common_busy_count, status_code);
+ is_smi_issue = 1;
+ }
+ break;
+ case 4:
+ case 6:
+ case 9:
+ SMIMSG3(output_gce_buffer,
+ "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue\n",
+ i, smi_larb_busy_count[i], smi_common_busy_count,
+ status_code);
+ break;
+ case 10:
+ SMIMSG3(output_gce_buffer,
+ "Larb%d clk is disbable, status=%d ==> no need to check\n",
+ i, status_code);
+ break;
+ default:
+ SMIMSG3(output_gce_buffer,
+ "Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown\n",
+ i, smi_larb_busy_count[i], smi_common_busy_count,
+ status_code);
+ break;
+ }
+ }
+
+ }
+
+ }
+ return is_smi_issue;
+}
diff --git a/drivers/misc/mediatek/smi/smi_debug.h b/drivers/misc/mediatek/smi/smi_debug.h
new file mode 100644
index 000000000..02fd0293b
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_debug.h
@@ -0,0 +1,34 @@
+#ifndef _SMI_DEBUG_H_
+#define _SMI_DEBUG_H_
+
+
+#define SMI_DBG_DISPSYS (smi_dbg_disp_mask)
+#define SMI_DBG_VDEC (smi_dbg_vdec_mask)
+#define SMI_DBG_IMGSYS (smi_dbg_imgsys_mask)
+#define SMI_DBG_VENC (smi_dbg_venc_mask)
+#define SMI_DBG_MJC (smi_dbg_mjc_mask)
+
+#define SMI_DGB_LARB_SELECT(smi_dbg_larb, n) ((smi_dbg_larb) & (1<<n))
+
+#ifndef CONFIG_MTK_SMI_EXT
+#define smi_debug_bus_hanging_detect(larbs, show_dump) {}
+#define smi_debug_bus_hanging_detect_ext(larbs, show_dump, output_gce_buffer) {}
+#else
+int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump);
+ /* output_gce_buffer = 1, pass log to CMDQ error dumping messages */
+int smi_debug_bus_hanging_detect_ext(unsigned int larbs, int show_dump, int output_gce_buffer);
+
+#endif
+void smi_dumpCommonDebugMsg(int output_gce_buffer);
+void smi_dumpLarbDebugMsg(unsigned int u4Index, int output_gce_buffer);
+void smi_dumpDebugMsg(void);
+
+extern int smi_larb_clock_is_on(unsigned int larb_index);
+
+extern unsigned int smi_dbg_disp_mask;
+extern unsigned int smi_dbg_vdec_mask;
+extern unsigned int smi_dbg_imgsys_mask;
+extern unsigned int smi_dbg_venc_mask;
+extern unsigned int smi_dbg_mjc_mask;
+
+#endif /* _SMI_DEBUG_H_ */
diff --git a/drivers/misc/mediatek/smi/smi_info_util.c b/drivers/misc/mediatek/smi/smi_info_util.c
new file mode 100644
index 000000000..6612dd5cf
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_info_util.c
@@ -0,0 +1,86 @@
+#include <asm/io.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include "smi_info_util.h"
+#include "smi_common.h"
+
+int smi_set_mm_info_ioctl_wrapper(struct file *pFile, unsigned int cmd, unsigned long param)
+{
+ int ret = 0;
+ MTK_SMI_BWC_INFO_SET cfg;
+
+ ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_INFO_SET));
+ if (ret) {
+ SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_from_user failed: %d\n", ret);
+ return -EFAULT;
+ }
+ /* Set the address to the value assigned by user space program */
+ smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
+ /* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish"); */
+ return ret;
+}
+
+
+int smi_get_mm_info_ioctl_wrapper(struct file *pFile, unsigned int cmd, unsigned long param)
+{
+ int ret = 0;
+
+ ret = copy_to_user((void *)param, (void *)&g_smi_bwc_mm_info, sizeof(MTK_SMI_BWC_MM_INFO));
+
+ if (ret) {
+ SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n", ret);
+ return -EFAULT;
+ }
+ /* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish"); */
+ return ret;
+}
+
+
+void smi_bwc_mm_info_set(int property_id, long val1, long val2)
+{
+
+ switch (property_id) {
+ case SMI_BWC_INFO_CON_PROFILE:
+ g_smi_bwc_mm_info.concurrent_profile = (int)val1;
+ break;
+ case SMI_BWC_INFO_SENSOR_SIZE:
+ g_smi_bwc_mm_info.sensor_size[0] = val1;
+ g_smi_bwc_mm_info.sensor_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
+ g_smi_bwc_mm_info.video_record_size[0] = val1;
+ g_smi_bwc_mm_info.video_record_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_DISP_SIZE:
+ g_smi_bwc_mm_info.display_size[0] = val1;
+ g_smi_bwc_mm_info.display_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_TV_OUT_SIZE:
+ g_smi_bwc_mm_info.tv_out_size[0] = val1;
+ g_smi_bwc_mm_info.tv_out_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_FPS:
+ g_smi_bwc_mm_info.fps = (int)val1;
+ break;
+ case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
+ g_smi_bwc_mm_info.video_encode_codec = (int)val1;
+#if defined(SMI_J)
+ /* AVC @ 60 needs HPM */
+ /*
+ if (g_smi_bwc_mm_info.video_encode_codec == 2) {
+ int is_smvr = 0;
+ spin_lock(&g_SMIInfo.SMI_lock);
+ is_smvr = g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR_SLOW] ? 1 : 0;
+ spin_unlock(&g_SMIInfo.SMI_lock);
+ if (is_smvr)
+ mmdvfs_notify_scenario_enter(SMI_BWC_SCEN_VR_SLOW);
+ }
+ */
+#endif
+ break;
+ case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
+ g_smi_bwc_mm_info.video_decode_codec = (int)val1;
+ break;
+ }
+}
diff --git a/drivers/misc/mediatek/smi/smi_info_util.h b/drivers/misc/mediatek/smi/smi_info_util.h
new file mode 100644
index 000000000..30eb18abd
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_info_util.h
@@ -0,0 +1,13 @@
+#ifndef __SMI_INFO_UTIL_H__
+#define __SMI_INFO_UTIL_H__
+
+#include <asm/io.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <mach/mt_smi.h>
+int smi_set_mm_info_ioctl_wrapper(struct file *pFile, unsigned int cmd, unsigned long param);
+int smi_get_mm_info_ioctl_wrapper(struct file *pFile, unsigned int cmd, unsigned long param);
+void smi_bwc_mm_info_set(int property_id, long val1, long val2);
+extern MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info;
+
+#endif /* __SMI_INFO_UTIL_H__ */
diff --git a/drivers/misc/mediatek/smi/smi_internal.c b/drivers/misc/mediatek/smi/smi_internal.c
new file mode 100644
index 000000000..1fb786968
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_internal.c
@@ -0,0 +1,61 @@
+#include <asm/io.h>
+/* Define SMI_INTERNAL_CCF_SUPPORT when CCF needs to be enabled */
+#if !defined(CONFIG_MTK_LEGACY)
+#define SMI_INTERNAL_CCF_SUPPORT
+#endif
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+#include <linux/clk.h>
+/* for ccf clk CB */
+#if defined(SMI_D1)
+#include "clk-mt6735-pg.h"
+#elif defined(SMI_J)
+#include "clk-mt6755-pg.h"
+#endif
+/* notify clk is enabled/disabled for m4u*/
+#include "m4u.h"
+#else
+#include <mach/mt_clkmgr.h>
+#endif /* defined(SMI_INTERNAL_CCF_SUPPORT) */
+
+#include "smi_configuration.h"
+#include "smi_common.h"
+
+int smi_larb_clock_is_on(unsigned int larb_index)
+{
+ int result = 0;
+
+#if defined(SMI_INTERNAL_CCF_SUPPORT)
+ result = 1;
+#elif !defined(CONFIG_MTK_FPGA) && !defined(CONFIG_FPGA_EARLY_PORTING)
+ switch (larb_index) {
+ case 0:
+ result = clock_is_on(MT_CG_DISP0_SMI_LARB0);
+ break;
+ case 1:
+#if defined(SMI_R)
+ result = clock_is_on(MT_CG_LARB1_SMI_CKPDN);
+#else
+ result = clock_is_on(MT_CG_VDEC1_LARB);
+#endif
+ break;
+ case 2:
+#if !defined(SMI_R)
+ result = clock_is_on(MT_CG_IMAGE_LARB2_SMI);
+#endif
+ break;
+ case 3:
+#if defined(SMI_D1)
+ result = clock_is_on(MT_CG_VENC_LARB);
+#elif defined(SMI_D3)
+ result = clock_is_on(MT_CG_VENC_VENC);
+#endif
+ break;
+ default:
+ result = 0;
+ break;
+ }
+#endif /* !defined (CONFIG_MTK_FPGA) && !defined (CONFIG_FPGA_EARLY_PORTING) */
+ return result;
+}
+
diff --git a/drivers/misc/mediatek/smi/smi_reg.h b/drivers/misc/mediatek/smi/smi_reg.h
new file mode 100644
index 000000000..84d61d42c
--- /dev/null
+++ b/drivers/misc/mediatek/smi/smi_reg.h
@@ -0,0 +1,449 @@
+#ifndef _SMI_REG_H_
+#define _SMI_REG_H_
+
+#define SMI_COMMON_EXT_BASE (smi_reg_base_common_ext)
+#define LARB0_BASE (smi_reg_base_barb0)
+#define LARB1_BASE (smi_reg_base_barb1)
+
+#if defined(SMI_D2)
+#define LARB2_BASE (smi_reg_base_barb2)
+#elif defined(SMI_D1) || defined(SMI_D3) || defined(SMI_J)
+#define LARB2_BASE (smi_reg_base_barb2)
+#define LARB3_BASE (smi_reg_base_barb3)
+#endif
+
+
+/* ================================================= */
+/* common macro definitions */
+#define F_VAL(val, msb, lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
+#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
+#define F_BIT_SET(bit) (1<<(bit))
+#define F_BIT_VAL(val, bit) ((!!(val))<<(bit))
+#define F_MSK_SHIFT(regval, msb, lsb) (((regval)&F_MSK(msb, lsb))>>lsb)
+
+
+/* ===================================================== */
+/* M4U register definition */
+/* ===================================================== */
+
+#define REG_MMUg_PT_BASE (0x0)
+#define F_MMUg_PT_VA_MSK 0xffff0000
+#define REG_MMUg_PT_BASE_SEC (0x4)
+#define F_MMUg_PT_VA_MSK_SEC 0xffff0000
+
+
+#define REG_MMU_PROG_EN 0x10
+#define F_MMU0_PROG_EN 1
+#define F_MMU1_PROG_EN 2
+#define REG_MMU_PROG_VA 0x14
+#define F_PROG_VA_LOCK_BIT (1<<11)
+#define F_PROG_VA_LAYER_BIT F_BIT_SET(9)
+#define F_PROG_VA_SIZE16X_BIT F_BIT_SET(8)
+#define F_PROG_VA_SECURE_BIT (1<<7)
+#define F_PROG_VA_MASK 0xfffff000
+
+#define REG_MMU_PROG_DSC 0x18
+
+#define REG_MMU_INVLD (0x20)
+#define F_MMU_INV_ALL 0x2
+#define F_MMU_INV_RANGE 0x1
+
+#define REG_MMU_INVLD_SA (0x24)
+#define REG_MMU_INVLD_EA (0x28)
+
+
+#define REG_MMU_INVLD_SEC (0x2c)
+#define F_MMU_INV_SEC_ALL 0x2
+#define F_MMU_INV_SEC_RANGE 0x1
+
+#define REG_MMU_INVLD_SA_SEC (0x30)
+#define REG_MMU_INVLD_EA_SEC (0x34)
+
+#define REG_INVLID_SEL (0x38)
+#define F_MMU_INV_EN_L1 (1<<0)
+#define F_MMU_INV_EN_L2 (1<<1)
+
+
+#define REG_INVLID_SEL_SEC (0x3c)
+#define F_MMU_INV_SEC_EN_L1 (1<<0)
+#define F_MMU_INV_SEC_EN_L2 (1<<1)
+#define F_MMU_INV_SEC_INV_DONE (1<<2)
+#define F_MMU_INV_SEC_INV_INT_SET (1<<3)
+#define F_MMU_INV_SEC_INV_INT_CLR (1<<4)
+#define F_MMU_INV_SEC_DBG (1<<5)
+
+
+#define REG_MMU_SEC_ABORT_INFO (0x40)
+#define REG_MMU_STANDARD_AXI_MODE (0x48)
+
+#define REG_MMU_PRIORITY (0x4c)
+#define REG_MMU_DCM_DIS (0x50)
+#define REG_MMU_WR_LEN (0x54)
+#define REG_MMU_HW_DEBUG (0x58)
+#define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
+#define F_MMU_HW_DBG_PFQ_BRDCST F_BIT_SET(0)
+
+#define REG_MMU_NON_BLOCKING_DIS 0x5C
+#define F_MMU_NON_BLOCK_DISABLE_BIT 1
+#define F_MMU_NON_BLOCK_HALF_ENTRY_BIT 2
+
+#define REG_MMU_LEGACY_4KB_MODE (0x60)
+
+#define REG_MMU_PFH_DIST0 0x80
+#define REG_MMU_PFH_DIST1 0x84
+#define REG_MMU_PFH_DIST2 0x88
+#define REG_MMU_PFH_DIST3 0x8c
+#define REG_MMU_PFH_DIST4 0x90
+#define REG_MMU_PFH_DIST5 0x94
+#define REG_MMU_PFH_DIST6 0x98
+
+#define REG_MMU_PFH_DIST(port) (0x80+(((port)>>3)<<2))
+#define F_MMU_PFH_DIST_VAL(port, val) ((val&0xf)<<(((port)&0x7)<<2))
+#define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
+
+#define REG_MMU_PFH_DIR0 0xF0
+#define REG_MMU_PFH_DIR1 0xF4
+#define REG_MMU_PFH_DIR(port) (((port) < 32) ? REG_MMU_PFH_DIR0 : REG_MMU_PFH_DIR1)
+#define F_MMU_PFH_DIR(port, val) ((!!(val))<<((port)&0x1f))
+
+
+#define REG_MMU_READ_ENTRY 0x100
+#define F_READ_ENTRY_EN F_BIT_SET(31)
+#define F_READ_ENTRY_MM1_MAIN F_BIT_SET(26)
+#define F_READ_ENTRY_MM0_MAIN F_BIT_SET(25)
+#define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(25+id)
+#define F_READ_ENTRY_PFH F_BIT_SET(24)
+#define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx, 21, 16)
+#define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx, 11, 5)
+ /* #define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4) */
+ /* #define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2) */
+#define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
+#define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1, 0)
+
+#define REG_MMU_DES_RDATA 0x104
+
+#define REG_MMU_PFH_TAG_RDATA 0x108
+#define F_PFH_TAG_VA_GET(mmu, tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(mmu)+1))
+#define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
+#define F_PFH_TAG_16X_BIT F_BIT_SET(2) /* this bit is always 0 -- cost down. */
+#define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
+#define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
+
+
+/* tag related macro */
+ /* #define MMU0_SET_ORDER 7 */
+ /* #define MMU1_SET_ORDER 6 */
+#define MMU_SET_ORDER(mmu) (7-(mmu))
+#define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
+#define MMU_SET_LSB_OFFSET 15
+#define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
+#define MMU_PFH_VA_TO_SET(mmu, va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
+
+#define MMU_PAGE_PER_LINE 8
+#define MMU_WAY_NR 4
+#define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
+
+
+#define REG_MMU_CTRL_REG 0x110
+#define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
+#define F_MMU_CTRL_TLB_WALK_DIS(dis) F_BIT_VAL(dis, 1)
+#define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 2)
+#define F_MMU_CTRL_MONITOR_CLR(clr) F_BIT_VAL(clr, 3)
+#define F_MMU_CTRL_PFH_RT_RPL_MODE(mod) F_BIT_VAL(mod, 4)
+#define F_MMU_CTRL_TF_PROT_VAL(prot) F_VAL(prot, 6, 5)
+#define F_MMU_CTRL_TF_PROT_MSK F_MSK(6, 5)
+#define F_MMU_CTRL_INT_HANG_en(en) F_BIT_VAL(en, 7)
+#define F_MMU_CTRL_COHERE_EN(en) F_BIT_VAL(en, 8)
+#define F_MMU_CTRL_IN_ORDER_WR(en) F_BIT_VAL(en, 9)
+#define F_MMU_CTRL_MAIN_TLB_SHARE_ALL(en) F_BIT_VAL(en, 10)
+
+
+#define REG_MMU_IVRP_PADDR 0x114
+#define F_MMU_IVRP_PA_SET(PA) (PA>>1)
+#define F_MMU_IVRP_8G_PA_SET(PA) ((PA>>1)|(1<<31))
+
+#define REG_MMU_INT_L2_CONTROL 0x120
+#define F_INT_L2_CLR_BIT (1<<12)
+#define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
+#define F_INT_L2_TABLE_WALK_FAULT F_BIT_SET(1)
+#define F_INT_L2_PFH_DMA_FIFO_OVERFLOW F_BIT_SET(2)
+#define F_INT_L2_MISS_DMA_FIFO_OVERFLOW F_BIT_SET(3)
+#define F_INT_L2_INVALD_DONE F_BIT_SET(4)
+#define F_INT_L2_PFH_IN_OUT_FIFO_ERROR F_BIT_SET(5)
+#define F_INT_L2_MISS_FIFO_ERR F_BIT_SET(6)
+
+#define REG_MMU_INT_MAIN_CONTROL 0x124
+#define F_INT_TRANSLATION_FAULT(MMU) F_BIT_SET(0+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
+
+#define F_INT_MAU(mmu, set) F_BIT_SET(14+(set)+(mmu<<2)) /* (14+(set)+(mmu*4)) */
+
+#define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
+#define F_INT_MMU1_MAIN_MSK F_MSK(13, 7)
+#define F_INT_MMU0_MAU_MSK F_MSK(17, 14)
+#define F_INT_MMU1_MAU_MSK F_MSK(21, 18)
+
+#define REG_MMU_CPE_DONE_SEC 0x128
+#define REG_MMU_CPE_DONE 0x12C
+
+#define REG_MMU_L2_FAULT_ST 0x130
+#define F_INT_L2_MISS_OUT_FIFO_ERROR F_BIT_SET(7)
+#define F_INT_L2_MISS_IN_FIFO_ERR F_BIT_SET(8)
+#define REG_MMU_MAIN_FAULT_ST 0x134
+
+#define REG_MMU_TBWALK_FAULT_VA 0x138
+#define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
+#define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
+
+#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
+#define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
+#define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
+#define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
+
+#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
+#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
+
+#define REG_MMU_PF_MSCNT 0x160
+#define REG_MMU_PF_CNT 0x164
+#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) /* (0x168+((mmu)*12) */
+#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
+#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
+
+#define MMU01_SQ_OFFSET (0x600-0x300)
+#define REG_MMU_SQ_START(mmu, x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+#define F_SQ_VA_MASK F_MSK(31, 18)
+#define F_SQ_EN_BIT (1<<17)
+ /* #define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13) */
+#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+
+
+#define MMU_TOTAL_RS_NR 8
+#define REG_MMU_RSx_VA(mmu, x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
+#define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
+#define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
+
+#define REG_MMU_RSx_PA(mmu, x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
+#define F_MMU_RSx_PA_VALID(regval) F_MSK_SHIFT(regval, 1, 0)
+
+#define REG_MMU_RSx_2ND_BASE(mmu, x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+
+#define REG_MMU_RSx_ST(mmu, x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 21, 20)
+#define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
+#define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
+
+#define REG_MMU_MAIN_TAG(mmu, x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
+#define F_MAIN_TLB_LOCK_BIT (1<<11)
+#define F_MAIN_TLB_VALID_BIT (1<<10)
+#define F_MAIN_TLB_LAYER_BIT F_BIT_SET(9)
+#define F_MAIN_TLB_16X_BIT F_BIT_SET(8)
+#define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
+#define F_MAIN_TLB_INV_DES_BIT (1<<6)
+#define F_MAIN_TLB_SQ_EN_BIT (1<<5)
+#define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4, 1)
+#define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
+
+
+#define REG_MMU_MAU_START(mmu, mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START_BIT32(mmu, mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END(mmu, mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END_BIT32(mmu, mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_PORT_EN(mmu, mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ID(mmu, mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR(mmu, mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR_BIT32(mmu, mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
+
+#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
+#define F_MAU_LARB_VAL(mau, larb) ((larb)<<(mau*8))
+#define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
+#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
+#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
+#define F_MAU_BIT_VAL(val, mau) F_BIT_VAL(val, mau)
+#define REG_MMU_MAU_RW(mmu) (0x98c+((mmu)*0xa0))
+#define REG_MMU_MAU_VA(mmu) (0x990+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ST(mmu) (0x994+((mmu)*0xa0))
+
+#define REG_MMU_PFH_VLD_0 (0x180)
+#define REG_MMU_PFH_VLD(set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<4)) /* +((set/32)*4)+(way*16) */
+#define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) /* set%32 */
+
+
+
+/* ================================================================ */
+/* SMI larb */
+/* ================================================================ */
+
+#define SMI_ERROR_ADDR 0
+#if defined(SMI_D2)
+#define SMI_LARB_NR 3
+
+#define SMI_LARB0_PORT_NUM 8
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 13
+#elif defined(SMI_D1)
+#define SMI_LARB_NR 4
+
+#define SMI_LARB0_PORT_NUM 7
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 21
+#define SMI_LARB3_PORT_NUM 13
+#elif defined(SMI_D3)
+#define SMI_LARB_NR 4
+
+#define SMI_LARB0_PORT_NUM 10
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 21
+#define SMI_LARB3_PORT_NUM 13
+#elif defined(SMI_R)
+#define SMI_LARB_NR 2
+
+#define SMI_LARB0_PORT_NUM 7
+#define SMI_LARB1_PORT_NUM 11
+#elif defined(SMI_J)
+#define SMI_LARB_NR 4
+
+#define SMI_LARB0_PORT_NUM 11
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 21
+#define SMI_LARB3_PORT_NUM 13
+#endif
+
+#define SMI_LARB_STAT (0x0)
+#define SMI_LARB_IRQ_EN (0x4)
+#define SMI_LARB_IRQ_STATUS (0x8)
+#define SMI_LARB_SLP_CON (0xc)
+#define SMI_LARB_CON (0x10)
+#define SMI_LARB_CON_SET (0x14)
+#define SMI_LARB_CON_CLR (0x18)
+#define SMI_LARB_VC_PRI_MODE (0x20)
+#define SMI_LARB_CMD_THRT_CON (0x24)
+#define SMI_LARB_STARV_CON (0x28)
+#define SMI_LARB_EMI_CON (0x2C)
+#define SMI_LARB_SHARE_EN (0x30)
+#define SMI_LARB_BWL_EN (0x50)
+#define SMI_LARB_BWL_SOFT_EN (0x54)
+#define SMI_LARB_BWL_CON (0x58)
+#define SMI_LARB_OSTDL_EN (0x60)
+#define SMI_LARB_OSTDL_SOFT_EN (0x64)
+#define SMI_LARB_ULTRA_DIS (0x70)
+#define SMI_LARB_PREULTRA_DIS (0x74)
+#define SMI_LARB_FORCE_ULTRA (0x78)
+#define SMI_LARB_FORCE_PREULTRA (0x7c)
+#define SMI_LARB_MST_GRP_SEL_L (0x80)
+#define SMI_LARB_MST_GRP_SEL_H (0x84)
+#define SMI_LARB_INT_PATH_SEL (0x90)
+#define SMI_LARB_EXT_GREQ_VIO (0xa0)
+#define SMI_LARB_INT_GREQ_VIO (0xa4)
+#define SMI_LARB_OSTD_UDF_VIO (0xa8)
+#define SMI_LARB_OSTD_CRS_VIO (0xac)
+#define SMI_LARB_FIFO_STAT (0xb0)
+#define SMI_LARB_BUS_STAT (0xb4)
+#define SMI_LARB_CMD_THRT_STAT (0xb8)
+#define SMI_LARB_MON_REQ (0xbc)
+#define SMI_LARB_REQ_MASK (0xc0)
+#define SMI_LARB_REQ_DET (0xc4)
+#define SMI_LARB_EXT_ONGOING (0xc8)
+#define SMI_LARB_INT_ONGOING (0xcc)
+#define SMI_LARB_MISC_MON0 (0xd0)
+#define SMI_LARB_DBG_CON (0xf0)
+#define SMI_LARB_TST_MODE (0xf4)
+#define SMI_LARB_WRR_PORT (0x100)
+#define SMI_LARB_BWL_PORT (0x180)
+#define SMI_LARB_OSTDL_PORT (0x200)
+#define SMI_LARB_OSTD_MON_PORT (0x280)
+#define SMI_LARB_PINFO (0x300)
+#define SMI_LARB_MON_EN (0x400)
+#define SMI_LARB_MON_CLR (0x404)
+#define SMI_LARB_MON_PORT (0x408)
+#define SMI_LARB_MON_CON (0x40c)
+#define SMI_LARB_MON_ACT_CNT (0x410)
+#define SMI_LARB_MON_REQ_CNT (0x414)
+#define SMI_LARB_MON_BEAT_CNT (0x418)
+#define SMI_LARB_MON_BYTE_CNT (0x41c)
+#define SMI_LARB_MON_CP_CNT (0x420)
+#define SMI_LARB_MON_DP_CNT (0x424)
+#define SMI_LARB_MON_OSTD_CNT (0x428)
+#define SMI_LARB_MON_CP_MAX (0x430)
+#define SMI_LARB_MON_COS_MAX (0x434)
+#define SMI_LARB_MMU_EN (0xf00)
+#define F_SMI_MMU_EN(port, en) ((en)<<((port)))
+#define F_SMI_SEC_EN(port, en) ((en)<<((port)))
+#define REG_SMI_LARB_DOMN_OF_PORT(port) (((port) > 15) ? 0xf0c : 0xf08)
+#define F_SMI_DOMN(port, domain) (((domain)&0x3)<<((((port) > 15) ? (port-16) : port)<<1))
+
+
+/* ===============================================================
+ * SMI COMMON
+ * =============================================================== */
+#if defined(SMI_R)
+#define REG_OFFSET_SMI_L1LEN (0x200)
+#define REG_OFFSET_SMI_L1ARB0 (0x204)
+#define REG_OFFSET_SMI_L1ARB1 (0x208)
+#define REG_OFFSET_SMI_L1ARB2 (0x20C)
+#define REG_OFFSET_SMI_L1ARB3 (0x210)
+#define REG_OFFSET_SMI_L1ARB4 (0x214)
+#else
+#define REG_OFFSET_SMI_L1LEN (0x100)
+#define REG_OFFSET_SMI_L1ARB0 (0x104)
+#define REG_OFFSET_SMI_L1ARB1 (0x108)
+#define REG_OFFSET_SMI_L1ARB2 (0x10C)
+#define REG_OFFSET_SMI_L1ARB3 (0x110)
+#define REG_OFFSET_SMI_L1ARB4 (0x114)
+#endif
+
+/* ========================================================================= */
+/* peripheral system */
+/* ========================================================================= */
+#define REG_PERIAXI_BUS_CTL3 (0x208+0xf0003000)
+#define F_PERI_MMU_EN(port, en) ((en)<<((port)))
+
+
+static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned long Offset)
+{
+ unsigned int val;
+
+ val = ioread32((void *)(M4uBase + Offset));
+
+ return val;
+}
+
+static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned long Offset, unsigned int Val)
+{
+ /* unsigned int read; */
+ iowrite32(Val, (void *)(M4uBase + Offset));
+ /* make sure memory manipulation sequence is OK */
+ mb();
+
+}
+
+static inline unsigned int COM_ReadReg32(unsigned long addr)
+{
+ return ioread32((void *)addr);
+}
+
+static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
+{
+ iowrite32(Val, (void *)addr);
+ /* make sure memory manipulation sequence is OK */
+ mb();
+}
+
+
+extern unsigned long smi_reg_base_common_ext;
+extern unsigned long smi_reg_base_barb0;
+extern unsigned long smi_reg_base_barb1;
+#if defined(SMI_D2)
+extern unsigned long smi_reg_base_barb2;
+#elif defined(SMI_D1) || defined(SMI_D3) || defined(SMI_J)
+extern unsigned long smi_reg_base_barb2;
+extern unsigned long smi_reg_base_barb3;
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/smi/variant/Makefile b/drivers/misc/mediatek/smi/variant/Makefile
new file mode 100644
index 000000000..e98461dad
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/Makefile
@@ -0,0 +1,15 @@
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/include/mt-plat
+ccflags-y += -I$(srctree)/drivers/misc/mediatek/m4u/$(MTK_PLATFORM)/
+
+obj-y += smi_variant.o
+obj-y += smi_debug.o
+
+ifeq ($(CONFIG_ARCH_MT8173),y)
+obj-y += smi_variant_config_8173.o
+ccflags-y += -DMT73
+endif
+
+ifeq ($(CONFIG_ARCH_MT8127),y)
+ccflags-y += -DMT27
+obj-y += smi_variant_config_8127.o
+endif
diff --git a/drivers/misc/mediatek/smi/variant/smi_common.h b/drivers/misc/mediatek/smi/variant/smi_common.h
new file mode 100644
index 000000000..93d05cb6f
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_common.h
@@ -0,0 +1,55 @@
+#ifndef __SMI_COMMON_H__
+#define __SMI_COMMON_H__
+
+#include <aee.h>
+
+#define SMI_CLIENT_DISP 0
+#define SMI_CLIENT_WFD 1
+#define SMI_EVENT_DIRECT_LINK (0x1 << 0)
+#define SMI_EVENT_DECOUPLE (0x1 << 1)
+#define SMI_EVENT_OVL_CASCADE (0x1 << 2)
+#define SMI_EVENT_OVL1_EXTERNAL (0x1 << 3)
+
+#define SMIMSG(string, args...) pr_warn("[pid=%d]" string, current->tgid, ##args)
+#define SMIMSG2(string, args...) pr_debug(string, ##args)
+#define SMIMSG3(string, args...) SMIMSG(string, ##args)
+
+#define SMITMP(string, args...) pr_debug("[pid=%d]"string, current->tgid, ##args)
+
+#define SMIERR(string, args...) pr_debug("error: " string, ##args)
+#define smi_aee_print(string, args...)\
+ do {\
+ char smi_name[100];\
+ snprintf(smi_name, 100, "[" SMI_LOG_TAG "]" string, ##args); \
+ } while (0)
+
+/*
+#define SMIERR(string, args...)\
+ do {\
+ pr_debug("error: " string, ##args); \
+ aee_kernel_warning(SMI_LOG_TAG, "error: "string, ##args); \
+ } while (0)
+#define smi_aee_print(string, args...)\
+ do {\
+ char smi_name[100];\
+ snprintf(smi_name, 100, "[" SMI_LOG_TAG "]" string, ##args); \
+ aee_kernel_warning(smi_name, "["SMI_LOG_TAG"]error:"string, ##args); \
+ } while (0)
+*/
+/* Please use the function to instead gLarbBaseAddr to prevent the NULL pointer access error */
+/* when the corrosponding larb is not exist */
+/* extern unsigned int gLarbBaseAddr[SMI_LARB_NR]; */
+/*extern unsigned long get_larb_base_addr(int larb_id);*/
+
+extern char *smi_port_name[][21];
+/* for slow motion force 30 fps */
+extern int primary_display_force_set_vsync_fps(unsigned int fps);
+extern unsigned int primary_display_get_fps(void);
+extern void smi_dumpDebugMsg(void);
+extern void smi_client_status_change_notify(int module, int mode);
+extern void SMI_DBG_Init(void);
+void register_base_dump(void);
+
+
+
+#endif
diff --git a/drivers/misc/mediatek/smi/variant/smi_debug.c b/drivers/misc/mediatek/smi/variant/smi_debug.c
new file mode 100644
index 000000000..43e455382
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_debug.c
@@ -0,0 +1,136 @@
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <aee.h>
+#include <linux/timer.h>
+/* #include <asm/system.h> */
+#include <asm-generic/irq_regs.h>
+/* #include <asm/mach/map.h> */
+#include <sync_write.h>
+/*#include <mach/irqs.h>*/
+#include <asm/cacheflush.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/fb.h>
+#include <linux/debugfs.h>
+#include <m4u.h>
+#include <mt_smi.h>
+
+#include "smi_common.h"
+#include "smi_reg.h"
+
+#define SMI_LOG_TAG "smi"
+
+static char debug_buffer[4096];
+
+static void process_dbg_opt(const char *opt)
+{
+ unsigned long addr = 0;
+ int ret = 0;
+
+ if (0 == strncmp(opt, "set_reg:", 8)) {
+ unsigned long val = 0;
+
+ char *p = (char *)opt + 8;
+
+ ret = kstrtoul(p, 16, &addr);
+ p++;
+
+ ret = kstrtoul(p, 16, &val);
+
+ SMIMSG("set register: 0x%lx = 0x%x\n", addr, (unsigned int)val);
+
+ COM_WriteReg32(addr, val);
+ }
+ if (0 == strncmp(opt, "get_reg:", 8)) {
+ char *p = (char *)opt + 8;
+
+ ret = kstrtoul(p, 16, &addr);
+
+ SMIMSG("get register: 0x%lx = 0x%x\n", addr, COM_ReadReg32(addr));
+ }
+
+}
+
+
+static void process_dbg_cmd(char *cmd)
+{
+ char *tok;
+
+ while ((tok = strsep(&cmd, " ")) != NULL)
+ process_dbg_opt(tok);
+
+}
+
+
+/* --------------------------------------------------------------------------- */
+/* Debug FileSystem Routines */
+/* --------------------------------------------------------------------------- */
+
+struct dentry *smi_dbgfs = NULL;
+
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debug_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int n = 0;
+
+ return simple_read_from_buffer(ubuf, count, ppos, debug_buffer, n);
+}
+
+
+static ssize_t debug_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ const int debug_bufmax = sizeof(debug_buffer) - 1;
+ size_t ret;
+
+ ret = count;
+
+ if (count > debug_bufmax)
+ count = debug_bufmax;
+
+ if (copy_from_user(&debug_buffer, ubuf, count))
+ return -EFAULT;
+
+ debug_buffer[count] = 0;
+
+ process_dbg_cmd(debug_buffer);
+
+ return ret;
+}
+
+
+static const struct file_operations debug_fops = {
+ .read = debug_read,
+ .write = debug_write,
+ .open = debug_open,
+};
+
+
+void SMI_DBG_Init(void)
+{
+ smi_dbgfs = debugfs_create_file("smi", S_IFREG | S_IRUGO, NULL, (void *)0, &debug_fops);
+}
+
+
+void SMI_DBG_Deinit(void)
+{
+ debugfs_remove(smi_dbgfs);
+}
diff --git a/drivers/misc/mediatek/smi/variant/smi_debug.h b/drivers/misc/mediatek/smi/variant/smi_debug.h
new file mode 100644
index 000000000..3810d012e
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_debug.h
@@ -0,0 +1,23 @@
+#ifndef __MT8173_SMI_DEBUG_H__
+#define __MT8173_SMI_DEBUG_H__
+
+#define SMI_DBG_DISPSYS (1<<0)
+#define SMI_DBG_VDEC (1<<1)
+#define SMI_DBG_IMGSYS (1<<2)
+#define SMI_DBG_VENC (1<<3)
+#define SMI_DBG_MJC (1<<4)
+
+#define SMI_DGB_LARB_SELECT(smi_dbg_larb, n) ((smi_dbg_larb) & (1<<n))
+
+#ifndef CONFIG_MTK_SMI_EXT
+#define smi_debug_bus_hanging_detect(larbs, show_dump) {}
+#define smi_debug_bus_hanging_detect_ext(larbs, show_dump, output_gce_buffer) {}
+#else
+int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump);
+ /* output_gce_buffer = 1, pass log to CMDQ error dumping messages */
+int smi_debug_bus_hanging_detect_ext(unsigned int larbs, int show_dump, int output_gce_buffer);
+
+#endif
+
+
+#endif /* __MT8173_SMI_DEBUG_H__ */
diff --git a/drivers/misc/mediatek/smi/variant/smi_priv.h b/drivers/misc/mediatek/smi/variant/smi_priv.h
new file mode 100644
index 000000000..769d7ff51
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_priv.h
@@ -0,0 +1,36 @@
+#ifndef __SMI_PRIV_H__
+#define __SMI_PRIV_H__
+
+#include "smi_reg.h"
+
+#define SMI_LARB_PORT_NR_MAX 21/* Max port num in current platform.*/
+struct mtk_smi_priv;
+
+struct mtk_smi_data {
+ unsigned int larb_nr;
+ struct device *larb[SMI_LARB_NR];
+ struct device *smicommon;
+ const struct mtk_smi_priv *smi_priv;
+ unsigned long smi_common_base;
+ unsigned long larb_base[SMI_LARB_NR];
+
+ /*record the larb port register, please use the max value*/
+ unsigned short int larb_port_backup[SMI_LARB_PORT_NR_MAX*SMI_LARB_NR];
+};
+
+struct mtk_smi_priv {
+ unsigned int larb_port_num[SMI_LARB_NR];/* the port number in each larb */
+ unsigned char larb_vc_setting[SMI_LARB_NR];
+ void (*init_setting)(struct mtk_smi_data *, bool *,
+ u32 *, unsigned int);
+ void (*vp_setting)(struct mtk_smi_data *);
+ void (*vr_setting)(struct mtk_smi_data *);
+ void (*hdmi_setting)(struct mtk_smi_data *);
+ void (*hdmi_4k_setting)(struct mtk_smi_data *);
+};
+
+
+extern const struct mtk_smi_priv smi_mt8173_priv;
+extern const struct mtk_smi_priv smi_mt8127_priv;
+
+#endif
diff --git a/drivers/misc/mediatek/smi/variant/smi_reg.h b/drivers/misc/mediatek/smi/variant/smi_reg.h
new file mode 100644
index 000000000..1bbd628c7
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_reg.h
@@ -0,0 +1,536 @@
+#ifndef _SMI_REG_H__
+#define _SMI_REG_H__
+
+#ifndef CONFIG_MTK_SMI_VARIANT
+
+#define SMI_COMMON_EXT_BASE (smi_reg_base_common_ext)
+#define LARB0_BASE (smi_reg_base_barb0)
+#define LARB1_BASE (smi_reg_base_barb1)
+
+#if defined D2
+#define LARB2_BASE (smi_reg_base_barb2)
+#elif defined D1 || defined D3
+#define LARB2_BASE (smi_reg_base_barb2)
+#define LARB3_BASE (smi_reg_base_barb3)
+#endif
+
+#else
+extern struct mtk_smi_data *smi_data;
+
+#define LARB0_BASE smi_data->larb_base[0]
+#define LARB1_BASE smi_data->larb_base[1]
+#define LARB2_BASE smi_data->larb_base[2]
+#define LARB3_BASE smi_data->larb_base[3]
+#define LARB4_BASE smi_data->larb_base[4]
+#define LARB5_BASE smi_data->larb_base[5]
+
+#define SMI_COMMON_EXT_BASE smi_data->smi_common_base
+
+#endif
+
+/* ================================================= */
+/* common macro definitions */
+#define F_VAL(val, msb, lsb) (((val)&((1<<(msb-lsb+1))-1))<<lsb)
+#define F_MSK(msb, lsb) F_VAL(0xffffffff, msb, lsb)
+#define F_BIT_SET(bit) (1<<(bit))
+#define F_BIT_VAL(val, bit) ((!!(val))<<(bit))
+#define F_MSK_SHIFT(regval, msb, lsb) (((regval)&F_MSK(msb, lsb))>>lsb)
+
+
+/* ===================================================== */
+/* M4U register definition */
+/* ===================================================== */
+
+#define REG_MMUg_PT_BASE (0x0)
+#define F_MMUg_PT_VA_MSK 0xffff0000
+#define REG_MMUg_PT_BASE_SEC (0x4)
+#define F_MMUg_PT_VA_MSK_SEC 0xffff0000
+
+
+#define REG_MMU_PROG_EN 0x10
+#define F_MMU0_PROG_EN 1
+#define F_MMU1_PROG_EN 2
+#define REG_MMU_PROG_VA 0x14
+#define F_PROG_VA_LOCK_BIT (1<<11)
+#define F_PROG_VA_LAYER_BIT F_BIT_SET(9)
+#define F_PROG_VA_SIZE16X_BIT F_BIT_SET(8)
+#define F_PROG_VA_SECURE_BIT (1<<7)
+#define F_PROG_VA_MASK 0xfffff000
+
+#define REG_MMU_PROG_DSC 0x18
+
+#define REG_MMU_INVLD (0x20)
+#define F_MMU_INV_ALL 0x2
+#define F_MMU_INV_RANGE 0x1
+
+#define REG_MMU_INVLD_SA (0x24)
+#define REG_MMU_INVLD_EA (0x28)
+
+
+#define REG_MMU_INVLD_SEC (0x2c)
+#define F_MMU_INV_SEC_ALL 0x2
+#define F_MMU_INV_SEC_RANGE 0x1
+
+#define REG_MMU_INVLD_SA_SEC (0x30)
+#define REG_MMU_INVLD_EA_SEC (0x34)
+
+#define REG_INVLID_SEL (0x38)
+#define F_MMU_INV_EN_L1 (1<<0)
+#define F_MMU_INV_EN_L2 (1<<1)
+
+
+#define REG_INVLID_SEL_SEC (0x3c)
+#define F_MMU_INV_SEC_EN_L1 (1<<0)
+#define F_MMU_INV_SEC_EN_L2 (1<<1)
+#define F_MMU_INV_SEC_INV_DONE (1<<2)
+#define F_MMU_INV_SEC_INV_INT_SET (1<<3)
+#define F_MMU_INV_SEC_INV_INT_CLR (1<<4)
+#define F_MMU_INV_SEC_DBG (1<<5)
+
+
+#define REG_MMU_SEC_ABORT_INFO (0x40)
+#define REG_MMU_STANDARD_AXI_MODE (0x48)
+
+#define REG_MMU_PRIORITY (0x4c)
+#define REG_MMU_DCM_DIS (0x50)
+#define REG_MMU_WR_LEN (0x54)
+#define REG_MMU_HW_DEBUG (0x58)
+#define F_MMU_HW_DBG_L2_SCAN_ALL F_BIT_SET(1)
+#define F_MMU_HW_DBG_PFQ_BRDCST F_BIT_SET(0)
+
+#define REG_MMU_NON_BLOCKING_DIS 0x5C
+#define F_MMU_NON_BLOCK_DISABLE_BIT 1
+#define F_MMU_NON_BLOCK_HALF_ENTRY_BIT 2
+
+#define REG_MMU_LEGACY_4KB_MODE (0x60)
+
+#define REG_MMU_PFH_DIST0 0x80
+#define REG_MMU_PFH_DIST1 0x84
+#define REG_MMU_PFH_DIST2 0x88
+#define REG_MMU_PFH_DIST3 0x8c
+#define REG_MMU_PFH_DIST4 0x90
+#define REG_MMU_PFH_DIST5 0x94
+#define REG_MMU_PFH_DIST6 0x98
+
+#define REG_MMU_PFH_DIST(port) (0x80+(((port)>>3)<<2))
+#define F_MMU_PFH_DIST_VAL(port, val) ((val&0xf)<<(((port)&0x7)<<2))
+#define F_MMU_PFH_DIST_MASK(port) F_MMU_PFH_DIST_VAL((port), 0xf)
+
+#define REG_MMU_PFH_DIR0 0xF0
+#define REG_MMU_PFH_DIR1 0xF4
+#define REG_MMU_PFH_DIR(port) (((port) < 32) ? REG_MMU_PFH_DIR0 : REG_MMU_PFH_DIR1)
+#define F_MMU_PFH_DIR(port, val) ((!!(val))<<((port)&0x1f))
+
+
+#define REG_MMU_READ_ENTRY 0x100
+#define F_READ_ENTRY_EN F_BIT_SET(31)
+#define F_READ_ENTRY_MM1_MAIN F_BIT_SET(26)
+#define F_READ_ENTRY_MM0_MAIN F_BIT_SET(25)
+#define F_READ_ENTRY_MMx_MAIN(id) F_BIT_SET(25+id)
+#define F_READ_ENTRY_PFH F_BIT_SET(24)
+#define F_READ_ENTRY_MAIN_IDX(idx) F_VAL(idx, 21, 16)
+#define F_READ_ENTRY_PFH_IDX(idx) F_VAL(idx, 11, 5)
+ /* #define F_READ_ENTRY_PFH_HI_LO(high) F_VAL(high, 4,4) */
+ /* #define F_READ_ENTRY_PFH_PAGE(page) F_VAL(page, 3,2) */
+#define F_READ_ENTRY_PFH_PAGE_IDX(idx) F_VAL(idx, 4, 2)
+#define F_READ_ENTRY_PFH_WAY(way) F_VAL(way, 1, 0)
+
+#define REG_MMU_DES_RDATA 0x104
+
+#define REG_MMU_PFH_TAG_RDATA 0x108
+#define F_PFH_TAG_VA_GET(mmu, tag) (F_MSK_SHIFT(tag, 14, 4)<<(MMU_SET_MSB_OFFSET(mmu)+1))
+#define F_PFH_TAG_LAYER_BIT F_BIT_SET(3)
+#define F_PFH_TAG_16X_BIT F_BIT_SET(2) /* this bit is always 0 -- cost down. */
+#define F_PFH_TAG_SEC_BIT F_BIT_SET(1)
+#define F_PFH_TAG_AUTO_PFH F_BIT_SET(0)
+
+
+/* tag related macro */
+ /* #define MMU0_SET_ORDER 7 */
+ /* #define MMU1_SET_ORDER 6 */
+#define MMU_SET_ORDER(mmu) (7-(mmu))
+#define MMU_SET_NR(mmu) (1<<MMU_SET_ORDER(mmu))
+#define MMU_SET_LSB_OFFSET 15
+#define MMU_SET_MSB_OFFSET(mmu) (MMU_SET_LSB_OFFSET+MMU_SET_ORDER(mmu)-1)
+#define MMU_PFH_VA_TO_SET(mmu, va) F_MSK_SHIFT(va, MMU_SET_MSB_OFFSET(mmu), MMU_SET_LSB_OFFSET)
+
+#define MMU_PAGE_PER_LINE 8
+#define MMU_WAY_NR 4
+#define MMU_PFH_TOTAL_LINE(mmu) (MMU_SET_NR(mmu)*MMU_WAY_NR)
+
+
+#define REG_MMU_CTRL_REG 0x110
+#define F_MMU_CTRL_PFH_DIS(dis) F_BIT_VAL(dis, 0)
+#define F_MMU_CTRL_TLB_WALK_DIS(dis) F_BIT_VAL(dis, 1)
+#define F_MMU_CTRL_MONITOR_EN(en) F_BIT_VAL(en, 2)
+#define F_MMU_CTRL_MONITOR_CLR(clr) F_BIT_VAL(clr, 3)
+#define F_MMU_CTRL_PFH_RT_RPL_MODE(mod) F_BIT_VAL(mod, 4)
+#define F_MMU_CTRL_TF_PROT_VAL(prot) F_VAL(prot, 6, 5)
+#define F_MMU_CTRL_TF_PROT_MSK F_MSK(6, 5)
+#define F_MMU_CTRL_INT_HANG_en(en) F_BIT_VAL(en, 7)
+#define F_MMU_CTRL_COHERE_EN(en) F_BIT_VAL(en, 8)
+#define F_MMU_CTRL_IN_ORDER_WR(en) F_BIT_VAL(en, 9)
+#define F_MMU_CTRL_MAIN_TLB_SHARE_ALL(en) F_BIT_VAL(en, 10)
+
+
+#define REG_MMU_IVRP_PADDR 0x114
+#define F_MMU_IVRP_PA_SET(PA) (PA>>1)
+#define F_MMU_IVRP_8G_PA_SET(PA) ((PA>>1)|(1<<31))
+
+#define REG_MMU_INT_L2_CONTROL 0x120
+#define F_INT_L2_CLR_BIT (1<<12)
+#define F_INT_L2_MULTI_HIT_FAULT F_BIT_SET(0)
+#define F_INT_L2_TABLE_WALK_FAULT F_BIT_SET(1)
+#define F_INT_L2_PFH_DMA_FIFO_OVERFLOW F_BIT_SET(2)
+#define F_INT_L2_MISS_DMA_FIFO_OVERFLOW F_BIT_SET(3)
+#define F_INT_L2_INVALD_DONE F_BIT_SET(4)
+#define F_INT_L2_PFH_IN_OUT_FIFO_ERROR F_BIT_SET(5)
+#define F_INT_L2_MISS_FIFO_ERR F_BIT_SET(6)
+
+#define REG_MMU_INT_MAIN_CONTROL 0x124
+#define F_INT_TRANSLATION_FAULT(MMU) F_BIT_SET(0+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_MAIN_MULTI_HIT_FAULT(MMU) F_BIT_SET(1+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_INVALID_PHYSICAL_ADDRESS_FAULT(MMU) F_BIT_SET(2+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_ENTRY_REPLACEMENT_FAULT(MMU) F_BIT_SET(3+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_TLB_MISS_FAULT(MMU) F_BIT_SET(5+(((MMU)<<1)|((MMU)<<2)))
+#define F_INT_PFH_FIFO_ERR(MMU) F_BIT_SET(6+(((MMU)<<1)|((MMU)<<2)))
+
+#define F_INT_MAU(mmu, set) F_BIT_SET(14+(set)+(mmu<<2)) /* (14+(set)+(mmu*4)) */
+
+#define F_INT_MMU0_MAIN_MSK F_MSK(6, 0)
+#define F_INT_MMU1_MAIN_MSK F_MSK(13, 7)
+#define F_INT_MMU0_MAU_MSK F_MSK(17, 14)
+#define F_INT_MMU1_MAU_MSK F_MSK(21, 18)
+
+#define REG_MMU_CPE_DONE_SEC 0x128
+#define REG_MMU_CPE_DONE 0x12C
+
+#define REG_MMU_L2_FAULT_ST 0x130
+#define F_INT_L2_MISS_OUT_FIFO_ERROR F_BIT_SET(7)
+#define F_INT_L2_MISS_IN_FIFO_ERR F_BIT_SET(8)
+#define REG_MMU_MAIN_FAULT_ST 0x134
+
+#define REG_MMU_TBWALK_FAULT_VA 0x138
+#define F_MMU_TBWALK_FAULT_VA_MSK F_MSK(31, 12)
+#define F_MMU_TBWALK_FAULT_LAYER(regval) F_MSK_SHIFT(regval, 0, 0)
+
+#define REG_MMU_FAULT_VA(mmu) (0x13c+((mmu)<<3))
+#define F_MMU_FAULT_VA_MSK F_MSK(31, 12)
+#define F_MMU_FAULT_VA_WRITE_BIT F_BIT_SET(1)
+#define F_MMU_FAULT_VA_LAYER_BIT F_BIT_SET(0)
+
+#define REG_MMU_INVLD_PA(mmu) (0x140+((mmu)<<3))
+#define REG_MMU_INT_ID(mmu) (0x150+((mmu)<<2))
+
+#define REG_MMU_PF_MSCNT 0x160
+#define REG_MMU_PF_CNT 0x164
+#define REG_MMU_ACC_CNT(mmu) (0x168+(((mmu)<<3)|((mmu)<<2))) /* (0x168+((mmu)*12) */
+#define REG_MMU_MAIN_MSCNT(mmu) (0x16c+(((mmu)<<3)|((mmu)<<2)))
+#define REG_MMU_RS_PERF_CNT(mmu) (0x170+(((mmu)<<3)|((mmu)<<2)))
+
+#define MMU01_SQ_OFFSET (0x600-0x300)
+#define REG_MMU_SQ_START(mmu, x) (0x300+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+#define F_SQ_VA_MASK F_MSK(31, 18)
+#define F_SQ_EN_BIT (1<<17)
+ /* #define F_SQ_MULTI_ENTRY_VAL(x) (((x)&0xf)<<13) */
+#define REG_MMU_SQ_END(mmu, x) (0x304+((x)<<3)+((mmu)*MMU01_SQ_OFFSET))
+
+
+#define MMU_TOTAL_RS_NR 8
+#define REG_MMU_RSx_VA(mmu, x) (0x380+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_VA_GET(regval) ((regval)&F_MSK(31, 12))
+#define F_MMU_RSx_VA_VALID(regval) F_MSK_SHIFT(regval, 11, 11)
+#define F_MMU_RSx_VA_PID(regval) F_MSK_SHIFT(regval, 9, 0)
+
+#define REG_MMU_RSx_PA(mmu, x) (0x384+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_PA_GET(regval) ((regval)&F_MSK(31, 12))
+#define F_MMU_RSx_PA_VALID(regval) F_MSK_SHIFT(regval, 1, 0)
+
+#define REG_MMU_RSx_2ND_BASE(mmu, x) (0x388+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+
+#define REG_MMU_RSx_ST(mmu, x) (0x38c+((x)<<4)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MMU_RSx_ST_LID(regval) F_MSK_SHIFT(regval, 21, 20)
+#define F_MMU_RSx_ST_WRT(regval) F_MSK_SHIFT(regval, 12, 12)
+#define F_MMU_RSx_ST_OTHER(regval) F_MSK_SHIFT(regval, 8, 0)
+
+#define REG_MMU_MAIN_TAG(mmu, x) (0x500+((x)<<2)+((mmu)*MMU01_SQ_OFFSET))
+#define F_MAIN_TLB_VA_MSK F_MSK(31, 12)
+#define F_MAIN_TLB_LOCK_BIT (1<<11)
+#define F_MAIN_TLB_VALID_BIT (1<<10)
+#define F_MAIN_TLB_LAYER_BIT F_BIT_SET(9)
+#define F_MAIN_TLB_16X_BIT F_BIT_SET(8)
+#define F_MAIN_TLB_SEC_BIT F_BIT_SET(7)
+#define F_MAIN_TLB_INV_DES_BIT (1<<6)
+#define F_MAIN_TLB_SQ_EN_BIT (1<<5)
+#define F_MAIN_TLB_SQ_INDEX_MSK F_MSK(4, 1)
+#define F_MAIN_TLB_SQ_INDEX_GET(regval) F_MSK_SHIFT(regval, 4, 1)
+
+
+#define REG_MMU_MAU_START(mmu, mau) (0x900+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_START_BIT32(mmu, mau) (0x904+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END(mmu, mau) (0x908+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_END_BIT32(mmu, mau) (0x90C+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_PORT_EN(mmu, mau) (0x910+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ID(mmu, mau) (0x914+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR(mmu, mau) (0x918+((mau)*0x20)+((mmu)*0xa0))
+#define REG_MMU_MAU_ADDR_BIT32(mmu, mau) (0x91C+((mau)*0x20)+((mmu)*0xa0))
+
+#define REG_MMU_MAU_LARB_EN(mmu) (0x980+((mmu)*0xa0))
+#define F_MAU_LARB_VAL(mau, larb) ((larb)<<(mau*8))
+#define F_MAU_LARB_MSK(mau) (0xff<<(mau*8))
+#define REG_MMU_MAU_CLR(mmu) (0x984+((mmu)*0xa0))
+#define REG_MMU_MAU_IO(mmu) (0x988+((mmu)*0xa0))
+#define F_MAU_BIT_VAL(val, mau) F_BIT_VAL(val, mau)
+#define REG_MMU_MAU_RW(mmu) (0x98c+((mmu)*0xa0))
+#define REG_MMU_MAU_VA(mmu) (0x990+((mmu)*0xa0))
+#define REG_MMU_MAU_ASSERT_ST(mmu) (0x994+((mmu)*0xa0))
+
+#define REG_MMU_PFH_VLD_0 (0x180)
+#define REG_MMU_PFH_VLD(set, way) (REG_MMU_PFH_VLD_0+(((set)>>5)<<2)+((way)<<4)) /* +((set/32)*4)+(way*16) */
+#define F_MMU_PFH_VLD_BIT(set, way) F_BIT_SET((set)&0x1f) /* set%32 */
+
+
+
+/* ================================================================ */
+/* SMI larb */
+/* ================================================================ */
+
+#define SMI_ERROR_ADDR 0
+
+#if defined D2
+#define SMI_LARB_NR 3
+
+#define SMI_LARB0_PORT_NUM 8
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 13
+#elif defined D1
+#define SMI_LARB_NR 4
+
+#define SMI_LARB0_PORT_NUM 7
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 21
+#define SMI_LARB3_PORT_NUM 13
+#elif defined D3
+#define SMI_LARB_NR 4
+
+#define SMI_LARB0_PORT_NUM 10
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 21
+#define SMI_LARB3_PORT_NUM 13
+#elif defined R
+#define SMI_LARB_NR 2
+
+#define SMI_LARB0_PORT_NUM 7
+#define SMI_LARB1_PORT_NUM 11
+
+#elif defined MT73
+
+#define SMI_LARB_NR 6
+
+#elif defined MT27
+
+#define SMI_LARB_NR 3
+
+#endif
+
+#define SMI_LARB_STAT (0x0)
+#define SMI_LARB_IRQ_EN (0x4)
+#define SMI_LARB_IRQ_STATUS (0x8)
+#define SMI_LARB_SLP_CON (0xc)
+#define SMI_LARB_CON (0x10)
+#define SMI_LARB_CON_SET (0x14)
+#define SMI_LARB_CON_CLR (0x18)
+#define SMI_LARB_VC_PRI_MODE (0x20)
+#define SMI_LARB_CMD_THRT_CON (0x24)
+#define SMI_LARB_STARV_CON (0x28)
+#define SMI_LARB_EMI_CON (0x2C)
+#define SMI_LARB_SHARE_EN (0x30)
+#define SMI_LARB_BWL_EN (0x50)
+#define SMI_LARB_BWL_SOFT_EN (0x54)
+#define SMI_LARB_BWL_CON (0x58)
+#define SMI_LARB_OSTDL_EN (0x60)
+#define SMI_LARB_OSTDL_SOFT_EN (0x64)
+#define SMI_LARB_ULTRA_DIS (0x70)
+#define SMI_LARB_PREULTRA_DIS (0x74)
+#define SMI_LARB_FORCE_ULTRA (0x78)
+#define SMI_LARB_FORCE_PREULTRA (0x7c)
+#define SMI_LARB_MST_GRP_SEL_L (0x80)
+#define SMI_LARB_MST_GRP_SEL_H (0x84)
+#define SMI_LARB_INT_PATH_SEL (0x90)
+#define SMI_LARB_EXT_GREQ_VIO (0xa0)
+#define SMI_LARB_INT_GREQ_VIO (0xa4)
+#define SMI_LARB_OSTD_UDF_VIO (0xa8)
+#define SMI_LARB_OSTD_CRS_VIO (0xac)
+#define SMI_LARB_FIFO_STAT (0xb0)
+#define SMI_LARB_BUS_STAT (0xb4)
+#define SMI_LARB_CMD_THRT_STAT (0xb8)
+#define SMI_LARB_MON_REQ (0xbc)
+#define SMI_LARB_REQ_MASK (0xc0)
+#define SMI_LARB_REQ_DET (0xc4)
+#define SMI_LARB_EXT_ONGOING (0xc8)
+#define SMI_LARB_INT_ONGOING (0xcc)
+#define SMI_LARB_MISC_MON0 (0xd0)
+#define SMI_LARB_DBG_CON (0xf0)
+#define SMI_LARB_TST_MODE (0xf4)
+#define SMI_LARB_WRR_PORT (0x100)
+#define SMI_LARB_BWL_PORT (0x180)
+#define SMI_LARB_OSTDL_PORT (0x200)
+#define SMI_LARB_OSTD_MON_PORT (0x280)
+#define SMI_LARB_PINFO (0x300)
+#define SMI_LARB_MON_EN (0x400)
+#define SMI_LARB_MON_CLR (0x404)
+#define SMI_LARB_MON_PORT (0x408)
+#define SMI_LARB_MON_CON (0x40c)
+#define SMI_LARB_MON_ACT_CNT (0x410)
+#define SMI_LARB_MON_REQ_CNT (0x414)
+#define SMI_LARB_MON_BEAT_CNT (0x418)
+#define SMI_LARB_MON_BYTE_CNT (0x41c)
+#define SMI_LARB_MON_CP_CNT (0x420)
+#define SMI_LARB_MON_DP_CNT (0x424)
+#define SMI_LARB_MON_OSTD_CNT (0x428)
+#define SMI_LARB_MON_CP_MAX (0x430)
+#define SMI_LARB_MON_COS_MAX (0x434)
+#define SMI_LARB_MMU_EN (0xf00)
+#define F_SMI_MMU_EN(port, en) ((en)<<((port)))
+#define F_SMI_SEC_EN(port, en) ((en)<<((port)))
+#define REG_SMI_LARB_DOMN_OF_PORT(port) (((port) > 15) ? 0xf0c : 0xf08)
+#define F_SMI_DOMN(port, domain) (((domain)&0x3)<<((((port) > 15) ? (port-16) : port)<<1))
+
+
+
+
+/*
+#define SMI_SHARE_EN (0x210)
+ #define F_SMI_SHARE_EN(port) F_BIT_SET(m4u_port_2_larb_port(port))
+#define SMI_ROUTE_SEL (0x220)
+ #define F_SMI_ROUTE_SEL_EMI(port) F_BIT_SET(m4u_port_2_larb_port(port))
+#define SMI_MMULOCK_EN (0x230)
+*/
+
+
+/* ===============================================================
+ * SMI COMMON
+ * =============================================================== */
+#if defined R
+#define REG_OFFSET_SMI_L1LEN (0x200)
+#define REG_OFFSET_SMI_L1ARB0 (0x204)
+#define REG_OFFSET_SMI_L1ARB1 (0x208)
+#define REG_OFFSET_SMI_L1ARB2 (0x20C)
+#define REG_OFFSET_SMI_L1ARB3 (0x210)
+#define REG_OFFSET_SMI_L1ARB4 (0x214)
+#elif defined MT73
+#define REG_OFFSET_SMI_L1LEN (0x200)
+#define REG_OFFSET_SMI_L1ARB0 (0x204)
+#define REG_OFFSET_SMI_L1ARB1 (0x208)
+#define REG_OFFSET_SMI_L1ARB2 (0x20C)
+#define REG_OFFSET_SMI_L1ARB3 (0x210)
+#define REG_OFFSET_SMI_L1ARB4 (0x214)
+#define REG_OFFSET_SMI_L1ARB5 (0x218)
+#else
+#define REG_OFFSET_SMI_L1LEN (0x100)
+#define REG_OFFSET_SMI_L1ARB0 (0x104)
+#define REG_OFFSET_SMI_L1ARB1 (0x108)
+#define REG_OFFSET_SMI_L1ARB2 (0x10C)
+#define REG_OFFSET_SMI_L1ARB3 (0x110)
+#define REG_OFFSET_SMI_L1ARB4 (0x114)
+#endif
+
+/*
+#define REG_SMI_MON_AXI_ENA (0x1a0+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_CLR (0x1a4+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_TYPE (0x1ac+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_CON (0x1b0+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_ACT_CNT (0x1c0+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_REQ_CNT (0x1c4+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_OSTD_CNT (0x1c8+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_BEA_CNT (0x1cc+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_BYT_CNT (0x1d0+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_CP_CNT (0x1d4+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_DP_CNT (0x1d8+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_CP_MAX (0x1dc+SMI_COMMON_EXT_BASE)
+#define REG_SMI_MON_AXI_COS_MAX (0x1e0+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1LEN (0x200+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB0 (0x204+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB1 (0x208+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB2 (0x20C+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB3 (0x210+SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB4 (0x214+SMI_COMMON_EXT_BASE)
+#define REG_SMI_BUS_SEL (0x220+SMI_COMMON_EXT_BASE)
+ #define F_SMI_BUS_SEL_larb0(mmu_idx) F_VAL(mmu_idx, 1, 0)
+ #define F_SMI_BUS_SEL_larb1(mmu_idx) F_VAL(mmu_idx, 3, 2)
+ #define F_SMI_BUS_SEL_larb2(mmu_idx) F_VAL(mmu_idx, 5, 4)
+ #define F_SMI_BUS_SEL_larb3(mmu_idx) F_VAL(mmu_idx, 7, 6)
+ #define F_SMI_BUS_SEL_larb4(mmu_idx) F_VAL(mmu_idx, 9, 8)
+#define REG_SMI_WRR_REG0 (0x228+SMI_COMMON_EXT_BASE)
+#define REG_SMI_READ_FIFO_TH (0x230+SMI_COMMON_EXT_BASE)
+#define REG_SMI_SMI_M4U_TH (0x234+SMI_COMMON_EXT_BASE)
+#define REG_SMI_SMI_FIFO2_TH (0x238+SMI_COMMON_EXT_BASE)
+#define REG_SMI_SMI_PREULTRA_MASK0 (0x23c+SMI_COMMON_EXT_BASE)
+#define REG_SMI_SMI_PREULTRA_MASK1 (0x240+SMI_COMMON_EXT_BASE)
+#define REG_SMI_DCM (0x300+SMI_COMMON_EXT_BASE)
+#define REG_SMI_SMI_ELA (0x304+SMI_COMMON_EXT_BASE)
+#define REG_SMI_DEBUG0 (0x400+SMI_COMMON_EXT_BASE)
+#define REG_SMI_DEBUG1 (0x404+SMI_COMMON_EXT_BASE)
+#define REG_SMI_DEBUG2 (0x408+SMI_COMMON_EXT_BASE)
+#define REG_SMI_DUMMY (0x418+SMI_COMMON_EXT_BASE)
+
+*/
+
+#define REG_SMI_M4U_TH (0x234 + SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1LEN (0x200 + SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB0 (0x204 + SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB1 (0x208 + SMI_COMMON_EXT_BASE)
+#define REG_SMI_L1ARB2 (0x20C + SMI_COMMON_EXT_BASE)
+#define REG_SMI_WRR_REG0 (0x228 + SMI_COMMON_EXT_BASE)
+#define REG_SMI_READ_FIFO_TH (0x230 + SMI_COMMON_EXT_BASE)
+
+
+/* ========================================================================= */
+/* peripheral system */
+/* ========================================================================= */
+#define REG_PERIAXI_BUS_CTL3 (0x208+0xf0003000)
+#define F_PERI_MMU_EN(port, en) ((en)<<((port)))
+
+
+static inline unsigned int M4U_ReadReg32(unsigned long M4uBase, unsigned long Offset)
+{
+ unsigned int val;
+
+ val = ioread32((void *)(M4uBase + Offset));
+
+ return val;
+}
+
+static inline void M4U_WriteReg32(unsigned long M4uBase, unsigned long Offset, unsigned int Val)
+{
+ /* unsigned int read; */
+ iowrite32(Val, (void *)(M4uBase + Offset));
+ /* make sure memory manipulation sequence is OK */
+ mb();
+
+}
+
+static inline unsigned int COM_ReadReg32(unsigned long addr)
+{
+ return ioread32((void *)addr);
+}
+
+static inline void COM_WriteReg32(unsigned long addr, unsigned int Val)
+{
+ iowrite32(Val, (void *)addr);
+ /* make sure memory manipulation sequence is OK */
+ mb();
+}
+
+
+extern unsigned long smi_reg_base_common_ext;
+extern unsigned long smi_reg_base_barb0;
+extern unsigned long smi_reg_base_barb1;
+#if defined D2
+extern unsigned long smi_reg_base_barb2;
+#elif defined D1 || defined D3
+extern unsigned long smi_reg_base_barb2;
+extern unsigned long smi_reg_base_barb3;
+#endif
+
+#endif
diff --git a/drivers/misc/mediatek/smi/variant/smi_variant.c b/drivers/misc/mediatek/smi/variant/smi_variant.c
new file mode 100644
index 000000000..f9adae4bd
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_variant.c
@@ -0,0 +1,1760 @@
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kobject.h>
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#endif
+
+#include "mt_smi.h"
+
+#include "smi_reg.h"
+#include "smi_common.h"
+#include "smi_debug.h"
+
+#include "smi_priv.h"
+#include "m4u.h"
+
+/*#include "mmdvfs_mgr.h"*/
+
+#define SMI_LOG_TAG "SMI"
+
+#define LARB_BACKUP_REG_SIZE 128
+#ifdef MT73
+#define SMI_COMMON_BACKUP_REG_NUM 10
+
+/* SMI COMMON register list to be backed up */
+static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = {
+ 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x220, 0x230, 0x234, 0x238
+};
+
+#elif defined MT27
+/*
+ * MT8127 do not have the following register, offset(0x220, 0x238),
+ * which are SMI_BUS_SEL and SMI_FIFO2_TH, so do not backup them.
+ */
+#define SMI_COMMON_BACKUP_REG_NUM 8
+
+static unsigned short g_smi_common_backup_reg_offset[SMI_COMMON_BACKUP_REG_NUM] = {
+ 0x200, 0x204, 0x208, 0x20c, 0x210, 0x214, 0x230, 0x234
+};
+
+#endif
+
+#define SF_HWC_PIXEL_MAX_NORMAL (2560 * 1600 * 7)
+#define SF_HWC_PIXEL_MAX_VR (2560 * 1600 * 7)
+#define SF_HWC_PIXEL_MAX_VP (2560 * 1600 * 7)
+#define SF_HWC_PIXEL_MAX_ALWAYS_GPU (2560 * 1600 * 1)
+
+#define SMIDBG(level, x...) \
+ do { if (smi_debug_level >= (level))\
+ SMIMSG(x);\
+ } while (0)
+
+struct SMI_struct {
+ spinlock_t SMI_lock;
+ /*one bit represent one module */
+ unsigned int pu4ConcurrencyTable[SMI_BWC_SCEN_CNT];
+};
+
+static struct SMI_struct g_SMIInfo;
+
+static struct device *smiDeviceUevent;
+
+static bool fglarbcallback; /* larb backup/restore */
+
+struct mtk_smi_data *smi_data;
+
+static struct cdev *pSmiDev;
+
+static unsigned int g_smi_common_backup[SMI_COMMON_BACKUP_REG_NUM];
+
+/* To keep the HW's init value*/
+static bool is_default_value_saved;
+static unsigned int default_val_smi_l1arb[SMI_LARB_NR] = { 0 };
+
+static unsigned int wifi_disp_transaction;
+
+/* debug level */
+static unsigned int smi_debug_level;
+
+/* tuning mode, 1 for register ioctl */
+static unsigned int smi_tuning_mode;
+
+static unsigned int smi_profile = SMI_BWC_SCEN_NORMAL;
+
+static unsigned int *pLarbRegBackUp[SMI_LARB_NR];
+static int g_bInited;
+
+static MTK_SMI_BWC_MM_INFO g_smi_bwc_mm_info = { 0, 0, {0, 0}, {0, 0},
+{0, 0}, {0, 0}, 0, 0, 0,
+SF_HWC_PIXEL_MAX_NORMAL
+};
+
+struct mtk_smi_common {
+ void __iomem *base;
+ struct clk *clk_apb;
+ struct clk *clk_smi;
+};
+struct mtk_smi_larb {
+ void __iomem *base;
+ struct clk *clk_apb;
+ struct clk *clk_smi;
+ struct device *smi;
+};
+
+static void smi_dumpLarb(unsigned int index);
+static void smi_dumpCommon(void);
+static int _mtk_smi_larb_get(struct device *larbdev, bool pm);
+static void _mtk_smi_larb_put(struct device *larbdev, bool pm);
+
+#if IS_ENABLED(CONFIG_COMPAT)
+static long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+#define MTK_SMI_COMPAT_ioctl NULL
+#endif
+
+/* Use this function to get the base address of a larb register bank,
+* with error checking on the larb index.
+*/
+static unsigned long get_larb_base_addr(int larb_id)
+{
+ if (larb_id >= SMI_LARB_NR || larb_id < 0 || !smi_data)
+ return SMI_ERROR_ADDR;
+ else
+ return smi_data->larb_base[larb_id];
+}
+
+unsigned long mtk_smi_larb_get_base(int larbid)
+{
+ return get_larb_base_addr(larbid);
+}
+
+static unsigned int smi_get_larb_index(struct device *dev)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < smi_data->larb_nr; idx++) {
+ if (smi_data->larb[idx] == dev)
+ break;
+ }
+ return idx;
+}
+
+int mtk_smi_larb_clock_on(int larbid, bool pm)
+{
+ if (!smi_data || larbid < 0 || larbid >= smi_data->larb_nr)
+ return -EINVAL;
+
+ return _mtk_smi_larb_get(smi_data->larb[larbid], pm);
+}
+
+void mtk_smi_larb_clock_off(int larbid, bool pm)
+{
+ if (!smi_data || larbid < 0 || larbid >= smi_data->larb_nr)
+ return;
+
+ _mtk_smi_larb_put(smi_data->larb[larbid], pm);
+}
+
+static void backup_smi_common(void)
+{
+ int i;
+
+ for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
+ g_smi_common_backup[i] =
+ M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ (unsigned long)g_smi_common_backup_reg_offset[i]);
+ }
+}
+
+static void restore_smi_common(void)
+{
+ int i;
+
+ for (i = 0; i < SMI_COMMON_BACKUP_REG_NUM; i++) {
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE,
+ (unsigned long)g_smi_common_backup_reg_offset[i],
+ g_smi_common_backup[i]);
+ }
+}
+
+static void backup_larb_smi(int index)
+{
+ int port_index = 0;
+ unsigned short int *backup_ptr = NULL;
+ unsigned long larb_base = get_larb_base_addr(index);
+ unsigned long larb_offset = 0x200;
+ int total_port_num = 0;
+
+ /* boundary check for larb_port_num and larb_port_backup access */
+ if (index < 0 || index >= SMI_LARB_NR)
+ return;
+
+ total_port_num = smi_data->smi_priv->larb_port_num[index];
+ backup_ptr = smi_data->larb_port_backup + index*SMI_LARB_PORT_NR_MAX;
+
+ /* boundary check for port value access */
+ if (total_port_num <= 0 || backup_ptr == NULL)
+ return;
+
+ for (port_index = 0; port_index < total_port_num; port_index++) {
+ *backup_ptr = (unsigned short int)(M4U_ReadReg32(larb_base, larb_offset));
+ backup_ptr++;
+ larb_offset += 4;
+ }
+
+	/* Back up SMI common along with larb0; the SMI common clock is
+	 * guaranteed to be on while the larbs are being processed. */
+ if (index == 0)
+ backup_smi_common();
+
+}
+
+
+static void restore_larb_smi(int index)
+{
+ int port_index = 0;
+ unsigned short int *backup_ptr = NULL;
+ unsigned long larb_base = get_larb_base_addr(index);
+ unsigned long larb_offset = 0x200;
+ unsigned int backup_value = 0;
+ int total_port_num = 0;
+
+ /* boundary check for larb_port_num and larb_port_backup access */
+ if (index < 0 || index >= SMI_LARB_NR)
+ return;
+
+ total_port_num = smi_data->smi_priv->larb_port_num[index];
+ backup_ptr = smi_data->larb_port_backup + index*SMI_LARB_PORT_NR_MAX;
+
+ /* boundary check for port value access */
+ if (total_port_num <= 0 || backup_ptr == NULL)
+ return;
+
+	/* Restore SMI common along with larb0; the SMI common clock is
+	 * guaranteed to be on while the larbs are being processed. */
+ if (index == 0)
+ restore_smi_common();
+
+ for (port_index = 0; port_index < total_port_num; port_index++) {
+ backup_value = *backup_ptr;
+ M4U_WriteReg32(larb_base, larb_offset, backup_value);
+ backup_ptr++;
+ larb_offset += 4;
+ }
+
+#ifndef MT27
+ /* we do not backup 0x20 because it is a fixed setting */
+ M4U_WriteReg32(larb_base, 0x20, smi_data->smi_priv->larb_vc_setting[index]);
+#endif
+	/* turn off EMI empty OSTD double, fixed setting */
+ M4U_WriteReg32(larb_base, 0x2c, 4);
+
+}
+
+static int larb_reg_backup(int larb)
+{
+ unsigned int *pReg = pLarbRegBackUp[larb];
+ unsigned long larb_base = get_larb_base_addr(larb);
+
+ *(pReg++) = M4U_ReadReg32(larb_base, SMI_LARB_CON);
+
+ /* *(pReg++) = M4U_ReadReg32(larb_base, SMI_SHARE_EN); */
+ /* *(pReg++) = M4U_ReadReg32(larb_base, SMI_ROUTE_SEL); */
+
+ backup_larb_smi(larb);
+
+ if (0 == larb)
+ g_bInited = 0;
+#ifndef MT27
+ m4u_larb_backup_sec(larb);
+#endif
+ return 0;
+}
+
+static int smi_larb_init(unsigned int larb)
+{
+ unsigned int regval = 0;
+ unsigned int regval1 = 0;
+ unsigned int regval2 = 0;
+ unsigned long larb_base = get_larb_base_addr(larb);
+
+	/* The clock manager has already enabled the LARB clock before the
+	 * restore callback is invoked; it is disabled again once the callback
+	 * returns. OSTD must therefore be enabled before the engine starts. */
+ regval = M4U_ReadReg32(larb_base, SMI_LARB_STAT);
+
+ /*todo */
+ /* regval1 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ0); */
+ /* regval2 = M4U_ReadReg32(larb_base , SMI_LARB_MON_BUS_REQ1); */
+
+ if (0 == regval) {
+ SMIDBG(1, "Init OSTD for larb_base: 0x%lx\n", larb_base);
+ M4U_WriteReg32(larb_base, SMI_LARB_OSTDL_SOFT_EN, 0xffffffff);
+ } else {
+ SMIMSG("Larb: 0x%lx is busy : 0x%x , port:0x%x,0x%x ,fail to set OSTD\n", larb_base,
+ regval, regval1, regval2);
+ smi_dumpDebugMsg();
+ if (smi_debug_level >= 1) {
+ SMIERR("DISP_MDP LARB 0x%lx OSTD cannot be set:0x%x,port:0x%x,0x%x\n",
+ larb_base, regval, regval1, regval2);
+ } else {
+ dump_stack();
+ }
+ }
+
+ restore_larb_smi(larb);
+
+ return 0;
+}
+
+int larb_reg_restore(int larb)
+{
+ unsigned long larb_base = SMI_ERROR_ADDR;
+ unsigned int regval = 0;
+ unsigned int *pReg = NULL;
+
+ larb_base = get_larb_base_addr(larb);
+
+ /* The larb assign doesn't exist */
+ if (larb_base == SMI_ERROR_ADDR) {
+ SMIMSG("Can't find the base address for Larb%d\n", larb);
+ return 0;
+ }
+
+ pReg = pLarbRegBackUp[larb];
+
+ SMIDBG(1, "+larb_reg_restore(), larb_idx=%d\n", larb);
+ SMIDBG(1, "m4u part restore, larb_idx=%d\n", larb);
+ /*warning: larb_con is controlled by set/clr */
+ regval = *(pReg++);
+ M4U_WriteReg32(larb_base, SMI_LARB_CON_CLR, ~(regval));
+ M4U_WriteReg32(larb_base, SMI_LARB_CON_SET, (regval));
+
+ /*M4U_WriteReg32(larb_base, SMI_SHARE_EN, *(pReg++) ); */
+ /*M4U_WriteReg32(larb_base, SMI_ROUTE_SEL, *(pReg++) ); */
+
+ smi_larb_init(larb);
+#ifndef MT27
+ m4u_larb_restore_sec(larb);
+#endif
+ return 0;
+}
+
+/* Fake mode check, e.g. WFD */
+static int fake_mode_handling(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
+{
+ if (p_conf->scenario == SMI_BWC_SCEN_WFD) {
+ if (p_conf->b_on_off) {
+ wifi_disp_transaction = 1;
+ SMIMSG("Enable WFD in profile: %d\n", smi_profile);
+ } else {
+ wifi_disp_transaction = 0;
+ SMIMSG("Disable WFD in profile: %d\n", smi_profile);
+ }
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+static int ovl_limit_uevent(int bwc_scenario, int ovl_pixel_limit)
+{
+ int err = 0;
+ char *envp[3];
+ char scenario_buf[32] = "";
+ char ovl_limit_buf[32] = "";
+
+ /* scenario_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); */
+ /* ovl_limit_buf = kzalloc(sizeof(char)*128, GFP_KERNEL); */
+
+ snprintf(scenario_buf, 31, "SCEN=%d", bwc_scenario);
+ snprintf(ovl_limit_buf, 31, "HWOVL=%d", ovl_pixel_limit);
+
+ envp[0] = scenario_buf;
+ envp[1] = ovl_limit_buf;
+ envp[2] = NULL;
+
+ if (pSmiDev != NULL) {
+ /* err = kobject_uevent_env(&(pSmiDev->kobj), KOBJ_CHANGE, envp); */
+ /* use smi_data->dev.lobj instead */
+ /* err = kobject_uevent_env(&(smi_data->dev->kobj), KOBJ_CHANGE, envp); */
+ /* user smiDeviceUevent->kobj instead */
+ err = kobject_uevent_env(&(smiDeviceUevent->kobj), KOBJ_CHANGE, envp);
+ SMIMSG("Notify OVL limitaion=%d, SCEN=%d", ovl_pixel_limit, bwc_scenario);
+ }
+ /* kfree(scenario_buf); */
+ /* kfree(ovl_limit_buf); */
+
+ if (err < 0)
+ SMIMSG(KERN_INFO "[%s] kobject_uevent_env error = %d\n", __func__, err);
+
+ return err;
+}
+
+static int smi_bwc_config(MTK_SMI_BWC_CONFIG *p_conf, unsigned int *pu4LocalCnt)
+{
+ int i;
+ int result = 0;
+ unsigned int u4Concurrency = 0;
+ MTK_SMI_BWC_SCEN eFinalScen;
+ static MTK_SMI_BWC_SCEN ePreviousFinalScen = SMI_BWC_SCEN_CNT;
+ struct mtk_smi_priv *smicur = (struct mtk_smi_priv *)smi_data->smi_priv;
+
+ if (smi_tuning_mode == 1) {
+ SMIMSG("Doesn't change profile in tunning mode");
+ return 0;
+ }
+
+ spin_lock(&g_SMIInfo.SMI_lock);
+ result = fake_mode_handling(p_conf, pu4LocalCnt);
+ spin_unlock(&g_SMIInfo.SMI_lock);
+
+ /* Fake mode is not a real SMI profile, so we need to return here */
+ if (result == 1)
+ return 0;
+
+ if ((SMI_BWC_SCEN_CNT <= p_conf->scenario) || (0 > p_conf->scenario)) {
+ SMIERR("Incorrect SMI BWC config : 0x%x, how could this be...\n", p_conf->scenario);
+ return -1;
+ }
+/* Debug - S */
+/* SMIMSG("SMI setTo%d,%s,%d\n" , p_conf->scenario , (p_conf->b_on_off ? "on" : "off") , ePreviousFinalScen); */
+/* Debug - E */
+#if 0
+ if (p_conf->b_on_off) {
+ /* set mmdvfs step according to certain scenarios */
+ mmdvfs_notify_scenario_enter(p_conf->scenario);
+ } else {
+ /* set mmdvfs step to default after the scenario exits */
+ mmdvfs_notify_scenario_exit(p_conf->scenario);
+ }
+#endif
+ /* turn on larb clock */
+ for (i = 0; i < SMI_LARB_NR; i++)
+ mtk_smi_larb_clock_on(i, true);
+
+ spin_lock(&g_SMIInfo.SMI_lock);
+
+ if (p_conf->b_on_off) {
+ /* turn on certain scenario */
+ g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] += 1;
+
+ if (NULL != pu4LocalCnt)
+ pu4LocalCnt[p_conf->scenario] += 1;
+ } else {
+ /* turn off certain scenario */
+ if (0 == g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]) {
+ SMIMSG("Too many turning off for global SMI profile:%d,%d\n",
+ p_conf->scenario, g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario]);
+ } else {
+ g_SMIInfo.pu4ConcurrencyTable[p_conf->scenario] -= 1;
+ }
+
+ if (NULL != pu4LocalCnt) {
+ if (0 == pu4LocalCnt[p_conf->scenario]) {
+ SMIMSG
+ ("Process : %s did too many turning off for local SMI profile:%d,%d\n",
+ current->comm, p_conf->scenario,
+ pu4LocalCnt[p_conf->scenario]);
+ } else {
+ pu4LocalCnt[p_conf->scenario] -= 1;
+ }
+ }
+ }
+
+ for (i = 0; i < SMI_BWC_SCEN_CNT; i++) {
+ if (g_SMIInfo.pu4ConcurrencyTable[i])
+ u4Concurrency |= (1 << i);
+ }
+
+ if ((1 << SMI_BWC_SCEN_MM_GPU) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_MM_GPU;
+ else if ((1 << SMI_BWC_SCEN_VR_SLOW) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_VR_SLOW;
+ else if ((1 << SMI_BWC_SCEN_VR) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_VR;
+ else if ((1 << SMI_BWC_SCEN_ICFP) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_VR;
+ else if ((1 << SMI_BWC_SCEN_VP) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_VP;
+ else if ((1 << SMI_BWC_SCEN_SWDEC_VP) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_SWDEC_VP;
+ else if ((1 << SMI_BWC_SCEN_VENC) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_VENC;
+ else if ((1 << SMI_BWC_SCEN_HDMI) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_HDMI;
+ else if ((1 << SMI_BWC_SCEN_HDMI4K) & u4Concurrency)
+ eFinalScen = SMI_BWC_SCEN_HDMI4K;
+ else
+ eFinalScen = SMI_BWC_SCEN_NORMAL;
+
+ if (ePreviousFinalScen == eFinalScen) {
+ SMIMSG("Scen equal%d,don't change\n", eFinalScen);
+ goto err_clkoff;
+ } else {
+ ePreviousFinalScen = eFinalScen;
+ }
+
+ smi_profile = eFinalScen;
+
+ /* Bandwidth Limiter */
+ switch (eFinalScen) {
+ case SMI_BWC_SCEN_VP:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VP");
+ smicur->vp_setting(smi_data);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
+ break;
+
+ case SMI_BWC_SCEN_SWDEC_VP:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_SWDEC_VP");
+ smicur->vp_setting(smi_data);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VP;
+ break;
+
+ case SMI_BWC_SCEN_VR:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
+ smicur->vr_setting(smi_data);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
+ break;
+
+ case SMI_BWC_SCEN_VR_SLOW:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_VR");
+ smi_profile = SMI_BWC_SCEN_VR_SLOW;
+ smicur->vr_setting(smi_data);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_VR;
+ break;
+
+ case SMI_BWC_SCEN_VENC:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_VENC");
+ smicur->vr_setting(smi_data);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ break;
+
+ case SMI_BWC_SCEN_NORMAL:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_NORMAL");
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ smicur->init_setting(smi_data, &is_default_value_saved,
+ default_val_smi_l1arb, smi_data->larb_nr);
+ break;
+
+ case SMI_BWC_SCEN_MM_GPU:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_MM_GPU");
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ smicur->init_setting(smi_data, &is_default_value_saved,
+ default_val_smi_l1arb, smi_data->larb_nr);
+ break;
+
+ case SMI_BWC_SCEN_HDMI:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_HDMI");
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ smicur->hdmi_setting(smi_data);
+ break;
+
+ case SMI_BWC_SCEN_HDMI4K:
+ SMIMSG("[SMI_PROFILE] : %s\n", "SMI_BWC_SCEN_HDMI4K");
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ smicur->hdmi_4k_setting(smi_data);
+ break;
+
+ default:
+ SMIMSG("[SMI_PROFILE] : %s %d\n", "initSetting", eFinalScen);
+ smicur->init_setting(smi_data, &is_default_value_saved,
+ default_val_smi_l1arb, smi_data->larb_nr);
+ g_smi_bwc_mm_info.hw_ovl_limit = SF_HWC_PIXEL_MAX_NORMAL;
+ break;
+ }
+
+ spin_unlock(&g_SMIInfo.SMI_lock);
+
+ /*turn off larb clock */
+ for (i = 0; i < SMI_LARB_NR; i++)
+ mtk_smi_larb_clock_off(i, true);
+
+ /* Since send uevent may trigger sleeping, we must send the event after releasing spin lock */
+ ovl_limit_uevent(smi_profile, g_smi_bwc_mm_info.hw_ovl_limit);
+#ifndef MT27
+ /* force 30 fps in VR slow motion, because disp driver set fps apis got mutex,
+ * call these APIs only when necessary */
+ {
+ static unsigned int current_fps;
+
+ if ((eFinalScen == SMI_BWC_SCEN_VR_SLOW) && (current_fps != 30)) {
+ /* force 30 fps in VR slow motion profile */
+ primary_display_force_set_vsync_fps(30);
+ current_fps = 30;
+ SMIMSG("[SMI_PROFILE] set 30 fps\n");
+ } else if ((eFinalScen != SMI_BWC_SCEN_VR_SLOW) && (current_fps == 30)) {
+ /* back to normal fps */
+ current_fps = primary_display_get_fps();
+ primary_display_force_set_vsync_fps(current_fps);
+ SMIMSG("[SMI_PROFILE] back to %u fps\n", current_fps);
+ }
+ }
+#endif
+ SMIMSG("SMI_PROFILE to:%d %s,cur:%d,%d,%d,%d\n", p_conf->scenario,
+ (p_conf->b_on_off ? "on" : "off"), eFinalScen,
+ g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL],
+ g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VR],
+ g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_VP]);
+
+ return 0;
+
+/* Debug usage - S */
+/* smi_dumpDebugMsg(); */
+/* SMIMSG("Config:%d,%d,%d\n" , eFinalScen ,
+*g_SMIInfo.pu4ConcurrencyTable[SMI_BWC_SCEN_NORMAL] ,
+*(NULL == pu4LocalCnt ? (-1) : pu4LocalCnt[p_conf->scenario])); */
+/* Debug usage - E */
+
+err_clkoff:
+ spin_unlock(&g_SMIInfo.SMI_lock);
+
+ /*turn off larb clock */
+ for (i = 0; i < SMI_LARB_NR; i++)
+ mtk_smi_larb_clock_off(i, true);
+ return 0;
+}
+
+/*
+const struct dev_pm_ops mtk_smi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(smiclk_subsys_before_off, smiclk_subsys_after_on)
+};*/
+
+int smi_common_init(void)
+{
+ int i;
+
+ for (i = 0; i < SMI_LARB_NR; i++) {
+ pLarbRegBackUp[i] = kmalloc(LARB_BACKUP_REG_SIZE, GFP_KERNEL | __GFP_ZERO);
+ if (pLarbRegBackUp[i] == NULL)
+ SMIERR("pLarbRegBackUp kmalloc fail %d\n", i);
+ }
+
+ for (i = 0; i < smi_data->larb_nr; i++)
+ mtk_smi_larb_clock_on(i, true);
+
+ /* apply init setting after kernel boot */
+ smi_data->smi_priv->init_setting(smi_data, &is_default_value_saved,
+ default_val_smi_l1arb, smi_data->larb_nr);
+
+
+ fglarbcallback = true;
+
+ for (i = smi_data->larb_nr; i >= 0; i--)
+ mtk_smi_larb_clock_off(i, true);
+
+ return 0;
+}
+
+static int smi_open(struct inode *inode, struct file *file)
+{
+ file->private_data = kmalloc_array(SMI_BWC_SCEN_CNT, sizeof(unsigned int), GFP_ATOMIC);
+
+ if (NULL == file->private_data) {
+ SMIMSG("Not enough entry for DDP open operation\n");
+ return -ENOMEM;
+ }
+
+ memset(file->private_data, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
+
+ return 0;
+}
+
+static int smi_release(struct inode *inode, struct file *file)
+{
+ if (NULL != file->private_data) {
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+
+ return 0;
+}
+
+/* GMP start */
+
+void smi_bwc_mm_info_set(int property_id, long val1, long val2)
+{
+
+ switch (property_id) {
+ case SMI_BWC_INFO_CON_PROFILE:
+ g_smi_bwc_mm_info.concurrent_profile = (int)val1;
+ break;
+ case SMI_BWC_INFO_SENSOR_SIZE:
+ g_smi_bwc_mm_info.sensor_size[0] = val1;
+ g_smi_bwc_mm_info.sensor_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_VIDEO_RECORD_SIZE:
+ g_smi_bwc_mm_info.video_record_size[0] = val1;
+ g_smi_bwc_mm_info.video_record_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_DISP_SIZE:
+ g_smi_bwc_mm_info.display_size[0] = val1;
+ g_smi_bwc_mm_info.display_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_TV_OUT_SIZE:
+ g_smi_bwc_mm_info.tv_out_size[0] = val1;
+ g_smi_bwc_mm_info.tv_out_size[1] = val2;
+ break;
+ case SMI_BWC_INFO_FPS:
+ g_smi_bwc_mm_info.fps = (int)val1;
+ break;
+ case SMI_BWC_INFO_VIDEO_ENCODE_CODEC:
+ g_smi_bwc_mm_info.video_encode_codec = (int)val1;
+ break;
+ case SMI_BWC_INFO_VIDEO_DECODE_CODEC:
+ g_smi_bwc_mm_info.video_decode_codec = (int)val1;
+ break;
+ }
+}
+
+/* GMP end */
+
+
+
+static long smi_ioctl(struct file *pFile, unsigned int cmd, unsigned long param)
+{
+ int ret = 0;
+/* unsigned long * pu4Cnt = (unsigned long *)pFile->private_data; */
+
+ switch (cmd) {
+
+ case MTK_IOC_SMI_BWC_CONFIG:
+ {
+ MTK_SMI_BWC_CONFIG cfg;
+
+ ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_CONFIG));
+ if (ret) {
+ SMIMSG(" SMI_BWC_CONFIG, copy_from_user failed: %d\n", ret);
+ return -EFAULT;
+ }
+
+ ret = smi_bwc_config(&cfg, NULL);
+ }
+ break;
+ /* GMP start */
+ case MTK_IOC_SMI_BWC_INFO_SET:
+ {
+ MTK_SMI_BWC_INFO_SET cfg;
+ /* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... start"); */
+ ret = copy_from_user(&cfg, (void *)param, sizeof(MTK_SMI_BWC_INFO_SET));
+ if (ret) {
+ SMIMSG(" MTK_IOC_SMI_BWC_INFO_SET, copy_to_user failed: %d\n", ret);
+ return -EFAULT;
+ }
+ /* Set the address to the value assigned by user space program */
+ smi_bwc_mm_info_set(cfg.property, cfg.value1, cfg.value2);
+ /* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_SET request... finish"); */
+ break;
+ }
+ case MTK_IOC_SMI_BWC_INFO_GET:
+ {
+ ret = copy_to_user((void *)param, (void *)&g_smi_bwc_mm_info,
+ sizeof(MTK_SMI_BWC_MM_INFO));
+
+ if (ret) {
+ SMIMSG(" MTK_IOC_SMI_BWC_INFO_GET, copy_to_user failed: %d\n", ret);
+ return -EFAULT;
+ }
+ /* SMIMSG("Handle MTK_IOC_SMI_BWC_INFO_GET request... finish"); */
+ break;
+ }
+ /* GMP end */
+
+ case MTK_IOC_SMI_DUMP_LARB:
+ {
+ unsigned int larb_index;
+
+ ret = copy_from_user(&larb_index, (void *)param, sizeof(unsigned int));
+ if (ret)
+ return -EFAULT;
+
+ smi_dumpLarb(larb_index);
+ }
+ break;
+
+ case MTK_IOC_SMI_DUMP_COMMON:
+ {
+ unsigned int arg;
+
+ ret = copy_from_user(&arg, (void *)param, sizeof(unsigned int));
+ if (ret)
+ return -EFAULT;
+
+ smi_dumpCommon();
+ }
+ break;
+
+ /*case MTK_IOC_MMDVFS_CMD:
+ {
+ MTK_MMDVFS_CMD mmdvfs_cmd;
+
+ if (copy_from_user(&mmdvfs_cmd, (void *)param, sizeof(MTK_MMDVFS_CMD)))
+ return -EFAULT;
+
+ mmdvfs_handle_cmd(&mmdvfs_cmd);
+
+ if (copy_to_user
+ ((void *)param, (void *)&mmdvfs_cmd, sizeof(MTK_MMDVFS_CMD)))
+ return -EFAULT;
+
+ break;
+ }*/
+ default:
+ return -1;
+ }
+
+ return ret;
+}
+
+static const struct file_operations smiFops = {
+ .owner = THIS_MODULE,
+ .open = smi_open,
+ .release = smi_release,
+ .unlocked_ioctl = smi_ioctl,
+ .compat_ioctl = MTK_SMI_COMPAT_ioctl
+};
+
+static dev_t smiDevNo = MKDEV(MTK_SMI_MAJOR_NUMBER, 0);
+static inline int smi_register(void)
+{
+ if (alloc_chrdev_region(&smiDevNo, 0, 1, "MTK_SMI")) {
+ SMIERR("Allocate device No. failed");
+ return -EAGAIN;
+ }
+ /* Allocate driver */
+ pSmiDev = cdev_alloc();
+
+ if (NULL == pSmiDev) {
+ unregister_chrdev_region(smiDevNo, 1);
+ SMIERR("Allocate mem for kobject failed");
+ return -ENOMEM;
+ }
+ /* Attatch file operation. */
+ cdev_init(pSmiDev, &smiFops);
+ pSmiDev->owner = THIS_MODULE;
+
+ /* Add to system */
+ if (cdev_add(pSmiDev, smiDevNo, 1)) {
+ SMIERR("Attatch file operation failed");
+ unregister_chrdev_region(smiDevNo, 1);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static struct class *pSmiClass;
+static int smi_dev_register(void)
+{
+ int ret;
+ struct device *smiDevice = NULL;
+
+ if (smi_register()) {
+ pr_err("register SMI failed\n");
+ return -EAGAIN;
+ }
+
+ pSmiClass = class_create(THIS_MODULE, "MTK_SMI");
+ if (IS_ERR(pSmiClass)) {
+ ret = PTR_ERR(pSmiClass);
+ SMIERR("Unable to create class, err = %d", ret);
+ return ret;
+ }
+
+ smiDevice = device_create(pSmiClass, NULL, smiDevNo, NULL, "MTK_SMI");
+ smiDeviceUevent = smiDevice;
+
+ return 0;
+}
+
+static int mtk_smi_common_get(struct device *smidev, bool pm)
+{
+ struct mtk_smi_common *smipriv = dev_get_drvdata(smidev);
+ int ret;
+
+ if (pm) {
+ ret = pm_runtime_get_sync(smidev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(smipriv->clk_apb);
+ if (ret) {
+ dev_err(smidev, "Failed to enable the apb clock\n");
+ goto err_put_pm;
+ }
+ ret = clk_prepare_enable(smipriv->clk_smi);
+ if (ret) {
+ dev_err(smidev, "Failed to enable the smi clock\n");
+ goto err_disable_apb;
+ }
+ return ret;
+
+err_disable_apb:
+ clk_disable_unprepare(smipriv->clk_apb);
+err_put_pm:
+ if (pm)
+ pm_runtime_put_sync(smidev);
+ return ret;
+}
+
+static void mtk_smi_common_put(struct device *smidev, bool pm)
+{
+ struct mtk_smi_common *smipriv = dev_get_drvdata(smidev);
+
+ if (pm)
+ pm_runtime_put_sync(smidev);
+ clk_disable_unprepare(smipriv->clk_smi);
+ clk_disable_unprepare(smipriv->clk_apb);
+}
+
+static int _mtk_smi_larb_get(struct device *larbdev, bool pm)
+{
+ struct mtk_smi_larb *larbpriv = dev_get_drvdata(larbdev);
+ int ret;
+
+ ret = mtk_smi_common_get(larbpriv->smi, pm);
+ if (ret)
+ return ret;
+
+ if (pm) {
+ ret = pm_runtime_get_sync(larbdev);
+ if (ret < 0)
+ goto err_put_smicommon;
+ }
+
+ ret = clk_prepare_enable(larbpriv->clk_apb);
+ if (ret) {
+ dev_err(larbdev, "Failed to enable the apb clock\n");
+ goto err_put_pm;
+ }
+
+ ret = clk_prepare_enable(larbpriv->clk_smi);
+ if (ret) {
+ dev_err(larbdev, "Failed to enable the smi clock\n");
+ goto err_disable_apb;
+ }
+
+ return ret;
+
+err_disable_apb:
+ clk_disable_unprepare(larbpriv->clk_apb);
+err_put_pm:
+ if (pm)
+ pm_runtime_put_sync(larbdev);
+err_put_smicommon:
+ mtk_smi_common_put(larbpriv->smi, pm);
+ return ret;
+}
+
+static void _mtk_smi_larb_put(struct device *larbdev, bool pm)
+{
+ struct mtk_smi_larb *larbpriv = dev_get_drvdata(larbdev);
+
+ clk_disable_unprepare(larbpriv->clk_smi);
+ clk_disable_unprepare(larbpriv->clk_apb);
+ if (pm)
+ pm_runtime_put_sync(larbdev);
+ mtk_smi_common_put(larbpriv->smi, pm);
+}
+
+/* The power is alway on during power-domain callback.*/
+static int mtk_smi_larb_runtime_suspend(struct device *dev)
+{
+ unsigned int idx = smi_get_larb_index(dev);
+ int ret;
+
+ if (!fglarbcallback)
+ return 0;
+ if (idx >= SMI_LARB_NR)
+ return 0;
+
+ ret = _mtk_smi_larb_get(dev, false);
+ if (ret) {
+ dev_warn(dev, "runtime suspend clk-warn larb%d\n", idx);
+ return 0;
+ }
+
+ larb_reg_backup(idx);
+
+ _mtk_smi_larb_put(dev, false);
+ dev_dbg(dev, "runtime suspend larb%d\n", idx);
+ return 0;
+}
+
+static int mtk_smi_larb_runtime_resume(struct device *dev)
+{
+ unsigned int idx = smi_get_larb_index(dev);
+ int ret;
+
+ if (!fglarbcallback)
+ return 0;
+ if (idx >= SMI_LARB_NR)
+ return 0;
+
+ ret = _mtk_smi_larb_get(dev, false);
+ if (ret) {
+ dev_warn(dev, "runtime resume clk-warn larb%d\n", idx);
+ return 0;
+ }
+
+ larb_reg_restore(idx);
+
+ _mtk_smi_larb_put(dev, false);
+ dev_dbg(dev, "runtime resume larb%d\n", idx);
+ return 0;
+}
+
+/* modify this to avoid build error when runtime_pm not configured */
+static const struct dev_pm_ops mtk_smi_larb_ops = {
+ .runtime_suspend = mtk_smi_larb_runtime_suspend,
+ .runtime_resume = mtk_smi_larb_runtime_resume,
+};
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larbpriv;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *smi_node;
+ struct platform_device *smi_pdev;
+ int ret, larbid;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ larbpriv = devm_kzalloc(dev, sizeof(*larbpriv), GFP_KERNEL);
+ if (!larbpriv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ larbpriv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(larbpriv->base))
+ return PTR_ERR(larbpriv->base);
+
+ larbpriv->clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(larbpriv->clk_apb))
+ return PTR_ERR(larbpriv->clk_apb);
+
+ larbpriv->clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(larbpriv->clk_smi))
+ return PTR_ERR(larbpriv->clk_smi);
+
+ smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_node)
+ return -EINVAL;
+
+ ret = of_property_read_u32(dev->of_node, "mediatek,larbid", &larbid);
+ if (ret)
+ return ret;
+
+ smi_pdev = of_find_device_by_node(smi_node);
+ of_node_put(smi_node);
+ if (smi_pdev) {
+ larbpriv->smi = &smi_pdev->dev;
+ } else {
+ dev_err(dev, "Failed to get the smi_common device\n");
+ return -EINVAL;
+ }
+
+ smi_data->larb_base[larbid] = (unsigned long)larbpriv->base;
+ smi_data->larb[larbid] = dev;
+ smi_data->larb_nr++;
+
+ SMIMSG("larb %d-cnt %d probe done\n", larbid, smi_data->larb_nr);
+
+ pm_runtime_enable(dev);
+ dev_set_drvdata(dev, larbpriv);
+ return 0;
+}
+
+static int mtk_smi_larb_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi-larb", },
+ { .compatible = "mediatek,mt8127-smi-larb", },
+ {}
+};
+
+static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+ .remove = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+#ifdef CONFIG_PM
+ .pm = &mtk_smi_larb_ops,
+#endif
+ }
+};
+
+static int mtk_smi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_smi_common *smipriv;
+ struct resource *res;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ smipriv = devm_kzalloc(dev, sizeof(*smipriv), GFP_KERNEL);
+ if (!smipriv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ smipriv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(smipriv->base))
+ return PTR_ERR(smipriv->base);
+
+ smipriv->clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(smipriv->clk_apb))
+ return PTR_ERR(smipriv->clk_apb);
+
+ smipriv->clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(smipriv->clk_smi))
+ return PTR_ERR(smipriv->clk_smi);
+
+ smi_data->smicommon = dev;
+ smi_data->smi_common_base = (unsigned long)smipriv->base;
+
+ pm_runtime_enable(dev);
+ dev_set_drvdata(dev, smipriv);
+ return 0;
+}
+
+static int mtk_smi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi",},
+ { .compatible = "mediatek,mt8127-smi",},
+ {}
+};
+
+static struct platform_driver mtk_smi_driver = {
+ .probe = mtk_smi_probe,
+ .remove = mtk_smi_remove,
+ .driver = {
+ .name = "mtk-smi",
+ .of_match_table = mtk_smi_of_ids,
+ }
+};
+
+static int __init smi_init(void)
+{
+ int ret;
+
+ smi_data = kzalloc(sizeof(*smi_data), GFP_KERNEL);
+ if (smi_data == NULL) {
+ SMIERR("Unable to allocate memory for smi driver");
+ return -ENOMEM;
+ }
+
+ ret = platform_driver_register(&mtk_smi_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mtk_smi_larb_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI-LARB driver\n");
+ return ret;
+ }
+
+ ret = smi_dev_register();
+ if (ret) {
+ SMIMSG("register dev/smi failed\n");
+ return ret;
+ }
+
+ memset(g_SMIInfo.pu4ConcurrencyTable, 0, SMI_BWC_SCEN_CNT * sizeof(unsigned int));
+ spin_lock_init(&g_SMIInfo.SMI_lock);
+
+ SMI_DBG_Init();
+
+ #if defined MT73
+ smi_data->smi_priv = &smi_mt8173_priv;
+ #elif defined MT27
+ smi_data->smi_priv = &smi_mt8127_priv;
+ #endif
+
+ SMIMSG("smi_init done\n");
+
+ return 0;
+}
+
+static void __exit smi_exit(void)
+{
+ platform_driver_unregister(&mtk_smi_driver);
+ platform_driver_unregister(&mtk_smi_larb_driver);
+}
+
+static int __init smi_init_late(void)
+{
+ /*init clk/mtcmos should be late while ccf */
+ SMIMSG("smi_init_late-\n");
+
+ smi_common_init();
+
+ return 0;
+}
+
+static void smi_dumpCommonDebugMsg(void)
+{
+ unsigned long u4Base;
+
+ /* SMI COMMON dump */
+ SMIMSG("===SMI common reg dump===\n");
+
+ u4Base = SMI_COMMON_EXT_BASE;
+ SMIMSG("[0x200,0x204,0x208]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x200),
+ M4U_ReadReg32(u4Base, 0x204), M4U_ReadReg32(u4Base, 0x208));
+ SMIMSG("[0x20C,0x210,0x214]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x20C),
+ M4U_ReadReg32(u4Base, 0x210), M4U_ReadReg32(u4Base, 0x214));
+ #ifdef MT73
+ SMIMSG("[0x220,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x220),
+ M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base,
+ 0x238));
+ SMIMSG("[0x400,0x404,0x408]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x400),
+ M4U_ReadReg32(u4Base, 0x404), M4U_ReadReg32(u4Base, 0x408));
+
+ #elif defined MT27
+
+ SMIMSG("[0x218,0x230,0x234,0x238]=[0x%x,0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x218),
+ M4U_ReadReg32(u4Base, 0x230), M4U_ReadReg32(u4Base, 0x234), M4U_ReadReg32(u4Base,
+ 0x238));
+ SMIMSG("[0x400,0x404,]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x400),
+ M4U_ReadReg32(u4Base, 0x404));
+
+ #endif
+
+ /* TBD: M4U should dump these, the offset of MT27 have been checked and same with the followings. */
+/*
+ For VA and PA check:
+ 0x1000C5C0 , 0x1000C5C4, 0x1000C5C8, 0x1000C5CC, 0x1000C5D0
+ u4Base = SMI_COMMON_AO_BASE;
+ SMIMSG("===SMI always on reg dump===\n");
+ SMIMSG("[0x5C0,0x5C4,0x5C8]=[0x%x,0x%x,0x%x]\n" ,
+ M4U_ReadReg32(u4Base , 0x5C0),M4U_ReadReg32(u4Base , 0x5C4),
+ M4U_ReadReg32(u4Base , 0x5C8));
+ SMIMSG("[0x5CC,0x5D0]=[0x%x,0x%x]\n" ,M4U_ReadReg32(u4Base , 0x5CC),
+ M4U_ReadReg32(u4Base , 0x5D0));
+*/
+}
+
+static void smi_dumpLarbDebugMsg(unsigned int u4Index)
+{
+ unsigned long u4Base;
+
+ u4Base = get_larb_base_addr(u4Index);
+
+ if (u4Base == SMI_ERROR_ADDR) {
+ SMIMSG("Doesn't support reg dump for Larb%d\n", u4Index);
+ } else {
+ SMIMSG("===SMI LARB%d reg dump===\n", u4Index);
+
+ #ifdef MT73
+ SMIMSG("[0x0,0x8,0x10]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
+ M4U_ReadReg32(u4Base, 0x8), M4U_ReadReg32(u4Base, 0x10));
+ SMIMSG("[0x24,0x50,0x60]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x24),
+ M4U_ReadReg32(u4Base, 0x50), M4U_ReadReg32(u4Base, 0x60));
+ SMIMSG("[0xa0,0xa4,0xa8]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xa0),
+ M4U_ReadReg32(u4Base, 0xa4), M4U_ReadReg32(u4Base, 0xa8));
+ SMIMSG("[0xac,0xb0,0xb4]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xac),
+ M4U_ReadReg32(u4Base, 0xb0), M4U_ReadReg32(u4Base, 0xb4));
+ SMIMSG("[0xb8,0xbc,0xc0]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xb8),
+ M4U_ReadReg32(u4Base, 0xbc), M4U_ReadReg32(u4Base, 0xc0));
+ SMIMSG("[0xc8,0xcc]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0xc8),
+ M4U_ReadReg32(u4Base, 0xcc));
+ #elif defined MT27
+ {
+ unsigned int u4Offset = 0;
+
+ SMIMSG("[0x0,0x10,0x60]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x0),
+ M4U_ReadReg32(u4Base, 0x10), M4U_ReadReg32(u4Base, 0x60));
+ SMIMSG("[0x64,0x8c,0x450]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x64),
+ M4U_ReadReg32(u4Base, 0x8c), M4U_ReadReg32(u4Base, 0x450));
+ SMIMSG("[0x454,0x600,0x604]=[0x%x,0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x454),
+ M4U_ReadReg32(u4Base, 0x600), M4U_ReadReg32(u4Base, 0x604));
+ SMIMSG("[0x610,0x614]=[0x%x,0x%x]\n", M4U_ReadReg32(u4Base, 0x610),
+ M4U_ReadReg32(u4Base, 0x614));
+
+ for (u4Offset = 0x200; u4Offset < 0x200 + SMI_LARB_NR * 4; u4Offset += 4)
+ SMIMSG("[0x%x = 0x%x ]\n", u4Offset, M4U_ReadReg32(u4Base , u4Offset));
+ }
+ #endif
+ }
+}
+
+static void smi_dump_format(unsigned long base, unsigned int from, unsigned int to)
+{
+ int i, j, left;
+ unsigned int value[8];
+
+ for (i = from; i <= to; i += 32) {
+ for (j = 0; j < 8; j++)
+ value[j] = M4U_ReadReg32(base, i + j * 4);
+
+ SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i, value[0], value[1], value[2], value[3],
+ value[4], value[5], value[6], value[7]);
+ }
+
+ left = ((from - to) / 4 + 1) % 8;
+
+ if (left) {
+ memset(value, 0, 8 * sizeof(unsigned int));
+
+ for (j = 0; j < left; j++)
+ value[j] = M4U_ReadReg32(base, i - 32 + j * 4);
+
+ SMIMSG2("%8x %x %x %x %x %x %x %x %x\n", i - 32 + j * 4, value[0], value[1],
+ value[2], value[3], value[4], value[5], value[6], value[7]);
+ }
+}
+
+static void smi_dumpLarb(unsigned int index)
+{
+ unsigned long u4Base;
+
+ u4Base = get_larb_base_addr(index);
+
+ if (u4Base == SMI_ERROR_ADDR) {
+ SMIMSG2("Doesn't support reg dump for Larb%d\n", index);
+
+ } else {
+ SMIMSG2("===SMI LARB%d reg dump base 0x%lx===\n", index, u4Base);
+
+ smi_dump_format(u4Base, 0, 0x434);
+ smi_dump_format(u4Base, 0xF00, 0xF0C);
+ }
+}
+
+static void smi_dumpCommon(void)
+{
+ SMIMSG2("===SMI COMMON reg dump base 0x%lx===\n", SMI_COMMON_EXT_BASE);
+
+ smi_dump_format(SMI_COMMON_EXT_BASE, 0x1A0, 0x418);
+}
+
+void smi_dumpDebugMsg(void)
+{
+ unsigned int u4Index;
+
+ /* SMI COMMON dump */
+ smi_dumpCommonDebugMsg();
+
+ /* dump all SMI LARB */
+ for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++)
+ smi_dumpLarbDebugMsg(u4Index);
+}
+
+int smi_debug_bus_hanging_detect(unsigned int larbs, int show_dump)
+{
+#ifdef CONFIG_MTK_SMI_EXT
+ int i = 0;
+ int dump_time = 0;
+ int is_smi_issue = 0;
+ int status_code = 0;
+ /* Keep the dump result */
+ unsigned char smi_common_busy_count = 0;
+ /*volatile */ unsigned int reg_temp = 0;
+ unsigned char smi_larb_busy_count[SMI_LARB_NR] = { 0 };
+ unsigned char smi_larb_mmu_status[SMI_LARB_NR] = { 0 };
+
+ /* dump resister and save resgister status */
+ for (dump_time = 0; dump_time < 5; dump_time++) {
+ unsigned int u4Index = 0;
+
+ reg_temp = M4U_ReadReg32(SMI_COMMON_EXT_BASE, 0x400);
+ if ((reg_temp & (1 << 30)) == 0) {
+ /* smi common is busy */
+ smi_common_busy_count++;
+ }
+ /* Dump smi common regs */
+ if (show_dump != 0)
+ smi_dumpCommonDebugMsg();
+
+ for (u4Index = 0; u4Index < SMI_LARB_NR; u4Index++) {
+ unsigned long u4Base = get_larb_base_addr(u4Index);
+
+ if (u4Base != SMI_ERROR_ADDR) {
+ reg_temp = M4U_ReadReg32(u4Base, 0x0);
+ if (reg_temp != 0) {
+ /* Larb is busy */
+ smi_larb_busy_count[u4Index]++;
+ }
+ smi_larb_mmu_status[u4Index] = M4U_ReadReg32(u4Base, 0xa0);
+ if (show_dump != 0)
+ smi_dumpLarbDebugMsg(u4Index);
+ }
+ }
+
+ }
+
+ /* Show the checked result */
+ for (i = 0; i < SMI_LARB_NR; i++) { /* Check each larb */
+ if (SMI_DGB_LARB_SELECT(larbs, i)) {
+ /* larb i has been selected */
+ /* Get status code */
+
+ if (smi_larb_busy_count[i] == 5) { /* The larb is always busy */
+ if (smi_common_busy_count == 5) { /* smi common is always busy */
+ status_code = 1;
+ } else if (smi_common_busy_count == 0) { /* smi common is always idle */
+ status_code = 2;
+ } else {
+ status_code = 5; /* smi common is sometimes busy and idle */
+ }
+ } else if (smi_larb_busy_count[i] == 0) { /* The larb is always idle */
+ if (smi_common_busy_count == 5) { /* smi common is always busy */
+ status_code = 3;
+ } else if (smi_common_busy_count == 0) { /* smi common is always idle */
+ status_code = 4;
+ } else {
+ status_code = 6; /* smi common is sometimes busy and idle */
+ }
+ } else { /* sometime the larb is busy */
+ if (smi_common_busy_count == 5) { /* smi common is always busy */
+ status_code = 7;
+ } else if (smi_common_busy_count == 0) { /* smi common is always idle */
+ status_code = 8;
+ } else {
+ status_code = 9; /* smi common is sometimes busy and idle */
+ }
+ }
+
+ /* Send the debug message according to the final result */
+ switch (status_code) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ case 8:
+ SMIMSG
+ ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> Check engine's state first",
+ i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
+ SMIMSG
+ ("If the engine is waiting for Larb%ds' response, it needs SMI HW's check",
+ i);
+ break;
+ case 2:
+ if (smi_larb_mmu_status[i] == 0) {
+ SMIMSG("Larb%d Busy=%d/5, Common Busy=%d/5,status=%d=>Check engine state first",
+ i, smi_larb_busy_count[i], smi_common_busy_count,
+ status_code);
+ SMIMSG("If the engine is waiting for Larb%ds' response,it needs SMI HW's check",
+ i);
+ } else {
+ SMIMSG("Larb%d Busy=%d/5, Common Busy=%d/5, status=%d==>MMU port config error",
+ i, smi_larb_busy_count[i], smi_common_busy_count,
+ status_code);
+ is_smi_issue = 1;
+ }
+ break;
+ case 4:
+ case 6:
+ case 9:
+ SMIMSG
+ ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> not SMI issue",
+ i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
+ break;
+ default:
+ SMIMSG
+ ("Larb%d Busy=%d/5, SMI Common Busy=%d/5, status=%d ==> status unknown",
+ i, smi_larb_busy_count[i], smi_common_busy_count, status_code);
+ break;
+ }
+ }
+ }
+
+ return is_smi_issue;
+#endif
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_COMPAT)
+/* 32 bits process ioctl support: */
+/* This is prepared for the future extension since currently the sizes of 32 bits */
+/* and 64 bits smi parameters are the same. */
+
+typedef struct {
+ compat_int_t scenario;
+ compat_int_t b_on_off; /* 0 : exit this scenario , 1 : enter this scenario */
+} MTK_SMI_COMPAT_BWC_CONFIG;
+
+typedef struct {
+ compat_int_t property;
+ compat_int_t value1;
+ compat_int_t value2;
+} MTK_SMI_COMPAT_BWC_INFO_SET;
+
+typedef struct {
+ compat_uint_t flag; /* Reserved */
+ compat_int_t concurrent_profile;
+ compat_int_t sensor_size[2];
+ compat_int_t video_record_size[2];
+ compat_int_t display_size[2];
+ compat_int_t tv_out_size[2];
+ compat_int_t fps;
+ compat_int_t video_encode_codec;
+ compat_int_t video_decode_codec;
+ compat_int_t hw_ovl_limit;
+} MTK_SMI_COMPAT_BWC_MM_INFO;
+
+#define COMPAT_MTK_IOC_SMI_BWC_CONFIG MTK_IOW(24, MTK_SMI_COMPAT_BWC_CONFIG)
+#define COMPAT_MTK_IOC_SMI_BWC_INFO_SET MTK_IOWR(28, MTK_SMI_COMPAT_BWC_INFO_SET)
+#define COMPAT_MTK_IOC_SMI_BWC_INFO_GET MTK_IOWR(29, MTK_SMI_COMPAT_BWC_MM_INFO)
+
+static int compat_get_smi_bwc_config_struct(MTK_SMI_COMPAT_BWC_CONFIG __user *data32,
+ MTK_SMI_BWC_CONFIG __user *data)
+{
+
+ compat_int_t i;
+ int err;
+
+ /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+ err = get_user(i, &(data32->scenario));
+ err |= put_user(i, &(data->scenario));
+ err |= get_user(i, &(data32->b_on_off));
+ err |= put_user(i, &(data->b_on_off));
+
+ return err;
+}
+
+static int compat_get_smi_bwc_mm_info_set_struct(MTK_SMI_COMPAT_BWC_INFO_SET __user *data32,
+ MTK_SMI_BWC_INFO_SET __user *data)
+{
+
+ compat_int_t i;
+ int err;
+
+ /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+ err = get_user(i, &(data32->property));
+ err |= put_user(i, &(data->property));
+ err |= get_user(i, &(data32->value1));
+ err |= put_user(i, &(data->value1));
+ err |= get_user(i, &(data32->value2));
+ err |= put_user(i, &(data->value2));
+
+ return err;
+}
+
+static int compat_get_smi_bwc_mm_info_struct(MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
+ MTK_SMI_BWC_MM_INFO __user *data)
+{
+ compat_uint_t u;
+ compat_int_t i;
+ compat_int_t p[2];
+ int err;
+
+ /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+ err = get_user(u, &(data32->flag));
+ err |= put_user(u, &(data->flag));
+ err |= get_user(i, &(data32->concurrent_profile));
+ err |= put_user(i, &(data->concurrent_profile));
+ err |= copy_from_user(p, &(data32->sensor_size), sizeof(p));
+ err |= copy_to_user(&(data->sensor_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data32->video_record_size), sizeof(p));
+ err |= copy_to_user(&(data->video_record_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data32->display_size), sizeof(p));
+ err |= copy_to_user(&(data->display_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data32->tv_out_size), sizeof(p));
+ err |= copy_to_user(&(data->tv_out_size), p, sizeof(p));
+ err |= get_user(i, &(data32->fps));
+ err |= put_user(i, &(data->fps));
+ err |= get_user(i, &(data32->video_encode_codec));
+ err |= put_user(i, &(data->video_encode_codec));
+ err |= get_user(i, &(data32->video_decode_codec));
+ err |= put_user(i, &(data->video_decode_codec));
+ err |= get_user(i, &(data32->hw_ovl_limit));
+ err |= put_user(i, &(data->hw_ovl_limit));
+
+
+ return err;
+}
+
+static int compat_put_smi_bwc_mm_info_struct(MTK_SMI_COMPAT_BWC_MM_INFO __user *data32,
+ MTK_SMI_BWC_MM_INFO __user *data)
+{
+
+ compat_uint_t u;
+ compat_int_t i;
+ compat_int_t p[2];
+ int err;
+
+ /* since the int sizes of 32 A32 and A64 are equal so we don't convert them actually here */
+ err = get_user(u, &(data->flag));
+ err |= put_user(u, &(data32->flag));
+ err |= get_user(i, &(data->concurrent_profile));
+ err |= put_user(i, &(data32->concurrent_profile));
+ err |= copy_from_user(p, &(data->sensor_size), sizeof(p));
+ err |= copy_to_user(&(data32->sensor_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data->video_record_size), sizeof(p));
+ err |= copy_to_user(&(data32->video_record_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data->display_size), sizeof(p));
+ err |= copy_to_user(&(data32->display_size), p, sizeof(p));
+ err |= copy_from_user(p, &(data->tv_out_size), sizeof(p));
+ err |= copy_to_user(&(data32->tv_out_size), p, sizeof(p));
+ err |= get_user(i, &(data->fps));
+ err |= put_user(i, &(data32->fps));
+ err |= get_user(i, &(data->video_encode_codec));
+ err |= put_user(i, &(data32->video_encode_codec));
+ err |= get_user(i, &(data->video_decode_codec));
+ err |= put_user(i, &(data32->video_decode_codec));
+ err |= get_user(i, &(data->hw_ovl_limit));
+ err |= put_user(i, &(data32->hw_ovl_limit));
+ return err;
+}
+
+long MTK_SMI_COMPAT_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case COMPAT_MTK_IOC_SMI_BWC_CONFIG:
+ {
+ if (COMPAT_MTK_IOC_SMI_BWC_CONFIG == MTK_IOC_SMI_BWC_CONFIG) {
+ SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_CONFIG");
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ } else {
+
+ MTK_SMI_COMPAT_BWC_CONFIG __user *data32;
+ MTK_SMI_BWC_CONFIG __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_CONFIG));
+
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_smi_bwc_config_struct(data32, data);
+ if (err)
+ return err;
+
+ ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_CONFIG,
+ (unsigned long)data);
+ return ret;
+ }
+ }
+
+ case COMPAT_MTK_IOC_SMI_BWC_INFO_SET:
+ {
+
+ if (COMPAT_MTK_IOC_SMI_BWC_INFO_SET == MTK_IOC_SMI_BWC_INFO_SET) {
+ SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_SET");
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ } else {
+
+ MTK_SMI_COMPAT_BWC_INFO_SET __user *data32;
+ MTK_SMI_BWC_INFO_SET __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_INFO_SET));
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_smi_bwc_mm_info_set_struct(data32, data);
+ if (err)
+ return err;
+
+ return filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_SET,
+ (unsigned long)data);
+ }
+ }
+ break;
+
+ case COMPAT_MTK_IOC_SMI_BWC_INFO_GET:
+ {
+ if (COMPAT_MTK_IOC_SMI_BWC_INFO_GET == MTK_IOC_SMI_BWC_INFO_GET) {
+ SMIMSG("Optimized compct IOCTL: COMPAT_MTK_IOC_SMI_BWC_INFO_GET");
+ return filp->f_op->unlocked_ioctl(filp, cmd,
+ (unsigned long)compat_ptr(arg));
+ } else {
+ MTK_SMI_COMPAT_BWC_MM_INFO __user *data32;
+ MTK_SMI_BWC_MM_INFO __user *data;
+ int err;
+
+ data32 = compat_ptr(arg);
+ data = compat_alloc_user_space(sizeof(MTK_SMI_BWC_MM_INFO));
+
+ if (data == NULL)
+ return -EFAULT;
+
+ err = compat_get_smi_bwc_mm_info_struct(data32, data);
+ if (err)
+ return err;
+
+ ret = filp->f_op->unlocked_ioctl(filp, MTK_IOC_SMI_BWC_INFO_GET,
+ (unsigned long)data);
+
+ err = compat_put_smi_bwc_mm_info_struct(data32, data);
+
+ if (err)
+ return err;
+
+ return ret;
+ }
+ }
+ break;
+
+ case MTK_IOC_SMI_DUMP_LARB:
+ case MTK_IOC_SMI_DUMP_COMMON:
+ case MTK_IOC_MMDVFS_CMD:
+
+ return filp->f_op->unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+}
+
+#endif
+
+module_init(smi_init);
+module_exit(smi_exit);
+late_initcall(smi_init_late);
+
+module_param_named(debug_level, smi_debug_level, uint, S_IRUGO | S_IWUSR);
+module_param_named(tuning_mode, smi_tuning_mode, uint, S_IRUGO | S_IWUSR);
+module_param_named(wifi_disp_transaction, wifi_disp_transaction, uint, S_IRUGO | S_IWUSR);
+
+MODULE_DESCRIPTION("MTK SMI driver");
+MODULE_AUTHOR("Glory Hung<glory.hung@mediatek.com>");
+MODULE_AUTHOR("Yong Wu<yong.wu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mediatek/smi/variant/smi_variant_config_8127.c b/drivers/misc/mediatek/smi/variant/smi_variant_config_8127.c
new file mode 100644
index 000000000..6c333e8b1
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_variant_config_8127.c
@@ -0,0 +1,220 @@
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kobject.h>
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include "smi_reg.h"
+#include "smi_common.h"
+#include "smi_priv.h"
+
+#define SMI_LARB0_PORT_NUM 10
+#define SMI_LARB1_PORT_NUM 7
+#define SMI_LARB2_PORT_NUM 17
+
+static void initSetting(struct mtk_smi_data *smidev, bool *default_saved, /* default/"GPU" scenario profile for MT8127 */
+ u32 *default_smi_val, unsigned int larbid) /* default_saved/default_smi_val/larbid are unused in this variant */
+{
+
+
+ SMIMSG("Current Setting: GPU - new");
+ if (!SMI_COMMON_EXT_BASE || !LARB0_BASE) { /* register bases are filled in by the smi/smi_larb probe */
+ SMIMSG("smi and smi_larb should have been probe first\n");
+ return;
+ }
+ /* M4U thresholds -- here: 3 non-ultra write, 4 write command, 4 non-ultra read, 5 ultra read */
+ M4U_WriteReg32(REG_SMI_M4U_TH, 0, ((0x3 << 15) + (0x4 << 10) + (0x4 << 5) + 0x5));
+ /*
+ * Level 1 LARB, apply new outstanding control method, 1/4 bandwidth
+ * limiter overshoot control , enable warb channel
+ */
+ M4U_WriteReg32(REG_SMI_L1LEN, 0, 0xB);
+ /*
+ * total 8 commands between smi common to M4U, 12 non ultra commands
+ * between smi common to M4U, 1 command can be in the write AXI slice for all LARBs
+ */
+ M4U_WriteReg32(REG_SMI_READ_FIFO_TH, 0, ((0x7 << 11) + (0x8 << 6) + 0x3F));
+ /* LARB0 (display + MDP) per-port outstanding limits */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0xC); /* DISP_OVL_0 */
+ M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); /* DISP_RDMA_1 */
+ M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); /* DISP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x20C, 0x2); /* DISP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); /* MM_CMDQ */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x5); /* MDP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); /* MDP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x21C, 0x3); /* MDP_ROT */
+ M4U_WriteReg32(LARB0_BASE, 0x220, 0x1); /* MDP_ROTCO */
+ M4U_WriteReg32(LARB0_BASE, 0x224, 0x1); /* MDP ROTVO */
+ /* LARB1 (VDEC) per-port outstanding limits */
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1); /* HW_VDEC_MC_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x1); /* HW_VDEC_PP_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* HW_VDEC_AVC_MV-EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x20C, 0x1); /* HW_VDEC_PRED_RD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* HW_VDEC_PRED_WR_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); /* HW_VDEC_VLD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* HW_VDEC_PP_INT */
+ /* LARB2 (ISP + VENC + JPEG) per-port outstanding limits */
+ M4U_WriteReg32(LARB2_BASE, 0x200, 0x1); /* CAM_IMGO */
+ M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); /* CAM_IMG2O */
+ M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); /* CAM_LSCI */
+ M4U_WriteReg32(LARB2_BASE, 0x20C, 0x1); /* CAM_IMGI */
+ M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); /* CAM_ESFKO */
+ M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); /* CAM_AAO */
+ M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); /* CAM_LCEI */
+ M4U_WriteReg32(LARB2_BASE, 0x21C, 0x1); /* CAM_LCSO */
+ M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); /* JPGENC_RDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); /* JPGENC_BSDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); /* VENC_SV_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x22C, 0x1); /* VENC_RD_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); /* VENC_RCPU */
+ M4U_WriteReg32(LARB2_BASE, 0x234, 0x1); /* VENC_REC_FRM */
+ M4U_WriteReg32(LARB2_BASE, 0x238, 0x1); /* VENC_REF_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x23C, 0x1); /* VENC_REF_CHROMA */
+ M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); /* VENC_BSDMA; 0x240 is skipped here -- TODO confirm intentional */
+ M4U_WriteReg32(LARB2_BASE, 0x248, 0x1); /* VENC_CUR_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x24C, 0x1); /* VENC_CUR_CHROMA */
+}
+
+static void vpSetting(struct mtk_smi_data *smidev) /* "vp" scenario profile (presumably video playback -- TODO confirm) */
+{
+ /* 2 non-ultra write, 3 write command , 4 non-ultra read , 5 ultra read */
+ M4U_WriteReg32(REG_SMI_M4U_TH, 0, ((0x2 << 15) + (0x3 << 10) + (0x4 << 5) + 0x5));
+ /*
+ * Level 1 LARB, apply new outstanding control method, 1/4 bandwidth limiter
+ * overshoot control , enable warb channel
+ */
+ M4U_WriteReg32(REG_SMI_L1LEN, 0, 0x1B);
+ /*
+ * total 8 commands between smi common to M4U, 12 non ultra commands
+ * between smi common to M4U, 1 command can be in the write AXI slice for all LARBs
+ */
+ M4U_WriteReg32(REG_SMI_READ_FIFO_TH, 0, 0x323F);
+ /* Level-1 arbiter bandwidth limiters, one per LARB */
+ M4U_WriteReg32(REG_SMI_L1ARB0, 0, 0xC3A); /* 1111/4096 maximum grant counts, soft limiter */
+ M4U_WriteReg32(REG_SMI_L1ARB1, 0, 0x9E8); /* 503/4096 maximum grant counts, soft limiter */
+ M4U_WriteReg32(REG_SMI_L1ARB2, 0, 0x943); /* 353/4096 maximum grant counts, soft limiter */
+ /* per-port outstanding limits, LARB0 (display + MDP) */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0xC); /* DISP_OVL_0 */
+ M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); /* DISP_RDMA_1 */
+ M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); /* DISP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x20C, 0x2); /* DISP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); /* MM_CMDQ */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x5); /* MDP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x218, 0x1); /* MDP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x21C, 0x3); /* MDP_ROT */
+ M4U_WriteReg32(LARB0_BASE, 0x220, 0x1); /* MDP_ROTCO */
+ M4U_WriteReg32(LARB0_BASE, 0x224, 0x1); /* MDP ROTVO */
+ /* per-port outstanding limits, LARB1 (VDEC) -- playback gives the decoder larger budgets */
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x6); /* HW_VDEC_MC_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x2); /* HW_VDEC_PP_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* HW_VDEC_AVC_MV-EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x20C, 0x3); /* HW_VDEC_PRED_RD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x3); /* HW_VDEC_PRED_WR_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); /* HW_VDEC_VLD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* HW_VDEC_PP_INT */
+ /* per-port outstanding limits, LARB2 (ISP + VENC + JPEG), all at the minimum */
+ M4U_WriteReg32(LARB2_BASE, 0x200, 0x1); /* CAM_IMGO */
+ M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); /* CAM_IMG2O */
+ M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); /* CAM_LSCI */
+ M4U_WriteReg32(LARB2_BASE, 0x20C, 0x1); /* CAM_IMGI */
+ M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); /* CAM_ESFKO */
+ M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); /* CAM_AAO */
+ M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); /* CAM_LCEI */
+ M4U_WriteReg32(LARB2_BASE, 0x21C, 0x1); /* CAM_LCSO */
+ M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); /* JPGENC_RDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); /* JPGENC_BSDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); /* VENC_SV_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x22C, 0x1); /* VENC_RD_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); /* VENC_RCPU */
+ M4U_WriteReg32(LARB2_BASE, 0x234, 0x1); /* VENC_REC_FRM */
+ M4U_WriteReg32(LARB2_BASE, 0x238, 0x1); /* VENC_REF_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x23C, 0x1); /* VENC_REF_CHROMA */
+ M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); /* VENC_BSDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x248, 0x1); /* VENC_CUR_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x24C, 0x1); /* VENC_CUR_CHROMA */
+
+}
+
+static void vrSetting(struct mtk_smi_data *smidev) /* "vr" scenario profile (presumably video recording -- TODO confirm) */
+{
+ /* 2 non-ultra write, 3 write command , 4 non-ultra read , 5 ultra read */
+ M4U_WriteReg32(REG_SMI_M4U_TH, 0, ((0x2 << 15) + (0x3 << 10) + (0x4 << 5) + 0x5));
+ /*
+ * Level 1 LARB, apply new outstanding control method, 1/4 bandwidth limiter
+ * overshoot control , enable warb channel
+ */
+ M4U_WriteReg32(REG_SMI_L1LEN, 0, 0xB);
+ /*
+ * total 8 commands between smi common to M4U, 12 non ultra commands between smi common
+ * to M4U, 1 command can be in the write AXI slice for all LARBs
+ */
+ M4U_WriteReg32(REG_SMI_READ_FIFO_TH, 0, ((0x6 << 11) + (0x8 << 6) + 0x3F));
+ /* Level-1 arbiter bandwidth limiters, one per LARB */
+ M4U_WriteReg32(REG_SMI_L1ARB0, 0, 0xC26); /* soft limiter; "1111/4096" note was copied from vpSetting's 0xC3A -- TODO verify count */
+ M4U_WriteReg32(REG_SMI_L1ARB1, 0, 0x943); /* soft limiter; vpSetting labels this same value 353/4096, not 503 -- TODO verify */
+ M4U_WriteReg32(REG_SMI_L1ARB2, 0, 0xD4F); /* 1359/4096 maximum grant counts, soft limiter */
+ /* per-port outstanding limits, LARB0 (display + MDP) */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0xC); /* DISP_OVL_0 */
+ M4U_WriteReg32(LARB0_BASE, 0x204, 0x1); /* DISP_RDMA_1 */
+ M4U_WriteReg32(LARB0_BASE, 0x208, 0x1); /* DISP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x20C, 0x1); /* DISP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x210, 0x1); /* MM_CMDQ */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x2); /* MDP_RDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x218, 0x2); /* MDP_WDMA */
+ M4U_WriteReg32(LARB0_BASE, 0x21C, 0x4); /* MDP_ROT */
+ M4U_WriteReg32(LARB0_BASE, 0x220, 0x2); /* MDP_ROTCO */
+ M4U_WriteReg32(LARB0_BASE, 0x224, 0x2); /* MDP ROTVO */
+ /* per-port outstanding limits, LARB1 (VDEC), all at the minimum */
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1); /* HW_VDEC_MC_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x1); /* HW_VDEC_PP_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* HW_VDEC_AVC_MV-EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x20C, 0x1); /* HW_VDEC_PRED_RD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* HW_VDEC_PRED_WR_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x1); /* HW_VDEC_VLD_EXT */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* HW_VDEC_PP_INT */
+ /* per-port outstanding limits, LARB2 (ISP + VENC + JPEG) -- recording gives camera/encoder larger budgets */
+ M4U_WriteReg32(LARB2_BASE, 0x200, 0x6); /* CAM_IMGO */
+ M4U_WriteReg32(LARB2_BASE, 0x204, 0x1); /* CAM_IMG2O */
+ M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); /* CAM_LSCI */
+ M4U_WriteReg32(LARB2_BASE, 0x20C, 0x4); /* CAM_IMGI */
+ M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); /* CAM_ESFKO */
+ M4U_WriteReg32(LARB2_BASE, 0x214, 0x1); /* CAM_AAO */
+ M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); /* CAM_LCEI */
+ M4U_WriteReg32(LARB2_BASE, 0x21C, 0x1); /* CAM_LCSO */
+ M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); /* JPGENC_RDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x224, 0x1); /* JPGENC_BSDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x228, 0x1); /* VENC_SV_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x22C, 0x1); /* VENC_RD_COMV */
+ M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); /* VENC_RCPU */
+ M4U_WriteReg32(LARB2_BASE, 0x234, 0x2); /* VENC_REC_FRM */
+ M4U_WriteReg32(LARB2_BASE, 0x238, 0x4); /* VENC_REF_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x23C, 0x2); /* VENC_REF_CHROMA */
+ M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); /* VENC_BSDMA */
+ M4U_WriteReg32(LARB2_BASE, 0x248, 0x2); /* VENC_CUR_LUMA */
+ M4U_WriteReg32(LARB2_BASE, 0x24C, 0x1); /* VENC_CUR_CHROMA */
+}
+
+static void hdmiSetting(struct mtk_smi_data *smidev)
+{ /* intentionally empty: no HDMI-specific SMI tuning for this variant */
+}
+
+static void hdmi4kSetting(struct mtk_smi_data *smidev)
+{ /* intentionally empty: no HDMI-4K-specific SMI tuning for this variant */
+}
+
+const struct mtk_smi_priv smi_mt8127_priv = { /* per-SoC SMI scenario callbacks (MT8127: 3 LARBs) */
+	.larb_port_num = { SMI_LARB0_PORT_NUM, SMI_LARB1_PORT_NUM, SMI_LARB2_PORT_NUM },
+	.init_setting = initSetting, /* default/"GPU" profile */
+	.vp_setting = vpSetting,
+	.vr_setting = vrSetting,
+	.hdmi_setting = hdmiSetting, /* no-op on this SoC */
+	.hdmi_4k_setting = hdmi4kSetting, /* no-op on this SoC */
+};
diff --git a/drivers/misc/mediatek/smi/variant/smi_variant_config_8173.c b/drivers/misc/mediatek/smi/variant/smi_variant_config_8173.c
new file mode 100644
index 000000000..143830bde
--- /dev/null
+++ b/drivers/misc/mediatek/smi/variant/smi_variant_config_8173.c
@@ -0,0 +1,258 @@
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/kobject.h>
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include "smi_reg.h"
+#include "smi_common.h"
+#include "smi_priv.h"
+
+static void initSetting(struct mtk_smi_data *smidev, bool *default_saved, /* MT8173 default profile; also restores HW reset values */
+ u32 *default_smi_val, unsigned int larbid) /* larbid is unused in this variant */
+{
+
+ /* save default larb regs (only once) so scenario switches can restore them later */
+ if (!(*default_saved)) {
+ SMIMSG("Save default config:\n");
+ default_smi_val[0] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB0);
+ default_smi_val[1] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB1);
+ default_smi_val[2] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB2);
+ default_smi_val[3] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB3);
+ default_smi_val[4] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB4);
+ default_smi_val[5] = M4U_ReadReg32(SMI_COMMON_EXT_BASE,
+ REG_OFFSET_SMI_L1ARB5);
+ SMIMSG("l1arb[0-2]= 0x%x, 0x%x, 0x%x\n", default_smi_val[0],
+ default_smi_val[1], default_smi_val[2]);
+ SMIMSG("l1arb[3-5]= 0x%x, 0x%x, 0x%x\n", default_smi_val[3], /* fixed label (was "[3-4]") and missing comma: three values are printed */
+ default_smi_val[4], default_smi_val[5]);
+
+ *default_saved = true;
+ }
+ /* Keep the HW's init setting in REG_SMI_L1ARB0 ~ REG_SMI_L1ARB5 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, default_smi_val[0]);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, default_smi_val[1]);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, default_smi_val[2]);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, default_smi_val[3]);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, default_smi_val[4]);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB5, default_smi_val[5]);
+
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x200, 0x1b);
+ /* disp(larb0+larb4): emi0, other:emi1 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x220, (0x1<<0) | (0x1<<8));
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x234,
+ (0x1 << 31) + (0x13 << 26) + (0x14 << 21) + (0x0 << 20) + (0x2 << 15) +
+ (0x3 << 10) + (0x4 << 5) + 0x5);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x238,
+ (0x2 << 25) + (0x3 << 20) + (0x4 << 15) + (0x5 << 10) + (0x6 << 5) + 0x8);
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, 0x230, 0x1f + (0x8 << 5) + (0x6 << 10));
+
+ /* Set VC priority: MMSYS = ISP > VENC > VDEC = MJC (lower value = higher priority) */
+ M4U_WriteReg32(LARB0_BASE, 0x20, 0x0); /* MMSYS */
+ M4U_WriteReg32(LARB1_BASE, 0x20, 0x2); /* VDEC */
+ M4U_WriteReg32(LARB2_BASE, 0x20, 0x0); /* ISP */
+ M4U_WriteReg32(LARB3_BASE, 0x20, 0x1); /* VENC */
+ M4U_WriteReg32(LARB4_BASE, 0x20, 0x0); /* DISP1 */
+ M4U_WriteReg32(LARB5_BASE, 0x20, 0x1); /* VENC2 */
+
+ /* turn off EMI empty double OSTD (read-modify-write bit 2 on every LARB) */
+ M4U_WriteReg32(LARB0_BASE, 0x2c, M4U_ReadReg32(LARB0_BASE, 0x2c) | (1 << 2));
+ M4U_WriteReg32(LARB1_BASE, 0x2c, M4U_ReadReg32(LARB1_BASE, 0x2c) | (1 << 2));
+ M4U_WriteReg32(LARB2_BASE, 0x2c, M4U_ReadReg32(LARB2_BASE, 0x2c) | (1 << 2));
+ M4U_WriteReg32(LARB3_BASE, 0x2c, M4U_ReadReg32(LARB3_BASE, 0x2c) | (1 << 2));
+ M4U_WriteReg32(LARB4_BASE, 0x2c, M4U_ReadReg32(LARB4_BASE, 0x2c) | (1 << 2));
+ M4U_WriteReg32(LARB5_BASE, 0x2c, M4U_ReadReg32(LARB5_BASE, 0x2c) | (1 << 2));
+
+ /* confirm. sometimes the reg can not be wrote while its clock is disable */
+ if ((M4U_ReadReg32(LARB1_BASE, 0x20) != 0x2) ||
+ (M4U_ReadReg32(LARB0_BASE, 0x20) != 0x0)) {
+ SMIMSG("warning setting failed. please check clk. 0x%x-0x%x\n",
+ M4U_ReadReg32(LARB1_BASE, 0x20),
+ M4U_ReadReg32(LARB0_BASE, 0x20));
+ }
+}
+
+static void vpSetting(struct mtk_smi_data *smidev) /* "vp" 4K scenario profile for MT8173 (presumably video playback -- TODO confirm) */
+{
+ /* VP 4K: L1 arbiter bandwidth split per LARB */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x17C0); /* LARB0, DISP+MDP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x161B); /* LARB1, VDEC */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x1000); /* LARB2, ISP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x1000); /* LARB3, VENC+JPG */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, 0x17C0); /* LARB4, DISP2+MDP2 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB5, 0x1000); /* LARB5, VENC2 */
+ /* per-port outstanding limits for the active display/MDP/VDEC ports */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0x18); /* ovl_ch0_0/1 */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x4); /* mdp_rdma0; min(4,5) */
+ M4U_WriteReg32(LARB0_BASE, 0x21c, 0x5); /* mdp_wrot0 */
+
+ M4U_WriteReg32(LARB4_BASE, 0x200, 0x8); /* ovl_ch1_0/1 */
+ M4U_WriteReg32(LARB4_BASE, 0x210, 0x4); /* mdp_rdma1; min(4,5) */
+ M4U_WriteReg32(LARB4_BASE, 0x214, 0x3); /* mdp_wrot1 */
+
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1f); /* port#0, mc */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x06); /* port#1, pp */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* port#2, ufo */
+ M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); /* port#3, vld */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* port#4, vld2 */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x2); /* port#5, mv */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* port#6, pred rd */
+ M4U_WriteReg32(LARB1_BASE, 0x21c, 0x1); /* port#7, pred wr */
+ M4U_WriteReg32(LARB1_BASE, 0x220, 0x1); /* port#8, ppwrap */
+
+}
+
+static void vrSetting(struct mtk_smi_data *smidev) /* "vr" 4K scenario profile for MT8173 (presumably video recording -- TODO confirm) */
+{
+ /* VR 4K: L1 arbiter bandwidth split per LARB */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x1614); /* LARB0, DISP+MDP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x1000); /* LARB1, VDEC */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x11F7); /* LARB2, ISP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x1584); /* LARB3, VENC+JPG */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, 0x1614); /* LARB4, DISP2+MDP2 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB5, 0x1584); /* LARB5, VENC2 */
+ /* per-port outstanding limits: display pipe 0 */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0x1f); /* ovl_ch0_0+ovl_ch0_1 */
+ M4U_WriteReg32(LARB0_BASE, 0x218, 0x2); /* mdp_wdma */
+ M4U_WriteReg32(LARB0_BASE, 0x21C, 0x5); /* mdp_wrot0; min(9,5) */
+ /* ISP (camera) ports */
+ M4U_WriteReg32(LARB2_BASE, 0x200, 0x8); /* imgo */
+ M4U_WriteReg32(LARB2_BASE, 0x208, 0x1); /* aao */
+ M4U_WriteReg32(LARB2_BASE, 0x20c, 0x1); /* lsco */
+ M4U_WriteReg32(LARB2_BASE, 0x210, 0x1); /* esfko */
+ M4U_WriteReg32(LARB2_BASE, 0x218, 0x1); /* lsci */
+ M4U_WriteReg32(LARB2_BASE, 0x220, 0x1); /* bpci */
+ M4U_WriteReg32(LARB2_BASE, 0x22c, 0x4); /* imgi */
+ M4U_WriteReg32(LARB2_BASE, 0x230, 0x1); /* img2o */
+ M4U_WriteReg32(LARB2_BASE, 0x244, 0x1); /* lcei */
+ /* VENC ports */
+ M4U_WriteReg32(LARB3_BASE, 0x200, 0x1); /* venc_rcpu */
+ M4U_WriteReg32(LARB3_BASE, 0x204, 0x4); /* venc_rec_frm */
+ M4U_WriteReg32(LARB3_BASE, 0x208, 0x1); /* venc_bsdma */
+ M4U_WriteReg32(LARB3_BASE, 0x20c, 0x1); /* venc_sv_comv */
+ M4U_WriteReg32(LARB3_BASE, 0x210, 0x1); /* venc_rd_comv */
+ M4U_WriteReg32(LARB3_BASE, 0x224, 0x8); /* venc_cur_luma */
+ M4U_WriteReg32(LARB3_BASE, 0x228, 0x4); /* venc_cur_chroma */
+ M4U_WriteReg32(LARB3_BASE, 0x230, 0x10); /* venc_ref_chroma */
+ /* display pipe 1 */
+ M4U_WriteReg32(LARB4_BASE, 0x200, 0x1f); /* ovl_ch1_0+ovl_ch1_1 */
+ M4U_WriteReg32(LARB4_BASE, 0x218, 0x2); /* mdp_wdma */
+ M4U_WriteReg32(LARB4_BASE, 0x21C, 0x5); /* mdp_wrot0; min(9,5) */
+
+
+ /* VP concurrent settings */
+ /* LARB0 */
+ /*M4U_WriteReg32(LARB0_BASE, 0x210, 0x8); *//* port 4:ovl_ch1_0/1 */
+ /*M4U_WriteReg32(LARB0_BASE, 0x21C, 0x4); *//* port 7:mdp_rdma0; min(4,5) */
+ /*M4U_WriteReg32(LARB0_BASE, 0x22C, 0x3); *//* port11:mdp_wrot1 */
+
+ /* VDEC */
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1f); /* port#0, mc */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x06); /* port#1, pp */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* port#2, ufo */
+ M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); /* port#3, vld */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* port#4, vld2 */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x2); /* port#5, avc mv */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* port#6, pred rd */
+ M4U_WriteReg32(LARB1_BASE, 0x21c, 0x1); /* port#7, pred wr */
+ M4U_WriteReg32(LARB1_BASE, 0x220, 0x1); /* port#8, ppwrap */
+
+ /*venc2 */
+ M4U_WriteReg32(LARB5_BASE, 0x200, 0x1); /* venc_rcpu2 */
+ M4U_WriteReg32(LARB5_BASE, 0x204, 0x4); /* venc_rec_frm2 */
+ /* NOTE(review): orphan "venc_ref_luma2" label here; 0x208 is never written (sequence jumps 0x204 -> 0x20c) -- confirm a write was not dropped */
+ M4U_WriteReg32(LARB5_BASE, 0x20c, 0x10); /* venc_ref_chroma2 */
+ M4U_WriteReg32(LARB5_BASE, 0x210, 0x1); /* venc_bsdma2 */
+ M4U_WriteReg32(LARB5_BASE, 0x214, 0x8); /* venc_cur_luma2 */
+ M4U_WriteReg32(LARB5_BASE, 0x218, 0x4); /* venc_cur_chroma2 */
+ M4U_WriteReg32(LARB5_BASE, 0x21c, 0x1); /* venc_rd_comv2 */
+ M4U_WriteReg32(LARB5_BASE, 0x220, 0x1); /* venc_sv_comv2 */
+
+}
+
+
+static void hdmiSetting(struct mtk_smi_data *smidev) /* HDMI scenario profile for MT8173 */
+{
+ /* HDMI: L1 arbiter bandwidth split per LARB (comment originally said "VP 4K" -- copy-paste) */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x1117); /* LARB0, DISP+MDP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x1659); /* LARB1, VDEC */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x1000); /* LARB2, ISP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x1000); /* LARB3, VENC+JPG */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, 0x1750); /* LARB4, DISP2+MDP2 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB5, 0x1000); /* LARB5, VENC2 */
+ /* per-port outstanding limits (same port values as vpSetting) */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0x18); /* ovl_ch0_0/1 */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x4); /* mdp_rdma0; min(4,5) */
+ M4U_WriteReg32(LARB0_BASE, 0x21c, 0x5); /* mdp_wrot0 */
+
+ M4U_WriteReg32(LARB4_BASE, 0x200, 0x8); /* ovl_ch1_0/1 */
+ M4U_WriteReg32(LARB4_BASE, 0x210, 0x4); /* mdp_rdma1; min(4,5) */
+ M4U_WriteReg32(LARB4_BASE, 0x214, 0x3); /* mdp_wrot1 */
+
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1f); /* port#0, mc */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x06); /* port#1, pp */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* port#2, ufo */
+ M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); /* port#3, vld */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* port#4, vld2 */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x2); /* port#5, mv */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* port#6, pred rd */
+ M4U_WriteReg32(LARB1_BASE, 0x21c, 0x1); /* port#7, pred wr */
+ M4U_WriteReg32(LARB1_BASE, 0x220, 0x1); /* port#8, ppwrap */
+
+}
+
+static void hdmi4kSetting(struct mtk_smi_data *smidev) /* HDMI 4K scenario profile for MT8173 */
+{
+
+ /* HDMI 4K: L1 arbiter bandwidth split per LARB (comment originally said "VP 4K" -- copy-paste) */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB0, 0x12A6); /* LARB0, DISP+MDP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB1, 0x158B); /* LARB1, VDEC */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB2, 0x1000); /* LARB2, ISP */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB3, 0x1000); /* LARB3, VENC+JPG */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB4, 0x1A6D); /* LARB4, DISP2+MDP2 */
+ M4U_WriteReg32(SMI_COMMON_EXT_BASE, REG_OFFSET_SMI_L1ARB5, 0x1000); /* LARB5, VENC2 */
+ /* per-port outstanding limits (same port values as vpSetting) */
+ M4U_WriteReg32(LARB0_BASE, 0x200, 0x18); /* ovl_ch0_0/1 */
+ M4U_WriteReg32(LARB0_BASE, 0x214, 0x4); /* mdp_rdma0; min(4,5) */
+ M4U_WriteReg32(LARB0_BASE, 0x21c, 0x5); /* mdp_wrot0 */
+
+ M4U_WriteReg32(LARB4_BASE, 0x200, 0x8); /* ovl_ch1_0/1 */
+ M4U_WriteReg32(LARB4_BASE, 0x210, 0x4); /* mdp_rdma1; min(4,5) */
+ M4U_WriteReg32(LARB4_BASE, 0x214, 0x3); /* mdp_wrot1 */
+
+ M4U_WriteReg32(LARB1_BASE, 0x200, 0x1f); /* port#0, mc */
+ M4U_WriteReg32(LARB1_BASE, 0x204, 0x06); /* port#1, pp */
+ M4U_WriteReg32(LARB1_BASE, 0x208, 0x1); /* port#2, ufo */
+ M4U_WriteReg32(LARB1_BASE, 0x20c, 0x1); /* port#3, vld */
+ M4U_WriteReg32(LARB1_BASE, 0x210, 0x1); /* port#4, vld2 */
+ M4U_WriteReg32(LARB1_BASE, 0x214, 0x2); /* port#5, mv */
+ M4U_WriteReg32(LARB1_BASE, 0x218, 0x1); /* port#6, pred rd */
+ M4U_WriteReg32(LARB1_BASE, 0x21c, 0x1); /* port#7, pred wr */
+ M4U_WriteReg32(LARB1_BASE, 0x220, 0x1); /* port#8, ppwrap */
+
+}
+
+/* Make sure all the clock is enabled before calling these hooks */
+const struct mtk_smi_priv smi_mt8173_priv = { /* per-SoC SMI scenario callbacks (MT8173: 6 LARBs) */
+	.larb_port_num = {8, 9, 21, 15, 6, 9},
+	.larb_vc_setting = { 0, 2, 0, 1, 0, 1 }, /* matches the per-LARB 0x20 VC-priority writes in initSetting */
+	.init_setting = initSetting,
+	.vp_setting = vpSetting,
+	.vr_setting = vrSetting,
+	.hdmi_setting = hdmiSetting,
+	.hdmi_4k_setting = hdmi4kSetting,
+};
+