author    Meizu OpenSource <patchwork@meizu.com>    2016-08-15 10:19:42 +0800
committer Meizu OpenSource <patchwork@meizu.com>    2016-08-15 10:19:42 +0800
commit    d2e1446d81725c351dc73a03b397ce043fb18452 (patch)
tree      4dbc616b7f92aea39cd697a9084205ddb805e344 /arch/arm/common
first commit
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/Kconfig           |   23
-rw-r--r--  arch/arm/common/Makefile          |   19
-rw-r--r--  arch/arm/common/dmabounce.c       |  579
-rw-r--r--  arch/arm/common/fiq_glue.S        |  118
-rw-r--r--  arch/arm/common/fiq_glue_setup.c  |  147
-rw-r--r--  arch/arm/common/firmware.c        |   18
-rw-r--r--  arch/arm/common/icst.c            |  100
-rw-r--r--  arch/arm/common/it8152.c          |  355
-rw-r--r--  arch/arm/common/locomo.c          |  914
-rw-r--r--  arch/arm/common/mcpm_entry.c      |  263
-rw-r--r--  arch/arm/common/mcpm_head.S       |  219
-rw-r--r--  arch/arm/common/mcpm_platsmp.c    |   89
-rw-r--r--  arch/arm/common/sa1111.c          | 1458
-rw-r--r--  arch/arm/common/scoop.c           |  284
-rw-r--r--  arch/arm/common/sharpsl_param.c   |   62
-rw-r--r--  arch/arm/common/timer-sp.c        |  299
-rw-r--r--  arch/arm/common/via82c505.c       |   83
-rw-r--r--  arch/arm/common/vlock.S           |  108
-rw-r--r--  arch/arm/common/vlock.h           |   29
19 files changed, 5167 insertions, 0 deletions
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
new file mode 100644
index 000000000..ce01364a9
--- /dev/null
+++ b/arch/arm/common/Kconfig
@@ -0,0 +1,23 @@
+config ICST
+ bool
+
+config SA1111
+ bool
+ select DMABOUNCE if !ARCH_PXA
+
+config DMABOUNCE
+ bool
+ select ZONE_DMA
+
+config SHARP_LOCOMO
+ bool
+
+config SHARP_PARAM
+ bool
+
+config SHARP_SCOOP
+ bool
+
+config FIQ_GLUE
+ bool
+ select FIQ
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
new file mode 100644
index 000000000..0a0821fa9
--- /dev/null
+++ b/arch/arm/common/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y += firmware.o
+
+obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
+obj-$(CONFIG_ICST) += icst.o
+obj-$(CONFIG_SA1111) += sa1111.o
+obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
+obj-$(CONFIG_DMABOUNCE) += dmabounce.o
+obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
+obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
+obj-$(CONFIG_SHARP_SCOOP) += scoop.o
+obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
+obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
+obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
+AFLAGS_mcpm_head.o := -march=armv7-a
+AFLAGS_vlock.o := -march=armv7-a
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
new file mode 100644
index 000000000..1143c4d5c
--- /dev/null
+++ b/arch/arm/common/dmabounce.c
@@ -0,0 +1,579 @@
+/*
+ * arch/arm/common/dmabounce.c
+ *
+ * Special dma_{map/unmap/dma_sync}_* routines for systems that have
+ * limited DMA windows. These functions utilize bounce buffers to
+ * copy data to/from buffers located outside the DMA region. This
+ * only works for systems in which DMA memory is at the bottom of
+ * RAM, the remainder of memory is at the top and the DMA memory
+ * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
+ * DMA windows will require custom implementations that reserve memory
+ * areas at early bootup.
+ *
+ * Original version by Brad Parker (brad@heeltoe.com)
+ * Re-written by Christopher Hoover <ch@murgatroid.com>
+ * Made generic by Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright (C) 2002 Hewlett Packard Company.
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
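As a rough sketch of the mechanism described above (every name and size below is hypothetical, not part of this patch): a platform that owns a bus with a limited DMA window hooks each device into dmabounce with a predicate deciding which addresses must be bounced, via the dmabounce_register_dev() interface added near the end of this file.

/* Hypothetical example: bounce anything that falls outside a 64 MB window. */
static int my_bus_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size - PHYS_OFFSET) >= SZ_64M;
}

static int my_bus_add_device(struct device *dev)
{
	/* 2 KB and 4 KB bounce pools; larger buffers fall back to
	 * dma_alloc_coherent() inside dmabounce itself. */
	return dmabounce_register_dev(dev, 2048, 4096, my_bus_needs_bounce);
}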
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/page-flags.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#undef STATS
+
+#ifdef STATS
+#define DO_STATS(X) do { X ; } while (0)
+#else
+#define DO_STATS(X) do { } while (0)
+#endif
+
+/* ************************************************** */
+
+struct safe_buffer {
+ struct list_head node;
+
+ /* original request */
+ void *ptr;
+ size_t size;
+ int direction;
+
+ /* safe buffer info */
+ struct dmabounce_pool *pool;
+ void *safe;
+ dma_addr_t safe_dma_addr;
+};
+
+struct dmabounce_pool {
+ unsigned long size;
+ struct dma_pool *pool;
+#ifdef STATS
+ unsigned long allocs;
+#endif
+};
+
+struct dmabounce_device_info {
+ struct device *dev;
+ struct list_head safe_buffers;
+#ifdef STATS
+ unsigned long total_allocs;
+ unsigned long map_op_count;
+ unsigned long bounce_count;
+ int attr_res;
+#endif
+ struct dmabounce_pool small;
+ struct dmabounce_pool large;
+
+ rwlock_t lock;
+
+ int (*needs_bounce)(struct device *, dma_addr_t, size_t);
+};
+
+#ifdef STATS
+static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
+ device_info->small.allocs,
+ device_info->large.allocs,
+ device_info->total_allocs - device_info->small.allocs -
+ device_info->large.allocs,
+ device_info->total_allocs,
+ device_info->map_op_count,
+ device_info->bounce_count);
+}
+
+static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
+#endif
+
+
+/* allocate a 'safe' buffer and keep track of it */
+static inline struct safe_buffer *
+alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct safe_buffer *buf;
+ struct dmabounce_pool *pool;
+ struct device *dev = device_info->dev;
+ unsigned long flags;
+
+ dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
+ __func__, ptr, size, dir);
+
+ if (size <= device_info->small.size) {
+ pool = &device_info->small;
+ } else if (size <= device_info->large.size) {
+ pool = &device_info->large;
+ } else {
+ pool = NULL;
+ }
+
+ buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
+ if (buf == NULL) {
+ dev_warn(dev, "%s: kmalloc failed\n", __func__);
+ return NULL;
+ }
+
+ buf->ptr = ptr;
+ buf->size = size;
+ buf->direction = dir;
+ buf->pool = pool;
+
+ if (pool) {
+ buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
+ &buf->safe_dma_addr);
+ } else {
+ buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
+ GFP_ATOMIC);
+ }
+
+ if (buf->safe == NULL) {
+ dev_warn(dev,
+ "%s: could not alloc dma memory (size=%d)\n",
+ __func__, size);
+ kfree(buf);
+ return NULL;
+ }
+
+#ifdef STATS
+ if (pool)
+ pool->allocs++;
+ device_info->total_allocs++;
+#endif
+
+ write_lock_irqsave(&device_info->lock, flags);
+ list_add(&buf->node, &device_info->safe_buffers);
+ write_unlock_irqrestore(&device_info->lock, flags);
+
+ return buf;
+}
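To make the pool selection above concrete, this is how buffer sizes map to pools with the 2048/4096-byte pools that it8152.c registers further down in this patch:

/*
 *   size <= 2048  ->  small DMA pool
 *   size <= 4096  ->  large DMA pool
 *   size >  4096  ->  pool == NULL, dma_alloc_coherent() fallback
 */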
+
+/* determine if a buffer is from our "safe" pool */
+static inline struct safe_buffer *
+find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
+{
+ struct safe_buffer *b, *rb = NULL;
+ unsigned long flags;
+
+ read_lock_irqsave(&device_info->lock, flags);
+
+ list_for_each_entry(b, &device_info->safe_buffers, node)
+ if (b->safe_dma_addr <= safe_dma_addr &&
+ b->safe_dma_addr + b->size > safe_dma_addr) {
+ rb = b;
+ break;
+ }
+
+ read_unlock_irqrestore(&device_info->lock, flags);
+ return rb;
+}
+
+static inline void
+free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
+{
+ unsigned long flags;
+
+ dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
+
+ write_lock_irqsave(&device_info->lock, flags);
+
+ list_del(&buf->node);
+
+ write_unlock_irqrestore(&device_info->lock, flags);
+
+ if (buf->pool)
+ dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
+ else
+ dma_free_coherent(device_info->dev, buf->size, buf->safe,
+ buf->safe_dma_addr);
+
+ kfree(buf);
+}
+
+/* ************************************************** */
+
+static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
+ dma_addr_t dma_addr, const char *where)
+{
+ if (!dev || !dev->archdata.dmabounce)
+ return NULL;
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "Trying to %s invalid mapping\n", where);
+ return NULL;
+ }
+ return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
+}
+
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ if (!dev || !dev->archdata.dmabounce)
+ return 0;
+
+ if (dev->dma_mask) {
+ unsigned long limit, mask = *dev->dma_mask;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit) {
+ dev_err(dev, "DMA mapping too big (requested %#x "
+ "mask %#Lx)\n", size, *dev->dma_mask);
+ return -E2BIG;
+ }
+
+ /* Figure out if we need to bounce from the DMA mask. */
+ if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+ return 1;
+ }
+
+ return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
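A worked example of the mask arithmetic above, using a hypothetical 26-bit DMA mask (the 64 MB window the ITE8152 code later in this patch sets up):

/*
 *   mask  = 0x03ffffff                        (*dev->dma_mask)
 *   limit = (mask + 1) & ~mask = 0x04000000   (64 MB: largest single mapping)
 *
 *   size > limit
 *       ->  -E2BIG, the mapping is refused outright
 *   (dma_addr | (dma_addr + size - 1)) & ~mask != 0
 *       ->  some byte of the buffer lies above 64 MB, so it must be bounced
 */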
+
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+ struct safe_buffer *buf;
+
+ if (device_info)
+ DO_STATS ( device_info->map_op_count++ );
+
+ buf = alloc_safe_buffer(device_info, ptr, size, dir);
+ if (buf == NULL) {
+ dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+ __func__, ptr);
+ return DMA_ERROR_CODE;
+ }
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+ __func__, ptr, buf->safe, size);
+ memcpy(buf->safe, ptr, size);
+ }
+
+ return buf->safe_dma_addr;
+}
+
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
+ size_t size, enum dma_data_direction dir)
+{
+ BUG_ON(buf->size != size);
+ BUG_ON(buf->direction != dir);
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+ buf->safe, buf->safe_dma_addr);
+
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ void *ptr = buf->ptr;
+
+ dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+ __func__, buf->safe, ptr, size);
+ memcpy(ptr, buf->safe, size);
+
+ /*
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
+ */
+ __cpuc_flush_dcache_area(ptr, size);
+ }
+ free_safe_buffer(dev->archdata.dmabounce, buf);
+}
+
+/* ************************************************** */
+
+/*
+ * see if a buffer address is in an 'unsafe' range. if it is
+ * allocate a 'safe' buffer and copy the unsafe buffer into it.
+ * substitute the safe buffer for the unsafe one.
+ * (basically move the buffer from an unsafe area to a safe one)
+ */
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t dma_addr;
+ int ret;
+
+ dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
+ __func__, page, offset, size, dir);
+
+ dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+ ret = needs_bounce(dev, dma_addr, size);
+ if (ret < 0)
+ return DMA_ERROR_CODE;
+
+ if (ret == 0) {
+ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+ return dma_addr;
+ }
+
+ if (PageHighMem(page)) {
+ dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
+ return DMA_ERROR_CODE;
+ }
+
+ return map_single(dev, page_address(page) + offset, size, dir);
+}
+
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct safe_buffer *buf;
+
+ dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+ __func__, dma_addr, size, dir);
+
+ buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+ if (!buf) {
+ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+ return;
+ }
+
+ unmap_single(dev, buf, size, dir);
+}
+
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
+{
+ struct safe_buffer *buf;
+ unsigned long off;
+
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
+
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+ return 1;
+
+ off = addr - buf->safe_dma_addr;
+
+ BUG_ON(buf->direction != dir);
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
+
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+ __func__, buf->safe + off, buf->ptr + off, sz);
+ memcpy(buf->ptr + off, buf->safe + off, sz);
+ }
+ return 0;
+}
+
+static void dmabounce_sync_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
+{
+ struct safe_buffer *buf;
+ unsigned long off;
+
+ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+ __func__, addr, sz, dir);
+
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+ return 1;
+
+ off = addr - buf->safe_dma_addr;
+
+ BUG_ON(buf->direction != dir);
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
+
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
+
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+ dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
+			__func__, buf->ptr + off, buf->safe + off, sz);
+ memcpy(buf->safe + off, buf->ptr + off, sz);
+ }
+ return 0;
+}
+
+static void dmabounce_sync_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev->archdata.dmabounce)
+ return 0;
+
+ return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+ .sync_single_for_device = dmabounce_sync_for_device,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = dmabounce_set_mask,
+};
+
+static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+ const char *name, unsigned long size)
+{
+ pool->size = size;
+ DO_STATS(pool->allocs = 0);
+ pool->pool = dma_pool_create(name, dev, size,
+ 0 /* byte alignment */,
+ 0 /* no page-crossing issues */);
+
+ return pool->pool ? 0 : -ENOMEM;
+}
+
+int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+ unsigned long large_buffer_size,
+ int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
+{
+ struct dmabounce_device_info *device_info;
+ int ret;
+
+ device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
+ if (!device_info) {
+ dev_err(dev,
+			"Could not allocate dmabounce_device_info\n");
+ return -ENOMEM;
+ }
+
+ ret = dmabounce_init_pool(&device_info->small, dev,
+ "small_dmabounce_pool", small_buffer_size);
+ if (ret) {
+ dev_err(dev,
+ "dmabounce: could not allocate DMA pool for %ld byte objects\n",
+ small_buffer_size);
+ goto err_free;
+ }
+
+ if (large_buffer_size) {
+ ret = dmabounce_init_pool(&device_info->large, dev,
+ "large_dmabounce_pool",
+ large_buffer_size);
+ if (ret) {
+ dev_err(dev,
+ "dmabounce: could not allocate DMA pool for %ld byte objects\n",
+ large_buffer_size);
+ goto err_destroy;
+ }
+ }
+
+ device_info->dev = dev;
+ INIT_LIST_HEAD(&device_info->safe_buffers);
+ rwlock_init(&device_info->lock);
+ device_info->needs_bounce = needs_bounce_fn;
+
+#ifdef STATS
+ device_info->total_allocs = 0;
+ device_info->map_op_count = 0;
+ device_info->bounce_count = 0;
+ device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
+#endif
+
+ dev->archdata.dmabounce = device_info;
+ set_dma_ops(dev, &dmabounce_ops);
+
+ dev_info(dev, "dmabounce: registered device\n");
+
+ return 0;
+
+ err_destroy:
+ dma_pool_destroy(device_info->small.pool);
+ err_free:
+ kfree(device_info);
+ return ret;
+}
+EXPORT_SYMBOL(dmabounce_register_dev);
+
+void dmabounce_unregister_dev(struct device *dev)
+{
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+ dev->archdata.dmabounce = NULL;
+ set_dma_ops(dev, NULL);
+
+ if (!device_info) {
+ dev_warn(dev,
+			"Never registered with dmabounce but attempting "
+ "to unregister!\n");
+ return;
+ }
+
+ if (!list_empty(&device_info->safe_buffers)) {
+ dev_err(dev,
+ "Removing from dmabounce with pending buffers!\n");
+ BUG();
+ }
+
+ if (device_info->small.pool)
+ dma_pool_destroy(device_info->small.pool);
+ if (device_info->large.pool)
+ dma_pool_destroy(device_info->large.pool);
+
+#ifdef STATS
+ if (device_info->attr_res == 0)
+ device_remove_file(dev, &dev_attr_dmabounce_stats);
+#endif
+
+ kfree(device_info);
+
+ dev_info(dev, "dmabounce: device unregistered\n");
+}
+EXPORT_SYMBOL(dmabounce_unregister_dev);
+
+MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 000000000..24b42cec4
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .global fiq_glue_end
+
+ /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
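Reconstructed from the stores below (an editorial sketch, not part of the patch), the frame fiq_glue builds on the FIQ stack, and hands to the handler as its regs argument, is laid out as 18 words:

	regs + 0x00..0x1c   r0-r7   of the interrupted mode
	regs + 0x20..0x38   r8-r14  of the interrupted mode (sp/lr are rewritten
	                            with the banked values for non-user modes)
	regs + 0x3c         pc      (lr - 4 at FIQ entry)
	regs + 0x40         cpsr    of the interrupted mode
	regs + 0x44         spsr    of the interrupted mode (non-user modes only)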
+ENTRY(fiq_glue)
+ /* store pc, cpsr from previous mode, reserve space for spsr */
+ mrs r12, spsr
+ sub lr, lr, #4
+ subs r10, #1
+ bne nested_fiq
+
+ str r12, [sp, #-8]!
+ str lr, [sp, #-4]!
+
+ /* store r8-r14 from previous mode */
+ sub sp, sp, #(7 * 4)
+ stmia sp, {r8-r14}^
+ nop
+
+ /* store r0-r7 from previous mode */
+ stmfd sp!, {r0-r7}
+
+ /* setup func(data,regs) arguments */
+ mov r0, r9
+ mov r1, sp
+ mov r3, r8
+
+ mov r7, sp
+
+ /* Get sp and lr from non-user modes */
+ and r4, r12, #MODE_MASK
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode
+
+ mov r7, sp
+ orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+ msr cpsr_c, r4
+ str sp, [r7, #(4 * 13)]
+ str lr, [r7, #(4 * 14)]
+ mrs r5, spsr
+ str r5, [r7, #(4 * 17)]
+
+ cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ /* use fiq stack if we reenter this mode */
+ subne sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+ msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+ mov r2, sp
+ sub sp, r7, #12
+ stmfd sp!, {r2, ip, lr}
+ /* call func(data,regs) */
+ blx r3
+ ldmfd sp, {r2, ip, lr}
+ mov sp, r2
+
+ /* restore/discard saved state */
+ cmp r4, #USR_MODE
+ beq fiq_from_usr_mode_exit
+
+ msr cpsr_c, r4
+ ldr sp, [r7, #(4 * 13)]
+ ldr lr, [r7, #(4 * 14)]
+ msr spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+ ldmfd sp!, {r0-r7}
+ ldr lr, [sp, #(4 * 7)]
+ ldr r12, [sp, #(4 * 8)]
+ add sp, sp, #(10 * 4)
+exit_fiq:
+ msr spsr_cxsf, r12
+ add r10, #1
+ cmp r11, #0
+ moveqs pc, lr
+ bx r11 /* jump to custom fiq return function */
+
+nested_fiq:
+ orr r12, r12, #(PSR_F_BIT)
+ b exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+ stmfd sp!, {r4}
+ mrs r4, cpsr
+ msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+ movs r8, r0
+ mov r9, r1
+ mov sp, r2
+ mov r11, r3
+ moveq r10, #0
+ movne r10, #1
+ msr cpsr_c, r4
+ ldmfd sp!, {r4}
+ bx lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 000000000..8cb1b611c
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+ fiq_return_handler_t fiq_return_handler);
+
+static struct fiq_handler fiq_debbuger_fiq_handler = {
+ .name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+ struct fiq_glue_handler *handler = info;
+ fiq_glue_setup(handler->fiq, handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+ int ret;
+ int cpu;
+
+ if (!handler || !handler->fiq)
+ return -EINVAL;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_stack) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+
+ for_each_possible_cpu(cpu) {
+ void *stack;
+ stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+ if (WARN_ON(!stack)) {
+ ret = -ENOMEM;
+ goto err_alloc_fiq_stack;
+ }
+ per_cpu(fiq_stack, cpu) = stack;
+ }
+
+ ret = claim_fiq(&fiq_debbuger_fiq_handler);
+ if (WARN_ON(ret))
+ goto err_claim_fiq;
+
+ current_handler = handler;
+ on_each_cpu(fiq_glue_setup_helper, handler, true);
+ set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+ mutex_unlock(&fiq_glue_lock);
+ return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+ for_each_possible_cpu(cpu) {
+ __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
+ per_cpu(fiq_stack, cpu) = NULL;
+ }
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+ return ret;
+}
+
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+ fiq_return_handler = fiq_return;
+ if (current_handler)
+ on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (fiq_return_handler) {
+ ret = -EBUSY;
+ goto err_busy;
+ }
+ fiq_glue_update_return_handler(fiq_return);
+ ret = 0;
+err_busy:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+ int ret;
+
+ mutex_lock(&fiq_glue_lock);
+ if (WARN_ON(fiq_return_handler != fiq_return)) {
+ ret = -EINVAL;
+ goto err_inval;
+ }
+ fiq_glue_update_return_handler(NULL);
+ ret = 0;
+err_inval:
+ mutex_unlock(&fiq_glue_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+ if (!current_handler)
+ return;
+ fiq_glue_setup(current_handler->fiq, current_handler,
+ __get_cpu_var(fiq_stack) + THREAD_START_SP,
+ fiq_return_handler);
+ if (current_handler->resume)
+ current_handler->resume(current_handler);
+}
+
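A minimal sketch of a driver plugging into the glue above. The layout of struct fiq_glue_handler lives in <asm/fiq_glue.h>, which is not part of this diff, so the .fiq/.resume members and the three-argument FIQ callback shown here are assumptions inferred from the setup code:

static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* Runs in FIQ mode: no locks, no sleeping, keep it short. */
}

static void my_fiq_resume(struct fiq_glue_handler *h)
{
	/* Re-arm the FIQ source after the mode registers were lost. */
}

static struct fiq_glue_handler my_handler = {
	.fiq	= my_fiq,
	.resume	= my_fiq_resume,
};

static int __init my_fiq_init(void)
{
	return fiq_glue_register_handler(&my_handler);
}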
diff --git a/arch/arm/common/firmware.c b/arch/arm/common/firmware.c
new file mode 100644
index 000000000..27ddccb11
--- /dev/null
+++ b/arch/arm/common/firmware.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics.
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+
+#include <asm/firmware.h>
+
+static const struct firmware_ops default_firmware_ops;
+
+const struct firmware_ops *firmware_ops = &default_firmware_ops;
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
new file mode 100644
index 000000000..2dc6da70a
--- /dev/null
+++ b/arch/arm/common/icst.c
@@ -0,0 +1,100 @@
+/*
+ * linux/arch/arm/common/icst.c
+ *
+ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support functions for calculating clocks/divisors for the ICST307
+ * clock generators. See http://www.idt.com/ for more information
+ * on these devices.
+ *
+ * This is an almost identical implementation to the ICST525 clock generator.
+ * The s2div and idx2s tables are different.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <asm/hardware/icst.h>
+
+/*
+ * Divisors for each OD setting.
+ */
+const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
+const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
+EXPORT_SYMBOL(icst307_s2div);
+EXPORT_SYMBOL(icst525_s2div);
+
+unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+{
+ return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+}
+
+EXPORT_SYMBOL(icst_hz);
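A worked example of the formula above, with hypothetical parameters: a 24 MHz reference and VCO settings v=17, r=22, s=1 (so s2div[1] = 2 on the ICST307):

/*
 *   icst_hz = 24000000 * 2 * (17 + 8) / ((22 + 2) * 2)
 *           = 1200000000 / 48
 *           = 25000000 Hz  (25 MHz)
 */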
+
+/*
+ * Ascending divisor S values.
+ */
+const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
+const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
+EXPORT_SYMBOL(icst307_idx2s);
+EXPORT_SYMBOL(icst525_idx2s);
+
+struct icst_vco
+icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+{
+ struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
+ unsigned long f;
+ unsigned int i = 0, rd, best = (unsigned int)-1;
+
+ /*
+ * First, find the PLL output divisor such
+ * that the PLL output is within spec.
+ */
+ do {
+ f = freq * p->s2div[p->idx2s[i]];
+
+ if (f > p->vco_min && f <= p->vco_max)
+ break;
+
+		i++;
+	} while (i < 8);
+
+ if (i >= 8)
+ return vco;
+
+ vco.s = p->idx2s[i];
+
+ /*
+ * Now find the closest divisor combination
+ * which gives a PLL output of 'f'.
+ */
+ for (rd = p->rd_min; rd <= p->rd_max; rd++) {
+ unsigned long fref_div, f_pll;
+ unsigned int vd;
+ int f_diff;
+
+ fref_div = (2 * p->ref) / rd;
+
+ vd = (f + fref_div / 2) / fref_div;
+ if (vd < p->vd_min || vd > p->vd_max)
+ continue;
+
+ f_pll = fref_div * vd;
+ f_diff = f_pll - f;
+ if (f_diff < 0)
+ f_diff = -f_diff;
+
+ if ((unsigned)f_diff < best) {
+ vco.v = vd - 8;
+ vco.r = rd - 2;
+ if (f_diff == 0)
+ break;
+ best = f_diff;
+ }
+ }
+
+ return vco;
+}
+
+EXPORT_SYMBOL(icst_hz_to_vco);
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
new file mode 100644
index 000000000..001f49137
--- /dev/null
+++ b/arch/arm/common/it8152.c
@@ -0,0 +1,355 @@
+/*
+ * linux/arch/arm/common/it8152.c
+ *
+ * Copyright Compulab Ltd, 2002-2007
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * The DMA bouncing part is taken from arch/arm/mach-ixp4xx/common-pci.c
+ * (see this file for respective copyrights)
+ *
+ * Thanks to Guennadi Liakhovetski <gl@dsa-ac.de> for IRQ enumeration
+ * and demux code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/export.h>
+
+#include <asm/mach/pci.h>
+#include <asm/hardware/it8152.h>
+
+#define MAX_SLOTS 21
+
+static void it8152_mask_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq >= IT8152_LD_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
+ (1 << (irq - IT8152_LD_IRQ(0)))),
+ IT8152_INTC_LDCNIMR);
+ } else if (irq >= IT8152_LP_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) |
+ (1 << (irq - IT8152_LP_IRQ(0)))),
+ IT8152_INTC_LPCNIMR);
+ } else if (irq >= IT8152_PD_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) |
+ (1 << (irq - IT8152_PD_IRQ(0)))),
+ IT8152_INTC_PDCNIMR);
+ }
+}
+
+static void it8152_unmask_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq >= IT8152_LD_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
+ ~(1 << (irq - IT8152_LD_IRQ(0)))),
+ IT8152_INTC_LDCNIMR);
+ } else if (irq >= IT8152_LP_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) &
+ ~(1 << (irq - IT8152_LP_IRQ(0)))),
+ IT8152_INTC_LPCNIMR);
+ } else if (irq >= IT8152_PD_IRQ(0)) {
+ __raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) &
+ ~(1 << (irq - IT8152_PD_IRQ(0)))),
+ IT8152_INTC_PDCNIMR);
+ }
+}
+
+static struct irq_chip it8152_irq_chip = {
+ .name = "it8152",
+ .irq_ack = it8152_mask_irq,
+ .irq_mask = it8152_mask_irq,
+ .irq_unmask = it8152_unmask_irq,
+};
+
+void it8152_init_irq(void)
+{
+ int irq;
+
+ __raw_writel((0xffff), IT8152_INTC_PDCNIMR);
+ __raw_writel((0), IT8152_INTC_PDCNIRR);
+ __raw_writel((0xffff), IT8152_INTC_LPCNIMR);
+ __raw_writel((0), IT8152_INTC_LPCNIRR);
+ __raw_writel((0xffff), IT8152_INTC_LDCNIMR);
+ __raw_writel((0), IT8152_INTC_LDCNIRR);
+
+ for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
+ irq_set_chip_and_handler(irq, &it8152_irq_chip,
+ handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+}
+
+void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ int bits_pd, bits_lp, bits_ld;
+ int i;
+
+ while (1) {
+ /* Read all */
+ bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
+ bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
+ bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
+
+ /* Ack */
+ __raw_writel((~bits_pd), IT8152_INTC_PDCNIRR);
+ __raw_writel((~bits_lp), IT8152_INTC_LPCNIRR);
+ __raw_writel((~bits_ld), IT8152_INTC_LDCNIRR);
+
+ if (!(bits_ld | bits_lp | bits_pd)) {
+ /* Re-read to guarantee, that there was a moment of
+ time, when they all three were 0. */
+ bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
+ bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
+ bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
+ if (!(bits_ld | bits_lp | bits_pd))
+ return;
+ }
+
+ bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1);
+ while (bits_pd) {
+ i = __ffs(bits_pd);
+ generic_handle_irq(IT8152_PD_IRQ(i));
+ bits_pd &= ~(1 << i);
+ }
+
+ bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1);
+ while (bits_lp) {
+ i = __ffs(bits_lp);
+ generic_handle_irq(IT8152_LP_IRQ(i));
+ bits_lp &= ~(1 << i);
+ }
+
+ bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1);
+ while (bits_ld) {
+ i = __ffs(bits_ld);
+ generic_handle_irq(IT8152_LD_IRQ(i));
+ bits_ld &= ~(1 << i);
+ }
+ }
+}
+
+/* mapping for on-chip devices */
+int __init it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
+ (dev->device == PCI_DEVICE_ID_ITE_8152)) {
+ if ((dev->class >> 8) == PCI_CLASS_MULTIMEDIA_AUDIO)
+ return IT8152_AUDIO_INT;
+ if ((dev->class >> 8) == PCI_CLASS_SERIAL_USB)
+ return IT8152_USB_INT;
+ if ((dev->class >> 8) == PCI_CLASS_SYSTEM_DMA)
+ return IT8152_CDMA_INT;
+ }
+
+ return 0;
+}
+
+static unsigned long it8152_pci_dev_base_address(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ unsigned long addr = 0;
+
+ if (bus->number == 0) {
+ if (devfn < PCI_DEVFN(MAX_SLOTS, 0))
+ addr = (devfn << 8);
+ } else
+ addr = (bus->number << 16) | (devfn << 8);
+
+ return addr;
+}
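For example (hypothetical device): bus 1, devfn PCI_DEVFN(2, 0) = 0x10 yields addr = (1 << 16) | (0x10 << 8) = 0x11000, so a configuration access to register offset 0x04 programs 0x11004 into IT8152_PCI_CFG_ADDR before the data phase below.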
+
+static int it8152_pci_read_config(struct pci_bus *bus,
+ unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
+ u32 v;
+ int shift;
+
+ shift = (where & 3);
+
+ __raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
+ v = (__raw_readl(IT8152_PCI_CFG_DATA) >> (8 * (shift)));
+
+ *value = v;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int it8152_pci_write_config(struct pci_bus *bus,
+ unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
+ u32 v, vtemp, mask = 0;
+ int shift;
+
+ if (size == 1)
+ mask = 0xff;
+ if (size == 2)
+ mask = 0xffff;
+
+ shift = (where & 3);
+
+ __raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
+ vtemp = __raw_readl(IT8152_PCI_CFG_DATA);
+
+ if (mask)
+ vtemp &= ~(mask << (8 * shift));
+ else
+ vtemp = 0;
+
+ v = (value << (8 * shift));
+ __raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
+ __raw_writel((v | vtemp), IT8152_PCI_CFG_DATA);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops it8152_ops = {
+ .read = it8152_pci_read_config,
+ .write = it8152_pci_write_config,
+};
+
+static struct resource it8152_io = {
+ .name = "IT8152 PCI I/O region",
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource it8152_mem = {
+ .name = "IT8152 PCI memory region",
+ .start = 0x10000000,
+ .end = 0x13e00000,
+ .flags = IORESOURCE_MEM,
+};
+
+/*
+ * The following functions are needed for DMA bouncing.
+ * ITE8152 chip can address up to 64MByte, so all the devices
+ * connected to ITE8152 (PCI and USB) should have limited DMA window
+ */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+ dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+ __func__, dma_addr, size);
+ return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
+
+/*
+ * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
+ * other devices.
+ */
+static int it8152_pci_platform_notify(struct device *dev)
+{
+ if (dev->bus == &pci_bus_type) {
+ if (dev->dma_mask)
+ *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
+ dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
+ dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
+ }
+ return 0;
+}
+
+static int it8152_pci_platform_notify_remove(struct device *dev)
+{
+ if (dev->bus == &pci_bus_type)
+ dmabounce_unregister_dev(dev);
+
+ return 0;
+}
+
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (mask >= PHYS_OFFSET + SZ_64M - 1)
+ return 0;
+
+ return -EIO;
+}
+
+int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ /*
+ * FIXME: use pci_ioremap_io to remap the IO space here and
+ * move over to the generic io.h implementation.
+ * This requires solving the same problem for PXA PCMCIA
+ * support.
+ */
+ it8152_io.start = (unsigned long)IT8152_IO_BASE + 0x12000;
+ it8152_io.end = (unsigned long)IT8152_IO_BASE + 0x12000 + 0x100000;
+
+ sys->mem_offset = 0x10000000;
+ sys->io_offset = (unsigned long)IT8152_IO_BASE;
+
+ if (request_resource(&ioport_resource, &it8152_io)) {
+ printk(KERN_ERR "PCI: unable to allocate IO region\n");
+ goto err0;
+ }
+ if (request_resource(&iomem_resource, &it8152_mem)) {
+ printk(KERN_ERR "PCI: unable to allocate memory region\n");
+ goto err1;
+ }
+
+ pci_add_resource_offset(&sys->resources, &it8152_io, sys->io_offset);
+ pci_add_resource_offset(&sys->resources, &it8152_mem, sys->mem_offset);
+
+ if (platform_notify || platform_notify_remove) {
+ printk(KERN_ERR "PCI: Can't use platform_notify\n");
+ goto err2;
+ }
+
+ platform_notify = it8152_pci_platform_notify;
+ platform_notify_remove = it8152_pci_platform_notify_remove;
+
+ return 1;
+
+err2:
+ release_resource(&it8152_io);
+err1:
+ release_resource(&it8152_mem);
+err0:
+ return -EBUSY;
+}
+
+/* ITE bridge requires setting latency timer to avoid early bus access
+ termination by PCI bus master devices
+*/
+void pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* no need to update on-chip OHCI controller */
+ if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
+ (dev->device == PCI_DEVICE_ID_ITE_8152) &&
+ ((dev->class >> 8) == PCI_CLASS_SERIAL_USB))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+ printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
+ pci_name(dev), lat);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
+
+
+EXPORT_SYMBOL(dma_set_coherent_mask);
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
new file mode 100644
index 000000000..b55c3625d
--- /dev/null
+++ b/arch/arm/common/locomo.c
@@ -0,0 +1,914 @@
+/*
+ * linux/arch/arm/common/locomo.c
+ *
+ * Sharp LoCoMo support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains all generic LoCoMo support.
+ *
+ * All initialization functions provided here are intended to be called
+ * from machine specific code with proper arguments when required.
+ *
+ * Based on sa1111.c
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+
+#include <asm/hardware/locomo.h>
+
+/* LoCoMo Interrupts */
+#define IRQ_LOCOMO_KEY (0)
+#define IRQ_LOCOMO_GPIO (1)
+#define IRQ_LOCOMO_LT (2)
+#define IRQ_LOCOMO_SPI (3)
+
+/* M62332 output channel selection */
+#define M62332_EVR_CH 1 /* M62332 volume channel number */
+ /* 0 : CH.1 , 1 : CH. 2 */
+/* DAC send data */
+#define M62332_SLAVE_ADDR 0x4e /* Slave address */
+#define M62332_W_BIT 0x00 /* W bit (0 only) */
+#define M62332_SUB_ADDR 0x00 /* Sub address */
+#define M62332_A_BIT 0x00 /* A bit (0 only) */
+
+/* DAC setup and hold times (expressed in us) */
+#define DAC_BUS_FREE_TIME 5 /* 4.7 us */
+#define DAC_START_SETUP_TIME 5 /* 4.7 us */
+#define DAC_STOP_SETUP_TIME 4 /* 4.0 us */
+#define DAC_START_HOLD_TIME 5 /* 4.7 us */
+#define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */
+#define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */
+#define DAC_DATA_SETUP_TIME 1 /* 250 ns */
+#define DAC_DATA_HOLD_TIME 1 /* 300 ns */
+#define DAC_LOW_SETUP_TIME 1 /* 300 ns */
+#define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */
+
+/* the following is the overall data for the locomo chip */
+struct locomo {
+ struct device *dev;
+ unsigned long phys;
+ unsigned int irq;
+ int irq_base;
+ spinlock_t lock;
+ void __iomem *base;
+#ifdef CONFIG_PM
+ void *saved_state;
+#endif
+};
+
+struct locomo_dev_info {
+ unsigned long offset;
+ unsigned long length;
+ unsigned int devid;
+ unsigned int irq[1];
+ const char * name;
+};
+
+/* All the locomo devices. If offset is non-zero, the mapbase for the
+ * locomo_dev will be set to the chip base plus offset. If offset is
+ * zero, then the mapbase for the locomo_dev will be set to zero. An
+ * offset of zero means the device only uses GPIOs or other helper
+ * functions inside this file */
+static struct locomo_dev_info locomo_devices[] = {
+ {
+ .devid = LOCOMO_DEVID_KEYBOARD,
+ .irq = { IRQ_LOCOMO_KEY },
+ .name = "locomo-keyboard",
+ .offset = LOCOMO_KEYBOARD,
+ .length = 16,
+ },
+ {
+ .devid = LOCOMO_DEVID_FRONTLIGHT,
+ .irq = {},
+ .name = "locomo-frontlight",
+ .offset = LOCOMO_FRONTLIGHT,
+ .length = 8,
+
+ },
+ {
+ .devid = LOCOMO_DEVID_BACKLIGHT,
+ .irq = {},
+ .name = "locomo-backlight",
+ .offset = LOCOMO_BACKLIGHT,
+ .length = 8,
+ },
+ {
+ .devid = LOCOMO_DEVID_AUDIO,
+ .irq = {},
+ .name = "locomo-audio",
+ .offset = LOCOMO_AUDIO,
+ .length = 4,
+ },
+ {
+ .devid = LOCOMO_DEVID_LED,
+ .irq = {},
+ .name = "locomo-led",
+ .offset = LOCOMO_LED,
+ .length = 8,
+ },
+ {
+ .devid = LOCOMO_DEVID_UART,
+ .irq = {},
+ .name = "locomo-uart",
+ .offset = 0,
+ .length = 0,
+ },
+ {
+ .devid = LOCOMO_DEVID_SPI,
+ .irq = {},
+ .name = "locomo-spi",
+ .offset = LOCOMO_SPI,
+ .length = 0x30,
+ },
+};
+
+static void locomo_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct locomo *lchip = irq_get_chip_data(irq);
+ int req, i;
+
+ /* Acknowledge the parent IRQ */
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ /* check why this interrupt was generated */
+ req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
+
+ if (req) {
+ /* generate the next interrupt(s) */
+ irq = lchip->irq_base;
+ for (i = 0; i <= 3; i++, irq++) {
+ if (req & (0x0100 << i)) {
+ generic_handle_irq(irq);
+ }
+
+ }
+ }
+}
+
+static void locomo_ack_irq(struct irq_data *d)
+{
+}
+
+static void locomo_mask_irq(struct irq_data *d)
+{
+ struct locomo *lchip = irq_data_get_irq_chip_data(d);
+ unsigned int r;
+ r = locomo_readl(lchip->base + LOCOMO_ICR);
+ r &= ~(0x0010 << (d->irq - lchip->irq_base));
+ locomo_writel(r, lchip->base + LOCOMO_ICR);
+}
+
+static void locomo_unmask_irq(struct irq_data *d)
+{
+ struct locomo *lchip = irq_data_get_irq_chip_data(d);
+ unsigned int r;
+ r = locomo_readl(lchip->base + LOCOMO_ICR);
+ r |= (0x0010 << (d->irq - lchip->irq_base));
+ locomo_writel(r, lchip->base + LOCOMO_ICR);
+}
+
+static struct irq_chip locomo_chip = {
+ .name = "LOCOMO",
+ .irq_ack = locomo_ack_irq,
+ .irq_mask = locomo_mask_irq,
+ .irq_unmask = locomo_unmask_irq,
+};
+
+static void locomo_setup_irq(struct locomo *lchip)
+{
+ int irq = lchip->irq_base;
+
+ /*
+ * Install handler for IRQ_LOCOMO_HW.
+ */
+ irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
+ irq_set_chip_data(lchip->irq, lchip);
+ irq_set_chained_handler(lchip->irq, locomo_handler);
+
+ /* Install handlers for IRQ_LOCOMO_* */
+ for ( ; irq <= lchip->irq_base + 3; irq++) {
+ irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
+ irq_set_chip_data(irq, lchip);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+}
+
+
+static void locomo_dev_release(struct device *_dev)
+{
+ struct locomo_dev *dev = LOCOMO_DEV(_dev);
+
+ kfree(dev);
+}
+
+static int
+locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info)
+{
+ struct locomo_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * If the parent device has a DMA mask associated with it,
+ * propagate it down to the children.
+ */
+ if (lchip->dev->dma_mask) {
+ dev->dma_mask = *lchip->dev->dma_mask;
+ dev->dev.dma_mask = &dev->dma_mask;
+ }
+
+ dev_set_name(&dev->dev, "%s", info->name);
+ dev->devid = info->devid;
+ dev->dev.parent = lchip->dev;
+ dev->dev.bus = &locomo_bus_type;
+ dev->dev.release = locomo_dev_release;
+ dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask;
+
+ if (info->offset)
+ dev->mapbase = lchip->base + info->offset;
+ else
+ dev->mapbase = 0;
+ dev->length = info->length;
+
+ dev->irq[0] = (lchip->irq_base == NO_IRQ) ?
+ NO_IRQ : lchip->irq_base + info->irq[0];
+
+ ret = device_register(&dev->dev);
+ if (ret) {
+ out:
+ kfree(dev);
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PM
+
+struct locomo_save_data {
+ u16 LCM_GPO;
+ u16 LCM_SPICT;
+ u16 LCM_GPE;
+ u16 LCM_ASD;
+ u16 LCM_SPIMD;
+};
+
+static int locomo_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct locomo *lchip = platform_get_drvdata(dev);
+ struct locomo_save_data *save;
+ unsigned long flags;
+
+ save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL);
+ if (!save)
+ return -ENOMEM;
+
+ lchip->saved_state = save;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+
+ save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */
+ locomo_writel(0x00, lchip->base + LOCOMO_GPO);
+ save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */
+ locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
+ save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */
+ locomo_writel(0x00, lchip->base + LOCOMO_GPE);
+ save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */
+ locomo_writel(0x00, lchip->base + LOCOMO_ASD);
+ save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */
+ locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD);
+
+ locomo_writel(0x00, lchip->base + LOCOMO_PAIF);
+ locomo_writel(0x00, lchip->base + LOCOMO_DAC);
+ locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC);
+
+ if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88))
+ locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */
+ else
+ /* 18MHz already enabled, so no wait */
+ locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */
+
+ locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/
+ locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */
+ locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */
+
+ spin_unlock_irqrestore(&lchip->lock, flags);
+
+ return 0;
+}
+
+static int locomo_resume(struct platform_device *dev)
+{
+ struct locomo *lchip = platform_get_drvdata(dev);
+ struct locomo_save_data *save;
+ unsigned long r;
+ unsigned long flags;
+
+ save = lchip->saved_state;
+ if (!save)
+ return 0;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+
+ locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO);
+ locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
+ locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE);
+ locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD);
+ locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD);
+
+ locomo_writel(0x00, lchip->base + LOCOMO_C32K);
+ locomo_writel(0x90, lchip->base + LOCOMO_TADC);
+
+ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC);
+ r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
+ r &= 0xFEFF;
+ locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
+ locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD);
+
+ spin_unlock_irqrestore(&lchip->lock, flags);
+
+ lchip->saved_state = NULL;
+ kfree(save);
+
+ return 0;
+}
+#endif
+
+
+/**
+ * locomo_probe - probe for a single LoCoMo chip.
+ * @phys_addr: physical address of device.
+ *
+ * Probe for a LoCoMo chip. This must be called
+ * before any other locomo-specific code.
+ *
+ * Returns:
+ * %-ENODEV device not found.
+ * %-EBUSY physical address already marked in-use.
+ * %0 successful.
+ */
+static int
+__locomo_probe(struct device *me, struct resource *mem, int irq)
+{
+ struct locomo_platform_data *pdata = me->platform_data;
+ struct locomo *lchip;
+ unsigned long r;
+ int i, ret = -ENODEV;
+
+ lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL);
+ if (!lchip)
+ return -ENOMEM;
+
+ spin_lock_init(&lchip->lock);
+
+ lchip->dev = me;
+ dev_set_drvdata(lchip->dev, lchip);
+
+ lchip->phys = mem->start;
+ lchip->irq = irq;
+ lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ;
+
+ /*
+ * Map the whole region. This also maps the
+ * registers for our children.
+ */
+ lchip->base = ioremap(mem->start, PAGE_SIZE);
+ if (!lchip->base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* locomo initialize */
+ locomo_writel(0, lchip->base + LOCOMO_ICR);
+ /* KEYBOARD */
+ locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
+
+ /* GPIO */
+ locomo_writel(0, lchip->base + LOCOMO_GPO);
+ locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14))
+ , lchip->base + LOCOMO_GPE);
+ locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14))
+ , lchip->base + LOCOMO_GPD);
+ locomo_writel(0, lchip->base + LOCOMO_GIE);
+
+ /* Frontlight */
+ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+ locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
+
+ /* Longtime timer */
+ locomo_writel(0, lchip->base + LOCOMO_LTINT);
+ /* SPI */
+ locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE);
+
+ locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD);
+ r = locomo_readl(lchip->base + LOCOMO_ASD);
+ r |= 0x8000;
+ locomo_writel(r, lchip->base + LOCOMO_ASD);
+
+ locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD);
+ r = locomo_readl(lchip->base + LOCOMO_HSD);
+ r |= 0x8000;
+ locomo_writel(r, lchip->base + LOCOMO_HSD);
+
+ locomo_writel(128 / 8, lchip->base + LOCOMO_HSC);
+
+ /* XON */
+ locomo_writel(0x80, lchip->base + LOCOMO_TADC);
+ udelay(1000);
+ /* CLK9MEN */
+ r = locomo_readl(lchip->base + LOCOMO_TADC);
+ r |= 0x10;
+ locomo_writel(r, lchip->base + LOCOMO_TADC);
+ udelay(100);
+
+ /* init DAC */
+ r = locomo_readl(lchip->base + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
+ locomo_writel(r, lchip->base + LOCOMO_DAC);
+
+ r = locomo_readl(lchip->base + LOCOMO_VER);
+ printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff));
+
+ /*
+ * The interrupt controller must be initialised before any
+ * other device to ensure that the interrupts are available.
+ */
+ if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ)
+ locomo_setup_irq(lchip);
+
+ for (i = 0; i < ARRAY_SIZE(locomo_devices); i++)
+ locomo_init_one_child(lchip, &locomo_devices[i]);
+ return 0;
+
+ out:
+ kfree(lchip);
+ return ret;
+}
+
+static int locomo_remove_child(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+static void __locomo_remove(struct locomo *lchip)
+{
+ device_for_each_child(lchip->dev, NULL, locomo_remove_child);
+
+ if (lchip->irq != NO_IRQ) {
+ irq_set_chained_handler(lchip->irq, NULL);
+ irq_set_handler_data(lchip->irq, NULL);
+ }
+
+ iounmap(lchip->base);
+ kfree(lchip);
+}
+
+static int locomo_probe(struct platform_device *dev)
+{
+ struct resource *mem;
+ int irq;
+
+ mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ return __locomo_probe(&dev->dev, mem, irq);
+}
+
+static int locomo_remove(struct platform_device *dev)
+{
+ struct locomo *lchip = platform_get_drvdata(dev);
+
+ if (lchip) {
+ __locomo_remove(lchip);
+ platform_set_drvdata(dev, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Not sure if this should be on the system bus or not yet.
+ * We really want some way to register a system device at
+ * the per-machine level, and then have this driver pick
+ * up the registered devices.
+ */
+static struct platform_driver locomo_device_driver = {
+ .probe = locomo_probe,
+ .remove = locomo_remove,
+#ifdef CONFIG_PM
+ .suspend = locomo_suspend,
+ .resume = locomo_resume,
+#endif
+ .driver = {
+ .name = "locomo",
+ },
+};
+
+/*
+ * Get the parent device driver (us) structure
+ * from a child function device
+ */
+static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev)
+{
+ return (struct locomo *)dev_get_drvdata(ldev->dev.parent);
+}
+
+void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir)
+{
+ struct locomo *lchip = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int r;
+
+ if (!lchip)
+ return;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+
+ r = locomo_readl(lchip->base + LOCOMO_GPD);
+ if (dir)
+ r |= bits;
+ else
+ r &= ~bits;
+ locomo_writel(r, lchip->base + LOCOMO_GPD);
+
+ r = locomo_readl(lchip->base + LOCOMO_GPE);
+ if (dir)
+ r |= bits;
+ else
+ r &= ~bits;
+ locomo_writel(r, lchip->base + LOCOMO_GPE);
+
+ spin_unlock_irqrestore(&lchip->lock, flags);
+}
+EXPORT_SYMBOL(locomo_gpio_set_dir);
+
+int locomo_gpio_read_level(struct device *dev, unsigned int bits)
+{
+ struct locomo *lchip = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int ret;
+
+ if (!lchip)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+ ret = locomo_readl(lchip->base + LOCOMO_GPL);
+ spin_unlock_irqrestore(&lchip->lock, flags);
+
+ ret &= bits;
+ return ret;
+}
+EXPORT_SYMBOL(locomo_gpio_read_level);
+
+int locomo_gpio_read_output(struct device *dev, unsigned int bits)
+{
+ struct locomo *lchip = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int ret;
+
+ if (!lchip)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+ ret = locomo_readl(lchip->base + LOCOMO_GPO);
+ spin_unlock_irqrestore(&lchip->lock, flags);
+
+ ret &= bits;
+ return ret;
+}
+EXPORT_SYMBOL(locomo_gpio_read_output);
+
+void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set)
+{
+ struct locomo *lchip = dev_get_drvdata(dev);
+ unsigned long flags;
+ unsigned int r;
+
+ if (!lchip)
+ return;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+
+ r = locomo_readl(lchip->base + LOCOMO_GPO);
+ if (set)
+ r |= bits;
+ else
+ r &= ~bits;
+ locomo_writel(r, lchip->base + LOCOMO_GPO);
+
+ spin_unlock_irqrestore(&lchip->lock, flags);
+}
+EXPORT_SYMBOL(locomo_gpio_write);
+
+static void locomo_m62332_sendbit(void *mapbase, int bit)
+{
+ unsigned int r;
+
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+
+ if (bit & 1) {
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SDAOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ } else {
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SDAOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ }
+
+ udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */
+}
+
+void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel)
+{
+ struct locomo *lchip = locomo_chip_driver(ldev);
+ int i;
+ unsigned char data;
+ unsigned int r;
+ void *mapbase = lchip->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lchip->lock, flags);
+
+ /* Start */
+ udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SDAOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_START_HOLD_TIME); /* 5.0 usec */
+ udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */
+
+ /* Send slave address and W bit (LSB is W bit) */
+ data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT;
+ for (i = 1; i <= 8; i++) {
+ locomo_m62332_sendbit(mapbase, data >> (8 - i));
+ }
+
+ /* Check A bit */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SDAOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
+ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
+ printk(KERN_WARNING "locomo: m62332_senddata Error 1\n");
+ goto out;
+ }
+
+ /* Send Sub address (LSB is channel select) */
+ /* channel = 0 : ch1 select */
+ /* = 1 : ch2 select */
+ data = M62332_SUB_ADDR + channel;
+ for (i = 1; i <= 8; i++) {
+ locomo_m62332_sendbit(mapbase, data >> (8 - i));
+ }
+
+ /* Check A bit */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SDAOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
+ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High indicates an error (no ACK) */
+ printk(KERN_WARNING "locomo: m62332_senddata Error 2\n");
+ goto out;
+ }
+
+ /* Send DAC data */
+ for (i = 1; i <= 8; i++) {
+ locomo_m62332_sendbit(mapbase, dac_data >> (8 - i));
+ }
+
+ /* Check A bit */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SDAOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
+ if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High indicates an error (no ACK) */
+ printk(KERN_WARNING "locomo: m62332_senddata Error 3\n");
+ }
+
+out:
+ /* stop */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r &= ~(LOCOMO_DAC_SCLOEB);
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SDAOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
+ udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */
+
+ r = locomo_readl(mapbase + LOCOMO_DAC);
+ r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
+ locomo_writel(r, mapbase + LOCOMO_DAC);
+ udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
+ udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
+
+ spin_unlock_irqrestore(&lchip->lock, flags);
+}
+EXPORT_SYMBOL(locomo_m62332_senddata);
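+
+/*
+ * Hypothetical usage from a LoCoMo client driver (the ldev pointer and the
+ * chosen value are illustrative only): write a mid-scale value to DAC
+ * channel 1 ("ch1 select" corresponds to channel == 0 above):
+ *
+ *	locomo_m62332_senddata(ldev, 0x80, 0);
+ */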
+
+/*
+ * Frontlight control
+ */
+
+void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
+{
+ unsigned long flags;
+ struct locomo *lchip = locomo_chip_driver(dev);
+
+ if (vr)
+ locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1);
+ else
+ locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
+
+ spin_lock_irqsave(&lchip->lock, flags);
+ locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+ udelay(100);
+ locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
+ locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
+ spin_unlock_irqrestore(&lchip->lock, flags);
+}
+EXPORT_SYMBOL(locomo_frontlight_set);
+
+/*
+ * LoCoMo "Register Access Bus."
+ *
+ * We model this as a regular bus type, and hang devices directly
+ * off this.
+ */
+static int locomo_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct locomo_dev *dev = LOCOMO_DEV(_dev);
+ struct locomo_driver *drv = LOCOMO_DRV(_drv);
+
+ return dev->devid == drv->devid;
+}
+
+static int locomo_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct locomo_dev *ldev = LOCOMO_DEV(dev);
+ struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv && drv->suspend)
+ ret = drv->suspend(ldev, state);
+ return ret;
+}
+
+static int locomo_bus_resume(struct device *dev)
+{
+ struct locomo_dev *ldev = LOCOMO_DEV(dev);
+ struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv && drv->resume)
+ ret = drv->resume(ldev);
+ return ret;
+}
+
+static int locomo_bus_probe(struct device *dev)
+{
+ struct locomo_dev *ldev = LOCOMO_DEV(dev);
+ struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
+ int ret = -ENODEV;
+
+ if (drv->probe)
+ ret = drv->probe(ldev);
+ return ret;
+}
+
+static int locomo_bus_remove(struct device *dev)
+{
+ struct locomo_dev *ldev = LOCOMO_DEV(dev);
+ struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv->remove)
+ ret = drv->remove(ldev);
+ return ret;
+}
+
+struct bus_type locomo_bus_type = {
+ .name = "locomo-bus",
+ .match = locomo_match,
+ .probe = locomo_bus_probe,
+ .remove = locomo_bus_remove,
+ .suspend = locomo_bus_suspend,
+ .resume = locomo_bus_resume,
+};
+
+int locomo_driver_register(struct locomo_driver *driver)
+{
+ driver->drv.bus = &locomo_bus_type;
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(locomo_driver_register);
+
+void locomo_driver_unregister(struct locomo_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(locomo_driver_unregister);
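+
+/*
+ * Illustrative registration of a client driver (all names below are
+ * hypothetical, not taken from this file):
+ *
+ *	static struct locomo_driver example_driver = {
+ *		.drv	= { .name = "locomo-example" },
+ *		.devid	= 0,	/* one of the LOCOMO_DEVID_* values */
+ *		.probe	= example_probe,
+ *	};
+ *
+ *	locomo_driver_register(&example_driver);
+ */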
+
+static int __init locomo_init(void)
+{
+ int ret = bus_register(&locomo_bus_type);
+ if (ret == 0)
+ platform_driver_register(&locomo_device_driver);
+ return ret;
+}
+
+static void __exit locomo_exit(void)
+{
+ platform_driver_unregister(&locomo_device_driver);
+ bus_unregister(&locomo_bus_type);
+}
+
+module_init(locomo_init);
+module_exit(locomo_exit);
+
+MODULE_DESCRIPTION("Sharp LoCoMo core driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
new file mode 100644
index 000000000..370236dd1
--- /dev/null
+++ b/arch/arm/common/mcpm_entry.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
+#include <asm/mcpm.h>
+#include <asm/cacheflush.h>
+#include <asm/idmap.h>
+#include <asm/cputype.h>
+
+extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
+
+void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
+{
+ unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+ mcpm_entry_vectors[cluster][cpu] = val;
+ sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
+}
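+
+/*
+ * Note: passing a NULL ptr stores 0 in the vector, which mcpm_entry_point
+ * (mcpm_head.S) treats as "gate closed" and keeps the CPU parked in WFE
+ * until a real entry address is published.
+ */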
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+ if (platform_ops)
+ return -EBUSY;
+ platform_ops = ops;
+ return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+ if (!platform_ops)
+ return -EUNATCH; /* try not to shadow power_up errors */
+ might_sleep();
+ return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /*
+ * Do this before calling into the power_down method,
+ * as it might not always be safe to do afterwards.
+ */
+ setup_mm_for_reboot();
+
+ platform_ops->power_down();
+
+ /*
+ * It is possible for a power_up request to happen concurrently
+ * with a power_down request for the same CPU. In this case the
+ * power_down method might not be able to actually enter a
+ * powered down state with the WFI instruction if the power_up
+ * method has removed the required reset condition. The
+ * power_down method is then allowed to return. We must perform
+ * a re-entry in the kernel as if the power_up method had just
+ * deasserted reset on the CPU.
+ *
+ * To simplify race issues, the platform specific implementation
+ * must accommodate the possibility of unordered calls to
+ * power_down and power_up with a usage count. Therefore, if a
+ * call to power_up is issued for a CPU that is not down, then
+ * the next call to power_down must not attempt a full shutdown
+ * but only do the minimum (normally disabling L1 cache and CPU
+ * coherency) and return just as if a concurrent power_up request
+ * had happened as described above.
+ */
+
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+
+ /* should never get here */
+ BUG();
+}
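+
+/*
+ * Sketch of the usage-count rule described above (hypothetical backend, for
+ * illustration only): the platform keeps a per-CPU counter that power_up
+ * increments and power_down decrements; only when the counter reaches zero
+ * does power_down attempt the full shutdown (disable the L1 cache, leave
+ * coherency, WFI), otherwise it performs the minimal teardown and returns.
+ */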
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+ phys_reset_t phys_reset;
+
+ BUG_ON(!platform_ops);
+ BUG_ON(!irqs_disabled());
+
+ /* Very similar to mcpm_cpu_power_down() */
+ setup_mm_for_reboot();
+ platform_ops->suspend(expected_residency);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset(virt_to_phys(mcpm_entry_point));
+ BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+ if (!platform_ops)
+ return -EUNATCH;
+ if (platform_ops->powered_up)
+ platform_ops->powered_up();
+ return 0;
+}
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ * This must be called at the point of committing to teardown of a CPU.
+ * The CPU cache (SCTLR.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ * cluster can be torn down without disrupting this CPU.
+ * To avoid deadlocks, this must be called before a CPU is powered down.
+ * The CPU cache (SCTLR.C bit) is expected to be off.
+ * However L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ * CLUSTER_UP: no destructive teardown was done and the cluster has been
+ * restored to the previous state (CPU cache still active); or
+ * CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ * (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+ dmb();
+ mcpm_sync.clusters[cluster].cluster = state;
+ sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+ dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete. CPU cache expected to be active.
+ *
+ * Returns:
+ * false: the critical section was not entered because an inbound CPU was
+ * observed, or the cluster is already being set up;
+ * true: the critical section was entered: it is now safe to tear down the
+ * cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int i;
+ struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+ /* Warn inbound CPUs that the cluster is being torn down: */
+ c->cluster = CLUSTER_GOING_DOWN;
+ sync_cache_w(&c->cluster);
+
+ /* Back out if the inbound cluster is already in the critical region: */
+ sync_cache_r(&c->inbound);
+ if (c->inbound == INBOUND_COMING_UP)
+ goto abort;
+
+ /*
+ * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+ * teardown is complete on each CPU before tearing down the cluster.
+ *
+ * If any CPU has been woken up again from the DOWN state, then we
+ * shouldn't be taking the cluster down at all: abort in that case.
+ */
+ sync_cache_r(&c->cpus);
+ for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+ int cpustate;
+
+ if (i == cpu)
+ continue;
+
+ while (1) {
+ cpustate = c->cpus[i].cpu;
+ if (cpustate != CPU_GOING_DOWN)
+ break;
+
+ wfe();
+ sync_cache_r(&c->cpus[i].cpu);
+ }
+
+ switch (cpustate) {
+ case CPU_DOWN:
+ continue;
+
+ default:
+ goto abort;
+ }
+ }
+
+ return true;
+
+abort:
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+ return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+ sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+ return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+ void (*power_up_setup)(unsigned int affinity_level))
+{
+ unsigned int i, j, mpidr, this_cluster;
+
+ BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+ BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+ /*
+ * Set initial CPU and cluster states.
+ * Only one cluster is assumed to be active at this point.
+ */
+ for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+ mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+ mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+ for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+ mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+ }
+ mpidr = read_cpuid_mpidr();
+ this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ for_each_online_cpu(i)
+ mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+ mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+ sync_cache_w(&mcpm_sync);
+
+ if (power_up_setup) {
+ mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+ sync_cache_w(&mcpm_power_up_setup_phys);
+ }
+
+ return 0;
+}
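+
+/*
+ * Typical bring-up order for a platform backend (illustrative, not mandated
+ * by this file; example_ops and example_power_up_setup are hypothetical):
+ *
+ *	mcpm_platform_register(&example_ops);
+ *	mcpm_sync_init(example_power_up_setup);
+ *	mcpm_smp_set_ops();		/* from mcpm_platsmp.c below */
+ */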
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
new file mode 100644
index 000000000..8178705c4
--- /dev/null
+++ b/arch/arm/common/mcpm_head.S
@@ -0,0 +1,219 @@
+/*
+ * arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
+ */
+
+#include <linux/linkage.h>
+#include <asm/mcpm.h>
+
+#include "vlock.h"
+
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
+ .macro pr_dbg string
+#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
+ b 1901f
+1902: .asciz "CPU"
+1903: .asciz " cluster"
+1904: .asciz ": \string"
+ .align
+1901: adr r0, 1902b
+ bl printascii
+ mov r0, r9
+ bl printhex8
+ adr r0, 1903b
+ bl printascii
+ mov r0, r10
+ bl printhex8
+ adr r0, 1904b
+ bl printascii
+#endif
+ .endm
+
+ .arm
+ .align
+
+ENTRY(mcpm_entry_point)
+
+ THUMB( adr r12, BSYM(1f) )
+ THUMB( bx r12 )
+ THUMB( .thumb )
+1:
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ ubfx r9, r0, #0, #8 @ r9 = cpu
+ ubfx r10, r0, #8, #8 @ r10 = cluster
+ mov r3, #MAX_CPUS_PER_CLUSTER
+ mla r4, r3, r10, r9 @ r4 = canonical CPU index
+ cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
+ blo 2f
+
+ /* We didn't expect this CPU. Try to cheaply make it quiet. */
+1: wfi
+ wfe
+ b 1b
+
+2: pr_dbg "kernel mcpm_entry_point\n"
+
+ /*
+ * MMU is off so we need to get to various variables in a
+ * position independent way.
+ */
+ adr r5, 3f
+ ldmia r5, {r6, r7, r8, r11}
+ add r6, r5, r6 @ r6 = mcpm_entry_vectors
+ ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
+ add r8, r5, r8 @ r8 = mcpm_sync
+ add r11, r5, r11 @ r11 = first_man_locks
+
+ mov r0, #MCPM_SYNC_CLUSTER_SIZE
+ mla r8, r0, r10, r8 @ r8 = sync cluster base
+
+ @ Signal that this CPU is coming UP:
+ mov r0, #CPU_COMING_UP
+ mov r5, #MCPM_SYNC_CPU_SIZE
+ mla r5, r9, r5, r8 @ r5 = sync cpu address
+ strb r0, [r5]
+
+ @ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+ @ state, because there is at least one active CPU (this CPU).
+
+ mov r0, #VLOCK_SIZE
+ mla r11, r0, r10, r11 @ r11 = cluster first man lock
+ mov r0, r11
+ mov r1, r9 @ cpu
+ bl vlock_trylock @ implies DMB
+
+ cmp r0, #0 @ failed to get the lock?
+ bne mcpm_setup_wait @ wait for cluster setup if so
+
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP @ cluster already up?
+ bne mcpm_setup @ if not, set up the cluster
+
+ @ Otherwise, release the first man lock and skip setup:
+ mov r0, r11
+ bl vlock_unlock
+ b mcpm_setup_complete
+
+mcpm_setup:
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ @ Signal that the cluster is being brought up:
+ mov r0, #INBOUND_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dmb
+
+ @ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+ @ point onwards will observe INBOUND_COMING_UP and abort.
+
+ @ Wait for any previously-pending cluster teardown operations to abort
+ @ or complete:
+mcpm_teardown_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_GOING_DOWN
+ bne first_man_setup
+ wfe
+ b mcpm_teardown_wait
+
+first_man_setup:
+ dmb
+
+ @ If the outbound gave up before teardown started, skip cluster setup:
+
+ cmp r0, #CLUSTER_UP
+ beq mcpm_setup_leave
+
+ @ power_up_setup is now responsible for setting up the cluster:
+
+ cmp r7, #0
+ mov r0, #1 @ second (cluster) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ mov r0, #CLUSTER_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ dmb
+
+mcpm_setup_leave:
+ @ Leave the cluster setup critical section:
+
+ mov r0, #INBOUND_NOT_COMING_UP
+ strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+ dsb
+ sev
+
+ mov r0, r11
+ bl vlock_unlock @ implies DMB
+ b mcpm_setup_complete
+
+ @ In the contended case, non-first men wait here for cluster setup
+ @ to complete:
+mcpm_setup_wait:
+ ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+ cmp r0, #CLUSTER_UP
+ wfene
+ bne mcpm_setup_wait
+ dmb
+
+mcpm_setup_complete:
+ @ If a platform-specific CPU setup hook is needed, it is
+ @ called from here.
+
+ cmp r7, #0
+ mov r0, #0 @ first (CPU) affinity level
+ blxne r7 @ Call power_up_setup if defined
+ dmb
+
+ @ Mark the CPU as up:
+
+ mov r0, #CPU_UP
+ strb r0, [r5]
+
+ @ Observability order of CPU_UP and opening of the gate does not matter.
+
+mcpm_entry_gated:
+ ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
+ cmp r5, #0
+ wfeeq
+ beq mcpm_entry_gated
+ dmb
+
+ pr_dbg "released\n"
+ bx r5
+
+ .align 2
+
+3: .word mcpm_entry_vectors - .
+ .word mcpm_power_up_setup_phys - 3b
+ .word mcpm_sync - 3b
+ .word first_man_locks - 3b
+
+ENDPROC(mcpm_entry_point)
+
+ .bss
+
+ .align CACHE_WRITEBACK_ORDER
+ .type first_man_locks, #object
+first_man_locks:
+ .space VLOCK_SIZE * MAX_NR_CLUSTERS
+ .align CACHE_WRITEBACK_ORDER
+
+ .type mcpm_entry_vectors, #object
+ENTRY(mcpm_entry_vectors)
+ .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+ .type mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+ .space 4 @ set by mcpm_sync_init()
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
new file mode 100644
index 000000000..3caed0db6
--- /dev/null
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/arm/common/mcpm_platsmp.c
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Code to handle secondary CPU bringup and hotplug for the cluster power API.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <asm/mcpm.h>
+#include <asm/smp.h>
+#include <asm/smp_plat.h>
+
+static void __init simple_smp_init_cpus(void)
+{
+}
+
+static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned int mpidr, pcpu, pcluster, ret;
+ extern void secondary_startup(void);
+
+ mpidr = cpu_logical_map(cpu);
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
+ __func__, cpu, pcpu, pcluster);
+
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ ret = mcpm_cpu_power_up(pcpu, pcluster);
+ if (ret)
+ return ret;
+ mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
+ arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ dsb_sev();
+ return 0;
+}
+
+static void __cpuinit mcpm_secondary_init(unsigned int cpu)
+{
+ mcpm_cpu_powered_up();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int mcpm_cpu_disable(unsigned int cpu)
+{
+ /*
+ * We assume all CPUs may be shut down.
+ * This would be the hook to use for eventual Secure
+ * OS migration requests as described in the PSCI spec.
+ */
+ return 0;
+}
+
+static void mcpm_cpu_die(unsigned int cpu)
+{
+ unsigned int mpidr, pcpu, pcluster;
+ mpidr = read_cpuid_mpidr();
+ pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ mcpm_set_entry_vector(pcpu, pcluster, NULL);
+ mcpm_cpu_power_down();
+}
+
+#endif
+
+static struct smp_operations __initdata mcpm_smp_ops = {
+ .smp_init_cpus = simple_smp_init_cpus,
+ .smp_boot_secondary = mcpm_boot_secondary,
+ .smp_secondary_init = mcpm_secondary_init,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = mcpm_cpu_disable,
+ .cpu_die = mcpm_cpu_die,
+#endif
+};
+
+void __init mcpm_smp_set_ops(void)
+{
+ smp_set_ops(&mcpm_smp_ops);
+}
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
new file mode 100644
index 000000000..e57d7e5bf
--- /dev/null
+++ b/arch/arm/common/sa1111.c
@@ -0,0 +1,1458 @@
+/*
+ * linux/arch/arm/common/sa1111.c
+ *
+ * SA1111 support
+ *
+ * Original code by John Dorsey
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains all generic SA1111 support.
+ *
+ * All initialization functions provided here are intended to be called
+ * from machine specific code with proper arguments when required.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/mach/irq.h>
+#include <asm/mach-types.h>
+#include <asm/sizes.h>
+
+#include <asm/hardware/sa1111.h>
+
+/* SA1111 IRQs */
+#define IRQ_GPAIN0 (0)
+#define IRQ_GPAIN1 (1)
+#define IRQ_GPAIN2 (2)
+#define IRQ_GPAIN3 (3)
+#define IRQ_GPBIN0 (4)
+#define IRQ_GPBIN1 (5)
+#define IRQ_GPBIN2 (6)
+#define IRQ_GPBIN3 (7)
+#define IRQ_GPBIN4 (8)
+#define IRQ_GPBIN5 (9)
+#define IRQ_GPCIN0 (10)
+#define IRQ_GPCIN1 (11)
+#define IRQ_GPCIN2 (12)
+#define IRQ_GPCIN3 (13)
+#define IRQ_GPCIN4 (14)
+#define IRQ_GPCIN5 (15)
+#define IRQ_GPCIN6 (16)
+#define IRQ_GPCIN7 (17)
+#define IRQ_MSTXINT (18)
+#define IRQ_MSRXINT (19)
+#define IRQ_MSSTOPERRINT (20)
+#define IRQ_TPTXINT (21)
+#define IRQ_TPRXINT (22)
+#define IRQ_TPSTOPERRINT (23)
+#define SSPXMTINT (24)
+#define SSPRCVINT (25)
+#define SSPROR (26)
+#define AUDXMTDMADONEA (32)
+#define AUDRCVDMADONEA (33)
+#define AUDXMTDMADONEB (34)
+#define AUDRCVDMADONEB (35)
+#define AUDTFSR (36)
+#define AUDRFSR (37)
+#define AUDTUR (38)
+#define AUDROR (39)
+#define AUDDTS (40)
+#define AUDRDD (41)
+#define AUDSTO (42)
+#define IRQ_USBPWR (43)
+#define IRQ_HCIM (44)
+#define IRQ_HCIBUFFACC (45)
+#define IRQ_HCIRMTWKP (46)
+#define IRQ_NHCIMFCIR (47)
+#define IRQ_USB_PORT_RESUME (48)
+#define IRQ_S0_READY_NINT (49)
+#define IRQ_S1_READY_NINT (50)
+#define IRQ_S0_CD_VALID (51)
+#define IRQ_S1_CD_VALID (52)
+#define IRQ_S0_BVD1_STSCHG (53)
+#define IRQ_S1_BVD1_STSCHG (54)
+#define SA1111_IRQ_NR (55)
+
+extern void sa1110_mb_enable(void);
+extern void sa1110_mb_disable(void);
+
+/*
+ * We keep the following data for the overall SA1111. Note that the
+ * struct device and struct resource are "fake"; they should be supplied
+ * by the bus above us. However, in the interests of getting all SA1111
+ * drivers converted over to the device model, we provide this as an
+ * anchor point for all the other drivers.
+ */
+struct sa1111 {
+ struct device *dev;
+ struct clk *clk;
+ unsigned long phys;
+ int irq;
+ int irq_base; /* base for cascaded on-chip IRQs */
+ spinlock_t lock;
+ void __iomem *base;
+ struct sa1111_platform_data *pdata;
+#ifdef CONFIG_PM
+ void *saved_state;
+#endif
+};
+
+/*
+ * We _really_ need to eliminate this. Its only users
+ * are the PWM and DMA checking code.
+ */
+static struct sa1111 *g_sa1111;
+
+struct sa1111_dev_info {
+ unsigned long offset;
+ unsigned long skpcr_mask;
+ bool dma;
+ unsigned int devid;
+ unsigned int irq[6];
+};
+
+static struct sa1111_dev_info sa1111_devices[] = {
+ {
+ .offset = SA1111_USB,
+ .skpcr_mask = SKPCR_UCLKEN,
+ .dma = true,
+ .devid = SA1111_DEVID_USB,
+ .irq = {
+ IRQ_USBPWR,
+ IRQ_HCIM,
+ IRQ_HCIBUFFACC,
+ IRQ_HCIRMTWKP,
+ IRQ_NHCIMFCIR,
+ IRQ_USB_PORT_RESUME
+ },
+ },
+ {
+ .offset = 0x0600,
+ .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN,
+ .dma = true,
+ .devid = SA1111_DEVID_SAC,
+ .irq = {
+ AUDXMTDMADONEA,
+ AUDXMTDMADONEB,
+ AUDRCVDMADONEA,
+ AUDRCVDMADONEB
+ },
+ },
+ {
+ .offset = 0x0800,
+ .skpcr_mask = SKPCR_SCLKEN,
+ .devid = SA1111_DEVID_SSP,
+ },
+ {
+ .offset = SA1111_KBD,
+ .skpcr_mask = SKPCR_PTCLKEN,
+ .devid = SA1111_DEVID_PS2_KBD,
+ .irq = {
+ IRQ_TPRXINT,
+ IRQ_TPTXINT
+ },
+ },
+ {
+ .offset = SA1111_MSE,
+ .skpcr_mask = SKPCR_PMCLKEN,
+ .devid = SA1111_DEVID_PS2_MSE,
+ .irq = {
+ IRQ_MSRXINT,
+ IRQ_MSTXINT
+ },
+ },
+ {
+ .offset = 0x1800,
+ .skpcr_mask = 0,
+ .devid = SA1111_DEVID_PCMCIA,
+ .irq = {
+ IRQ_S0_READY_NINT,
+ IRQ_S0_CD_VALID,
+ IRQ_S0_BVD1_STSCHG,
+ IRQ_S1_READY_NINT,
+ IRQ_S1_CD_VALID,
+ IRQ_S1_BVD1_STSCHG,
+ },
+ },
+};
+
+/*
+ * SA1111 interrupt support. Since clearing an IRQ while there are
+ * active IRQs causes the interrupt output to pulse, the upper levels
+ * will call us again if there are more interrupts to process.
+ */
+static void
+sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int stat0, stat1, i;
+ struct sa1111 *sachip = irq_get_handler_data(irq);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+
+ stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
+ stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1);
+
+ sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
+
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
+
+ if (stat0 == 0 && stat1 == 0) {
+ do_bad_IRQ(irq, desc);
+ return;
+ }
+
+ for (i = 0; stat0; i++, stat0 >>= 1)
+ if (stat0 & 1)
+ generic_handle_irq(i + sachip->irq_base);
+
+ for (i = 32; stat1; i++, stat1 >>= 1)
+ if (stat1 & 1)
+ generic_handle_irq(i + sachip->irq_base);
+
+ /* For level-based interrupts */
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+}
+
+#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
+#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
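+
+/*
+ * Interrupt sources 0..31 live in the INT*0 registers and sources 32..54 in
+ * the INT*1 registers, so e.g. IRQ_S0_READY_NINT (49) maps to bit
+ * 49 - 32 = 17 of the "high" set.
+ */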
+
+static void sa1111_ack_irq(struct irq_data *d)
+{
+}
+
+static void sa1111_mask_lowirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned long ie0;
+
+ ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie0 &= ~SA1111_IRQMASK_LO(d->irq);
+ writel(ie0, mapbase + SA1111_INTEN0);
+}
+
+static void sa1111_unmask_lowirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned long ie0;
+
+ ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie0 |= SA1111_IRQMASK_LO(d->irq);
+ sa1111_writel(ie0, mapbase + SA1111_INTEN0);
+}
+
+/*
+ * Attempt to re-trigger the interrupt. The SA1111 contains a register
+ * (INTSET) which claims to do this. However, in practice no amount of
+ * manipulation of INTEN and INTSET guarantees that the interrupt will
+ * be triggered. In fact, it's very difficult, if not impossible, to get
+ * INTSET to re-trigger the interrupt.
+ */
+static int sa1111_retrigger_lowirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_LO(d->irq);
+ unsigned long ip0;
+ int i;
+
+ ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
+ for (i = 0; i < 8; i++) {
+ sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
+ sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
+ if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
+ break;
+ }
+
+ if (i == 8)
+ printk(KERN_ERR "Danger Will Robinson: failed to "
+ "re-trigger IRQ%d\n", d->irq);
+ return i == 8 ? -1 : 0;
+}
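+
+/*
+ * The workaround above briefly inverts the line's polarity in INTPOL0 and
+ * restores it, making an already-asserted level look like a fresh edge; the
+ * loop gives the status bit up to eight chances to latch.
+ */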
+
+static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_LO(d->irq);
+ unsigned long ip0;
+
+ if (flags == IRQ_TYPE_PROBE)
+ return 0;
+
+ if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
+ return -EINVAL;
+
+ ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
+ if (flags & IRQ_TYPE_EDGE_RISING)
+ ip0 &= ~mask;
+ else
+ ip0 |= mask;
+ sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
+ sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0);
+
+ return 0;
+}
+
+static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_LO(d->irq);
+ unsigned long we0;
+
+ we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
+ if (on)
+ we0 |= mask;
+ else
+ we0 &= ~mask;
+ sa1111_writel(we0, mapbase + SA1111_WAKEEN0);
+
+ return 0;
+}
+
+static struct irq_chip sa1111_low_chip = {
+ .name = "SA1111-l",
+ .irq_ack = sa1111_ack_irq,
+ .irq_mask = sa1111_mask_lowirq,
+ .irq_unmask = sa1111_unmask_lowirq,
+ .irq_retrigger = sa1111_retrigger_lowirq,
+ .irq_set_type = sa1111_type_lowirq,
+ .irq_set_wake = sa1111_wake_lowirq,
+};
+
+static void sa1111_mask_highirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned long ie1;
+
+ ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
+ ie1 &= ~SA1111_IRQMASK_HI(d->irq);
+ sa1111_writel(ie1, mapbase + SA1111_INTEN1);
+}
+
+static void sa1111_unmask_highirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned long ie1;
+
+ ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
+ ie1 |= SA1111_IRQMASK_HI(d->irq);
+ sa1111_writel(ie1, mapbase + SA1111_INTEN1);
+}
+
+/*
+ * Attempt to re-trigger the interrupt. The SA1111 contains a register
+ * (INTSET) which claims to do this. However, in practice no amount of
+ * manipulation of INTEN and INTSET guarantees that the interrupt will
+ * be triggered. In fact, it's very difficult, if not impossible, to get
+ * INTSET to re-trigger the interrupt.
+ */
+static int sa1111_retrigger_highirq(struct irq_data *d)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_HI(d->irq);
+ unsigned long ip1;
+ int i;
+
+ ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
+ for (i = 0; i < 8; i++) {
+ sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1);
+ sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
+ if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
+ break;
+ }
+
+ if (i == 8)
+ printk(KERN_ERR "Danger Will Robinson: failed to "
+ "re-trigger IRQ%d\n", d->irq);
+ return i == 8 ? -1 : 0;
+}
+
+static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_HI(d->irq);
+ unsigned long ip1;
+
+ if (flags == IRQ_TYPE_PROBE)
+ return 0;
+
+ if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
+ return -EINVAL;
+
+ ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
+ if (flags & IRQ_TYPE_EDGE_RISING)
+ ip1 &= ~mask;
+ else
+ ip1 |= mask;
+ sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
+ sa1111_writel(ip1, mapbase + SA1111_WAKEPOL1);
+
+ return 0;
+}
+
+static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
+{
+ struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
+ void __iomem *mapbase = sachip->base + SA1111_INTC;
+ unsigned int mask = SA1111_IRQMASK_HI(d->irq);
+ unsigned long we1;
+
+ we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
+ if (on)
+ we1 |= mask;
+ else
+ we1 &= ~mask;
+ sa1111_writel(we1, mapbase + SA1111_WAKEEN1);
+
+ return 0;
+}
+
+static struct irq_chip sa1111_high_chip = {
+ .name = "SA1111-h",
+ .irq_ack = sa1111_ack_irq,
+ .irq_mask = sa1111_mask_highirq,
+ .irq_unmask = sa1111_unmask_highirq,
+ .irq_retrigger = sa1111_retrigger_highirq,
+ .irq_set_type = sa1111_type_highirq,
+ .irq_set_wake = sa1111_wake_highirq,
+};
+
+static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
+{
+ void __iomem *irqbase = sachip->base + SA1111_INTC;
+ unsigned i, irq;
+ int ret;
+
+ /*
+ * We're guaranteed that this region hasn't been taken.
+ */
+ request_mem_region(sachip->phys + SA1111_INTC, 512, "irq");
+
+ ret = irq_alloc_descs(-1, irq_base, SA1111_IRQ_NR, -1);
+ if (ret <= 0) {
+ dev_err(sachip->dev, "unable to allocate %u irqs: %d\n",
+ SA1111_IRQ_NR, ret);
+ if (ret == 0)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ sachip->irq_base = ret;
+
+ /* disable all IRQs */
+ sa1111_writel(0, irqbase + SA1111_INTEN0);
+ sa1111_writel(0, irqbase + SA1111_INTEN1);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN0);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+
+ /*
+ * detect on rising edge. Note: Feb 2001 Errata for SA1111
+ * specifies that S0ReadyInt and S1ReadyInt should be '1'.
+ */
+ sa1111_writel(0, irqbase + SA1111_INTPOL0);
+ sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
+ SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+ irqbase + SA1111_INTPOL1);
+
+ /* clear all IRQs */
+ sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
+ sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
+
+ for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
+ irq = sachip->irq_base + i;
+ irq_set_chip_and_handler(irq, &sa1111_low_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, sachip);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
+ irq = sachip->irq_base + i;
+ irq_set_chip_and_handler(irq, &sa1111_high_chip,
+ handle_edge_irq);
+ irq_set_chip_data(irq, sachip);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ /*
+ * Register SA1111 interrupt
+ */
+ irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
+ irq_set_handler_data(sachip->irq, sachip);
+ irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
+
+ dev_info(sachip->dev, "Providing IRQ%u-%u\n",
+ sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1);
+
+ return 0;
+}
+
+/*
+ * Bring the SA1111 out of reset. This requires a set procedure:
+ * 1. nRESET asserted (by hardware)
+ * 2. CLK turned on from SA1110
+ * 3. nRESET deasserted
+ * 4. VCO turned on, PLL_BYPASS turned off
+ * 5. Wait lock time, then assert RCLKEn
+ * 6. PCR set to allow clocking of individual functions
+ *
+ * Until we've done this, the only registers we can access are:
+ * SBI_SKCR
+ * SBI_SMCR
+ * SBI_SKID
+ */
+static void sa1111_wake(struct sa1111 *sachip)
+{
+ unsigned long flags, r;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+
+ clk_enable(sachip->clk);
+
+ /*
+ * Turn VCO on, and disable PLL Bypass.
+ */
+ r = sa1111_readl(sachip->base + SA1111_SKCR);
+ r &= ~SKCR_VCO_OFF;
+ sa1111_writel(r, sachip->base + SA1111_SKCR);
+ r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
+ sa1111_writel(r, sachip->base + SA1111_SKCR);
+
+ /*
+ * Wait lock time. SA1111 manual _doesn't_
+ * specify a figure for this! We choose 100us.
+ */
+ udelay(100);
+
+ /*
+ * Enable RCLK. We also ensure that RDYEN is set.
+ */
+ r |= SKCR_RCLKEN | SKCR_RDYEN;
+ sa1111_writel(r, sachip->base + SA1111_SKCR);
+
+ /*
+ * Wait 14 RCLK cycles for the chip to finish coming out
+ * of reset. (RCLK=24MHz). This is 590ns.
+ */
+ udelay(1);
+
+ /*
+ * Ensure all clocks are initially off.
+ */
+ sa1111_writel(0, sachip->base + SA1111_SKPCR);
+
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+
+#ifdef CONFIG_ARCH_SA1100
+
+static u32 sa1111_dma_mask[] = {
+ ~0,
+ ~(1 << 20),
+ ~(1 << 23),
+ ~(1 << 24),
+ ~(1 << 25),
+ ~(1 << 20),
+ ~(1 << 20),
+ 0,
+};
+
+/*
+ * Configure the SA1111 shared memory controller.
+ */
+void
+sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
+ unsigned int cas_latency)
+{
+ unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC);
+
+ if (cas_latency == 3)
+ smcr |= SMCR_CLAT;
+
+ sa1111_writel(smcr, sachip->base + SA1111_SMCR);
+
+ /*
+ * Now clear the bits in the DMA mask to work around the SA1111
+ * DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update, June 2000, Erratum #7).
+ */
+ if (sachip->dev->dma_mask)
+ *sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2];
+
+ sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
+}
+#endif
+
+static void sa1111_dev_release(struct device *_dev)
+{
+ struct sa1111_dev *dev = SA1111_DEV(_dev);
+
+ kfree(dev);
+}
+
+static int
+sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
+ struct sa1111_dev_info *info)
+{
+ struct sa1111_dev *dev;
+ unsigned i;
+ int ret;
+
+ dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ device_initialize(&dev->dev);
+ dev_set_name(&dev->dev, "%4.4lx", info->offset);
+ dev->devid = info->devid;
+ dev->dev.parent = sachip->dev;
+ dev->dev.bus = &sa1111_bus_type;
+ dev->dev.release = sa1111_dev_release;
+ dev->res.start = sachip->phys + info->offset;
+ dev->res.end = dev->res.start + 511;
+ dev->res.name = dev_name(&dev->dev);
+ dev->res.flags = IORESOURCE_MEM;
+ dev->mapbase = sachip->base + info->offset;
+ dev->skpcr_mask = info->skpcr_mask;
+
+ for (i = 0; i < ARRAY_SIZE(info->irq); i++)
+ dev->irq[i] = sachip->irq_base + info->irq[i];
+
+ /*
+ * If the parent device has a DMA mask associated with it, and
+ * this child supports DMA, propagate it down to the children.
+ */
+ if (info->dma && sachip->dev->dma_mask) {
+ dev->dma_mask = *sachip->dev->dma_mask;
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask;
+ }
+
+ ret = request_resource(parent, &dev->res);
+ if (ret) {
+ dev_err(sachip->dev, "failed to allocate resource for %s\n",
+ dev->res.name);
+ goto err_resource;
+ }
+
+ ret = device_add(&dev->dev);
+ if (ret)
+ goto err_add;
+ return 0;
+
+ err_add:
+ release_resource(&dev->res);
+ err_resource:
+ put_device(&dev->dev);
+ err_alloc:
+ return ret;
+}
+
+/**
+ * sa1111_probe - probe for a single SA1111 chip.
+ * @phys_addr: physical address of device.
+ *
+ * Probe for a SA1111 chip. This must be called
+ * before any other SA1111-specific code.
+ *
+ * Returns:
+ * %-ENODEV device not found.
+ * %-EBUSY physical address already marked in-use.
+ * %-EINVAL no platform data passed
+ * %0 successful.
+ */
+static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
+{
+ struct sa1111_platform_data *pd = me->platform_data;
+ struct sa1111 *sachip;
+ unsigned long id;
+ unsigned int has_devs;
+ int i, ret = -ENODEV;
+
+ if (!pd)
+ return -EINVAL;
+
+ sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL);
+ if (!sachip)
+ return -ENOMEM;
+
+ sachip->clk = clk_get(me, "SA1111_CLK");
+ if (IS_ERR(sachip->clk)) {
+ ret = PTR_ERR(sachip->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(sachip->clk);
+ if (ret)
+ goto err_clkput;
+
+ spin_lock_init(&sachip->lock);
+
+ sachip->dev = me;
+ dev_set_drvdata(sachip->dev, sachip);
+
+ sachip->pdata = pd;
+ sachip->phys = mem->start;
+ sachip->irq = irq;
+
+ /*
+ * Map the whole region. This also maps the
+ * registers for our children.
+ */
+ sachip->base = ioremap(mem->start, PAGE_SIZE * 2);
+ if (!sachip->base) {
+ ret = -ENOMEM;
+ goto err_clk_unprep;
+ }
+
+ /*
+ * Probe for the chip. Only touch the SBI registers.
+ */
+ id = sa1111_readl(sachip->base + SA1111_SKID);
+ if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
+ printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id);
+ ret = -ENODEV;
+ goto err_unmap;
+ }
+
+ printk(KERN_INFO "SA1111 Microprocessor Companion Chip: "
+ "silicon revision %lx, metal revision %lx\n",
+ (id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK));
+
+ /*
+ * We found it. Wake the chip up, and initialise.
+ */
+ sa1111_wake(sachip);
+
+ /*
+ * The interrupt controller must be initialised before any
+ * other device to ensure that the interrupts are available.
+ */
+ if (sachip->irq != NO_IRQ) {
+ ret = sa1111_setup_irq(sachip, pd->irq_base);
+ if (ret)
+ goto err_unmap;
+ }
+
+#ifdef CONFIG_ARCH_SA1100
+ {
+ unsigned int val;
+
+ /*
+ * The SDRAM configuration of the SA1110 and the SA1111 must
+ * match. This is very important to ensure that SA1111 accesses
+ * don't corrupt the SDRAM. Note that this ungates the SA1111's
+ * MBGNT signal, so we must have called sa1110_mb_disable()
+ * beforehand.
+ */
+ sa1111_configure_smc(sachip, 1,
+ FExtr(MDCNFG, MDCNFG_SA1110_DRAC0),
+ FExtr(MDCNFG, MDCNFG_SA1110_TDL0));
+
+ /*
+ * We only need to turn on DCLK whenever we want to use the
+ * DMA. It can otherwise be held firmly in the off position.
+ * (currently, we always enable it.)
+ */
+ val = sa1111_readl(sachip->base + SA1111_SKPCR);
+ sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
+
+ /*
+ * Enable the SA1110 memory bus request and grant signals.
+ */
+ sa1110_mb_enable();
+ }
+#endif
+
+ g_sa1111 = sachip;
+
+ has_devs = ~0;
+ if (pd)
+ has_devs &= ~pd->disable_devs;
+
+ for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++)
+ if (sa1111_devices[i].devid & has_devs)
+ sa1111_init_one_child(sachip, mem, &sa1111_devices[i]);
+
+ return 0;
+
+ err_unmap:
+ iounmap(sachip->base);
+ err_clk_unprep:
+ clk_unprepare(sachip->clk);
+ err_clkput:
+ clk_put(sachip->clk);
+ err_free:
+ kfree(sachip);
+ return ret;
+}
+
+static int sa1111_remove_one(struct device *dev, void *data)
+{
+ struct sa1111_dev *sadev = SA1111_DEV(dev);
+ device_del(&sadev->dev);
+ release_resource(&sadev->res);
+ put_device(&sadev->dev);
+ return 0;
+}
+
+static void __sa1111_remove(struct sa1111 *sachip)
+{
+ void __iomem *irqbase = sachip->base + SA1111_INTC;
+
+ device_for_each_child(sachip->dev, NULL, sa1111_remove_one);
+
+ /* disable all IRQs */
+ sa1111_writel(0, irqbase + SA1111_INTEN0);
+ sa1111_writel(0, irqbase + SA1111_INTEN1);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN0);
+ sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+
+ clk_disable(sachip->clk);
+ clk_unprepare(sachip->clk);
+
+ if (sachip->irq != NO_IRQ) {
+ irq_set_chained_handler(sachip->irq, NULL);
+ irq_set_handler_data(sachip->irq, NULL);
+ irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
+
+ release_mem_region(sachip->phys + SA1111_INTC, 512);
+ }
+
+ iounmap(sachip->base);
+ clk_put(sachip->clk);
+ kfree(sachip);
+}
+
+struct sa1111_save_data {
+ unsigned int skcr;
+ unsigned int skpcr;
+ unsigned int skcdr;
+ unsigned char skaud;
+ unsigned char skpwm0;
+ unsigned char skpwm1;
+
+ /*
+ * Interrupt controller
+ */
+ unsigned int intpol0;
+ unsigned int intpol1;
+ unsigned int inten0;
+ unsigned int inten1;
+ unsigned int wakepol0;
+ unsigned int wakepol1;
+ unsigned int wakeen0;
+ unsigned int wakeen1;
+};
+
+#ifdef CONFIG_PM
+
+static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sa1111 *sachip = platform_get_drvdata(dev);
+ struct sa1111_save_data *save;
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *base;
+
+ save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL);
+ if (!save)
+ return -ENOMEM;
+ sachip->saved_state = save;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+
+ /*
+ * Save state.
+ */
+ base = sachip->base;
+ save->skcr = sa1111_readl(base + SA1111_SKCR);
+ save->skpcr = sa1111_readl(base + SA1111_SKPCR);
+ save->skcdr = sa1111_readl(base + SA1111_SKCDR);
+ save->skaud = sa1111_readl(base + SA1111_SKAUD);
+ save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0);
+ save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1);
+
+ sa1111_writel(0, sachip->base + SA1111_SKPWM0);
+ sa1111_writel(0, sachip->base + SA1111_SKPWM1);
+
+ base = sachip->base + SA1111_INTC;
+ save->intpol0 = sa1111_readl(base + SA1111_INTPOL0);
+ save->intpol1 = sa1111_readl(base + SA1111_INTPOL1);
+ save->inten0 = sa1111_readl(base + SA1111_INTEN0);
+ save->inten1 = sa1111_readl(base + SA1111_INTEN1);
+ save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0);
+ save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1);
+ save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0);
+ save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1);
+
+ /*
+ * Disable.
+ */
+ val = sa1111_readl(sachip->base + SA1111_SKCR);
+ sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
+
+ clk_disable(sachip->clk);
+
+ spin_unlock_irqrestore(&sachip->lock, flags);
+
+#ifdef CONFIG_ARCH_SA1100
+ sa1110_mb_disable();
+#endif
+
+ return 0;
+}
+
+/*
+ * sa1111_resume - Restore the SA1111 device state.
+ * @dev: device to restore
+ *
+ * Restore the general state of the SA1111; clock control and
+ * interrupt controller. Other parts of the SA1111 must be
+ * restored by their respective drivers, and must be called
+ * via LDM after this function.
+ */
+static int sa1111_resume(struct platform_device *dev)
+{
+ struct sa1111 *sachip = platform_get_drvdata(dev);
+ struct sa1111_save_data *save;
+ unsigned long flags, id;
+ void __iomem *base;
+
+ save = sachip->saved_state;
+ if (!save)
+ return 0;
+
+ /*
+ * Ensure that the SA1111 is still here.
+ * FIXME: shouldn't do this here.
+ */
+ id = sa1111_readl(sachip->base + SA1111_SKID);
+ if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
+ __sa1111_remove(sachip);
+ platform_set_drvdata(dev, NULL);
+ kfree(save);
+ return 0;
+ }
+
+ /*
+ * First of all, wake up the chip.
+ */
+ sa1111_wake(sachip);
+
+#ifdef CONFIG_ARCH_SA1100
+ /* Enable the memory bus request/grant signals */
+ sa1110_mb_enable();
+#endif
+
+ /*
+ * Only lock for write ops. Also, sa1111_wake must be called with the
+ * spinlock released!
+ */
+ spin_lock_irqsave(&sachip->lock, flags);
+
+ sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
+ sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
+
+ base = sachip->base;
+ sa1111_writel(save->skcr, base + SA1111_SKCR);
+ sa1111_writel(save->skpcr, base + SA1111_SKPCR);
+ sa1111_writel(save->skcdr, base + SA1111_SKCDR);
+ sa1111_writel(save->skaud, base + SA1111_SKAUD);
+ sa1111_writel(save->skpwm0, base + SA1111_SKPWM0);
+ sa1111_writel(save->skpwm1, base + SA1111_SKPWM1);
+
+ base = sachip->base + SA1111_INTC;
+ sa1111_writel(save->intpol0, base + SA1111_INTPOL0);
+ sa1111_writel(save->intpol1, base + SA1111_INTPOL1);
+ sa1111_writel(save->inten0, base + SA1111_INTEN0);
+ sa1111_writel(save->inten1, base + SA1111_INTEN1);
+ sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0);
+ sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1);
+ sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0);
+ sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1);
+
+ spin_unlock_irqrestore(&sachip->lock, flags);
+
+ sachip->saved_state = NULL;
+ kfree(save);
+
+ return 0;
+}
+
+#else
+#define sa1111_suspend NULL
+#define sa1111_resume NULL
+#endif
+
+static int sa1111_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ return __sa1111_probe(&pdev->dev, mem, irq);
+}
+
+static int sa1111_remove(struct platform_device *pdev)
+{
+ struct sa1111 *sachip = platform_get_drvdata(pdev);
+
+ if (sachip) {
+#ifdef CONFIG_PM
+ kfree(sachip->saved_state);
+ sachip->saved_state = NULL;
+#endif
+ __sa1111_remove(sachip);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Not sure if this should be on the system bus or not yet.
+ * We really want some way to register a system device at
+ * the per-machine level, and then have this driver pick
+ * up the registered devices.
+ *
+ * We also need to handle the SDRAM configuration for
+ * PXA250/SA1110 machine classes.
+ */
+static struct platform_driver sa1111_device_driver = {
+ .probe = sa1111_probe,
+ .remove = sa1111_remove,
+ .suspend = sa1111_suspend,
+ .resume = sa1111_resume,
+ .driver = {
+ .name = "sa1111",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Get the parent device driver (us) structure
+ * from a child function device
+ */
+static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev)
+{
+ return (struct sa1111 *)dev_get_drvdata(sadev->dev.parent);
+}
+
+/*
+ * The bits in the opdiv field are non-linear.
+ */
+static unsigned char opdiv_table[] = { 1, 4, 2, 8 };
+
+static unsigned int __sa1111_pll_clock(struct sa1111 *sachip)
+{
+ unsigned int skcdr, fbdiv, ipdiv, opdiv;
+
+ skcdr = sa1111_readl(sachip->base + SA1111_SKCDR);
+
+ fbdiv = (skcdr & 0x007f) + 2;
+ ipdiv = ((skcdr & 0x0f80) >> 7) + 2;
+ opdiv = opdiv_table[(skcdr & 0x3000) >> 12];
+
+ return 3686400 * fbdiv / (ipdiv * opdiv);
+}
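+
+/*
+ * Worked example with an illustrative register value: SKCDR = 0x0023 gives
+ * fbdiv = 0x23 + 2 = 37, ipdiv = 0 + 2 = 2, opdiv = 1, so the PLL runs at
+ * 3686400 * 37 / (2 * 1) = 68,198,400 Hz.
+ */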
+
+/**
+ * sa1111_pll_clock - return the current PLL clock frequency.
+ * @sadev: SA1111 function block
+ *
+ * BUG: we should look at SKCR. We also blindly believe that
+ * the chip is being fed with the 3.6864MHz clock.
+ *
+ * Returns the PLL clock in Hz.
+ */
+unsigned int sa1111_pll_clock(struct sa1111_dev *sadev)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+
+ return __sa1111_pll_clock(sachip);
+}
+EXPORT_SYMBOL(sa1111_pll_clock);
+
+/**
+ * sa1111_select_audio_mode - select I2S or AC link mode
+ * @sadev: SA1111 function block
+ * @mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S
+ *
+ * Frob the SKCR to select AC Link mode or I2S mode for
+ * the audio block.
+ */
+void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+
+ val = sa1111_readl(sachip->base + SA1111_SKCR);
+ if (mode == SA1111_AUDIO_I2S) {
+ val &= ~SKCR_SELAC;
+ } else {
+ val |= SKCR_SELAC;
+ }
+ sa1111_writel(val, sachip->base + SA1111_SKCR);
+
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+EXPORT_SYMBOL(sa1111_select_audio_mode);
+
+/**
+ * sa1111_set_audio_rate - set the audio sample rate
+ * @sadev: SA1111 SAC function block
+ * @rate: sample rate to select
+ */
+int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned int div;
+
+ if (sadev->devid != SA1111_DEVID_SAC)
+ return -EINVAL;
+
+ div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate;
+ if (div == 0)
+ div = 1;
+ if (div > 128)
+ div = 128;
+
+ sa1111_writel(div - 1, sachip->base + SA1111_SKAUD);
+
+ return 0;
+}
+EXPORT_SYMBOL(sa1111_set_audio_rate);
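+
+/*
+ * Worked example (assuming, for illustration, a 68,198,400 Hz PLL): for
+ * rate = 44100, div = (68198400/256 + 22050) / 44100 = 288450 / 44100 = 6,
+ * so SKAUD is programmed with 5 and the resulting sample rate is
+ * 266400 / 6 = 44400 Hz.
+ */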
+
+/**
+ * sa1111_get_audio_rate - get the audio sample rate
+ * @sadev: SA1111 SAC function block device
+ */
+int sa1111_get_audio_rate(struct sa1111_dev *sadev)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long div;
+
+ if (sadev->devid != SA1111_DEVID_SAC)
+ return -EINVAL;
+
+ div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1;
+
+ return __sa1111_pll_clock(sachip) / (256 * div);
+}
+EXPORT_SYMBOL(sa1111_get_audio_rate);
+
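+/*
+ * The GPIO helpers below pack the three SA1111 GPIO ports into one word:
+ * bits 0-3 map to port A, bits 8-15 to port B and bits 16-23 to port C
+ * (bits 4-7 are unused), as the shifts in MODIFY_BITS show.
+ */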
+void sa1111_set_io_dir(struct sa1111_dev *sadev,
+ unsigned int bits, unsigned int dir,
+ unsigned int sleep_dir)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *gpio = sachip->base + SA1111_GPIO;
+
+#define MODIFY_BITS(port, mask, dir) \
+ if (mask) { \
+ val = sa1111_readl(port); \
+ val &= ~(mask); \
+ val |= (dir) & (mask); \
+ sa1111_writel(val, port); \
+ }
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir);
+ MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8);
+ MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16);
+
+ MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir);
+ MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8);
+ MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+EXPORT_SYMBOL(sa1111_set_io_dir);
+
+void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *gpio = sachip->base + SA1111_GPIO;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ MODIFY_BITS(gpio + SA1111_GPIO_PADWR, bits & 15, v);
+ MODIFY_BITS(gpio + SA1111_GPIO_PBDWR, (bits >> 8) & 255, v >> 8);
+ MODIFY_BITS(gpio + SA1111_GPIO_PCDWR, (bits >> 16) & 255, v >> 16);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+EXPORT_SYMBOL(sa1111_set_io);
+
+void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+ void __iomem *gpio = sachip->base + SA1111_GPIO;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ MODIFY_BITS(gpio + SA1111_GPIO_PASSR, bits & 15, v);
+ MODIFY_BITS(gpio + SA1111_GPIO_PBSSR, (bits >> 8) & 255, v >> 8);
+ MODIFY_BITS(gpio + SA1111_GPIO_PCSSR, (bits >> 16) & 255, v >> 16);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+}
+EXPORT_SYMBOL(sa1111_set_sleep_io);
+
+/*
+ * Individual device operations.
+ */
+
+/**
+ * sa1111_enable_device - enable an on-chip SA1111 function block
+ * @sadev: SA1111 function block device to enable
+ */
+int sa1111_enable_device(struct sa1111_dev *sadev)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+ int ret = 0;
+
+ if (sachip->pdata && sachip->pdata->enable)
+ ret = sachip->pdata->enable(sachip->pdata->data, sadev->devid);
+
+ if (ret == 0) {
+ spin_lock_irqsave(&sachip->lock, flags);
+ val = sa1111_readl(sachip->base + SA1111_SKPCR);
+ sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(sa1111_enable_device);
+
+/**
+ * sa1111_disable_device - disable an on-chip SA1111 function block
+ * @sadev: SA1111 function block device to disable
+ */
+void sa1111_disable_device(struct sa1111_dev *sadev)
+{
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ unsigned long flags;
+ unsigned int val;
+
+ spin_lock_irqsave(&sachip->lock, flags);
+ val = sa1111_readl(sachip->base + SA1111_SKPCR);
+ sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
+ spin_unlock_irqrestore(&sachip->lock, flags);
+
+ if (sachip->pdata && sachip->pdata->disable)
+ sachip->pdata->disable(sachip->pdata->data, sadev->devid);
+}
+EXPORT_SYMBOL(sa1111_disable_device);
+
+/*
+ * SA1111 "Register Access Bus."
+ *
+ * We model this as a regular bus type, and hang devices directly
+ * off this.
+ */
+static int sa1111_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct sa1111_dev *dev = SA1111_DEV(_dev);
+ struct sa1111_driver *drv = SA1111_DRV(_drv);
+
+ return dev->devid & drv->devid;
+}
+
+static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv && drv->suspend)
+ ret = drv->suspend(sadev, state);
+ return ret;
+}
+
+static int sa1111_bus_resume(struct device *dev)
+{
+ struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv && drv->resume)
+ ret = drv->resume(sadev);
+ return ret;
+}
+
+static void sa1111_bus_shutdown(struct device *dev)
+{
+ struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+
+ if (drv && drv->shutdown)
+ drv->shutdown(SA1111_DEV(dev));
+}
+
+static int sa1111_bus_probe(struct device *dev)
+{
+ struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+ int ret = -ENODEV;
+
+ if (drv->probe)
+ ret = drv->probe(sadev);
+ return ret;
+}
+
+static int sa1111_bus_remove(struct device *dev)
+{
+ struct sa1111_dev *sadev = SA1111_DEV(dev);
+ struct sa1111_driver *drv = SA1111_DRV(dev->driver);
+ int ret = 0;
+
+ if (drv->remove)
+ ret = drv->remove(sadev);
+ return ret;
+}
+
+struct bus_type sa1111_bus_type = {
+ .name = "sa1111-rab",
+ .match = sa1111_match,
+ .probe = sa1111_bus_probe,
+ .remove = sa1111_bus_remove,
+ .suspend = sa1111_bus_suspend,
+ .resume = sa1111_bus_resume,
+ .shutdown = sa1111_bus_shutdown,
+};
+EXPORT_SYMBOL(sa1111_bus_type);
+
+int sa1111_driver_register(struct sa1111_driver *driver)
+{
+ driver->drv.bus = &sa1111_bus_type;
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(sa1111_driver_register);
+
+void sa1111_driver_unregister(struct sa1111_driver *driver)
+{
+ driver_unregister(&driver->drv);
+}
+EXPORT_SYMBOL(sa1111_driver_unregister);
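+
+/*
+ * Usage sketch (illustrative only, not part of this file): a client driver
+ * for one of the SA1111 function blocks registers on the Register Access
+ * Bus with sa1111_driver_register() and typically powers its block up via
+ * sa1111_enable_device() from its probe.  The "foo" names below are
+ * hypothetical; the devid mask values come from <asm/hardware/sa1111.h>:
+ *
+ *	static int sa1111_foo_probe(struct sa1111_dev *sadev)
+ *	{
+ *		int ret = sa1111_enable_device(sadev);
+ *		if (ret)
+ *			return ret;
+ *		/" map the block's registers and claim its IRQs here "/
+ *		return 0;
+ *	}
+ *
+ *	static struct sa1111_driver sa1111_foo_driver = {
+ *		.drv	= { .name = "sa1111-foo" },
+ *		.devid	= SA1111_DEVID_PS2,	/" match mask, see sa1111_match() "/
+ *		.probe	= sa1111_foo_probe,
+ *	};
+ *
+ *	static int __init sa1111_foo_init(void)
+ *	{
+ *		return sa1111_driver_register(&sa1111_foo_driver);
+ *	}
+ *	module_init(sa1111_foo_init);
+ */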
+
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller: for
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted.  Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+ /*
+ * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+ * User's Guide" mentions that jumpers R51 and R52 control the
+ * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+ * SDRAM bank 1 on Neponset). The default configuration selects
+ * Assabet, so any address in bank 1 is necessarily invalid.
+ */
+ return (machine_is_assabet() || machine_is_pfs168()) &&
+ (addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
+
+static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
+ void *data)
+{
+ struct sa1111_dev *dev = SA1111_DEV(data);
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) {
+ int ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+ sa1111_needs_bounce);
+ if (ret)
+ dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret);
+ }
+ break;
+
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
+ dmabounce_unregister_dev(&dev->dev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sa1111_bus_notifier = {
+ .notifier_call = sa1111_notifier_call,
+};
+#endif
+
+static int __init sa1111_init(void)
+{
+ int ret = bus_register(&sa1111_bus_type);
+#ifdef CONFIG_DMABOUNCE
+ if (ret == 0)
+ bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
+#endif
+ if (ret == 0)
+ platform_driver_register(&sa1111_device_driver);
+ return ret;
+}
+
+static void __exit sa1111_exit(void)
+{
+ platform_driver_unregister(&sa1111_device_driver);
+#ifdef CONFIG_DMABOUNCE
+ bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
+#endif
+ bus_unregister(&sa1111_bus_type);
+}
+
+subsys_initcall(sa1111_init);
+module_exit(sa1111_exit);
+
+MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
new file mode 100644
index 000000000..a5c3dc38a
--- /dev/null
+++ b/arch/arm/common/scoop.c
@@ -0,0 +1,284 @@
+/*
+ * Support code for the SCOOP interface found on various Sharp PDAs
+ *
+ * Copyright (c) 2004 Richard Purdie
+ *
+ * Based on code written by Sharp/Lineo for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <asm/hardware/scoop.h>
+
+/* PCMCIA to Scoop linkage
+
+   There is no easy way to link multiple scoop devices into one
+   single entity for the pxa2xx_pcmcia device, so this structure,
+   which is set up by the platform code, is used instead.
+
+   This file is never modular, so this symbol is always
+   accessible to the board support files.
+*/
+struct scoop_pcmcia_config *platform_scoop_config;
+EXPORT_SYMBOL(platform_scoop_config);
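+
+/*
+ * Illustrative sketch of the linkage described above.  The board name is
+ * hypothetical and the field layout of struct scoop_pcmcia_config is an
+ * assumption based on <asm/hardware/scoop.h> (an array of per-SCOOP
+ * struct scoop_pcmcia_dev entries plus its length):
+ *
+ *	static struct scoop_pcmcia_config myboard_pcmcia_config = {
+ *		.devs		= myboard_scoop_devs,
+ *		.num_devs	= ARRAY_SIZE(myboard_scoop_devs),
+ *	};
+ *
+ * and then, in the board's init_machine() hook:
+ *
+ *	platform_scoop_config = &myboard_pcmcia_config;
+ */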
+
+struct scoop_dev {
+ void __iomem *base;
+ struct gpio_chip gpio;
+ spinlock_t scoop_lock;
+ unsigned short suspend_clr;
+ unsigned short suspend_set;
+ u32 scoop_gpwr;
+};
+
+void reset_scoop(struct device *dev)
+{
+ struct scoop_dev *sdev = dev_get_drvdata(dev);
+
+ iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */
+ iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */
+ iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */
+ iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */
+ iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */
+ iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */
+ iowrite16(0x0000, sdev->base + SCOOP_IRM);
+}
+
+static void __scoop_gpio_set(struct scoop_dev *sdev,
+ unsigned offset, int value)
+{
+ unsigned short gpwr;
+
+ gpwr = ioread16(sdev->base + SCOOP_GPWR);
+ if (value)
+ gpwr |= 1 << (offset + 1);
+ else
+ gpwr &= ~(1 << (offset + 1));
+ iowrite16(gpwr, sdev->base + SCOOP_GPWR);
+}
+
+static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->scoop_lock, flags);
+
+ __scoop_gpio_set(sdev, offset, value);
+
+ spin_unlock_irqrestore(&sdev->scoop_lock, flags);
+}
+
+static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
+
+	/* XXX: GPRR appears to read back the current pin level (GPIO n at bit n + 1) */
+ return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1));
+}
+
+static int scoop_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
+ unsigned long flags;
+ unsigned short gpcr;
+
+ spin_lock_irqsave(&sdev->scoop_lock, flags);
+
+ gpcr = ioread16(sdev->base + SCOOP_GPCR);
+ gpcr &= ~(1 << (offset + 1));
+ iowrite16(gpcr, sdev->base + SCOOP_GPCR);
+
+ spin_unlock_irqrestore(&sdev->scoop_lock, flags);
+
+ return 0;
+}
+
+static int scoop_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
+ unsigned long flags;
+ unsigned short gpcr;
+
+ spin_lock_irqsave(&sdev->scoop_lock, flags);
+
+ __scoop_gpio_set(sdev, offset, value);
+
+ gpcr = ioread16(sdev->base + SCOOP_GPCR);
+ gpcr |= 1 << (offset + 1);
+ iowrite16(gpcr, sdev->base + SCOOP_GPCR);
+
+ spin_unlock_irqrestore(&sdev->scoop_lock, flags);
+
+ return 0;
+}
+
+unsigned short read_scoop_reg(struct device *dev, unsigned short reg)
+{
+ struct scoop_dev *sdev = dev_get_drvdata(dev);
+ return ioread16(sdev->base + reg);
+}
+
+void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data)
+{
+ struct scoop_dev *sdev = dev_get_drvdata(dev);
+ iowrite16(data, sdev->base + reg);
+}
+
+EXPORT_SYMBOL(reset_scoop);
+EXPORT_SYMBOL(read_scoop_reg);
+EXPORT_SYMBOL(write_scoop_reg);
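+
+/*
+ * Example use of the raw register accessors from board support or PCMCIA
+ * glue code (a sketch; "scoop_dev" stands for the SCOOP platform device's
+ * struct device and the bit number is illustrative):
+ *
+ *	unsigned short gpwr = read_scoop_reg(scoop_dev, SCOOP_GPWR);
+ *	write_scoop_reg(scoop_dev, SCOOP_GPWR, gpwr | (1 << 4));
+ *
+ * Note that read-modify-write sequences like this must be serialised by
+ * the caller: unlike the gpio_chip callbacks above, these helpers do not
+ * take scoop_lock.
+ */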
+
+#ifdef CONFIG_PM
+static void check_scoop_reg(struct scoop_dev *sdev)
+{
+ unsigned short mcr;
+
+ mcr = ioread16(sdev->base + SCOOP_MCR);
+ if ((mcr & 0x100) == 0)
+ iowrite16(0x0101, sdev->base + SCOOP_MCR);
+}
+
+static int scoop_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct scoop_dev *sdev = platform_get_drvdata(dev);
+
+ check_scoop_reg(sdev);
+ sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR);
+ iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR);
+
+ return 0;
+}
+
+static int scoop_resume(struct platform_device *dev)
+{
+ struct scoop_dev *sdev = platform_get_drvdata(dev);
+
+ check_scoop_reg(sdev);
+ iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR);
+
+ return 0;
+}
+#else
+#define scoop_suspend NULL
+#define scoop_resume NULL
+#endif
+
+static int scoop_probe(struct platform_device *pdev)
+{
+ struct scoop_dev *devptr;
+ struct scoop_config *inf;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int ret;
+
+ if (!mem)
+ return -EINVAL;
+
+ devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL);
+ if (!devptr)
+ return -ENOMEM;
+
+ spin_lock_init(&devptr->scoop_lock);
+
+ inf = pdev->dev.platform_data;
+ devptr->base = ioremap(mem->start, resource_size(mem));
+
+ if (!devptr->base) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ platform_set_drvdata(pdev, devptr);
+
+	printk(KERN_INFO "Sharp Scoop Device found at 0x%08x -> 0x%p\n", (unsigned int)mem->start, devptr->base);
+
+ iowrite16(0x0140, devptr->base + SCOOP_MCR);
+ reset_scoop(&pdev->dev);
+ iowrite16(0x0000, devptr->base + SCOOP_CPR);
+ iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR);
+ iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR);
+
+ devptr->suspend_clr = inf->suspend_clr;
+ devptr->suspend_set = inf->suspend_set;
+
+ devptr->gpio.base = -1;
+
+ if (inf->gpio_base != 0) {
+ devptr->gpio.label = dev_name(&pdev->dev);
+ devptr->gpio.base = inf->gpio_base;
+ devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
+ devptr->gpio.set = scoop_gpio_set;
+ devptr->gpio.get = scoop_gpio_get;
+ devptr->gpio.direction_input = scoop_gpio_direction_input;
+ devptr->gpio.direction_output = scoop_gpio_direction_output;
+
+ ret = gpiochip_add(&devptr->gpio);
+ if (ret)
+ goto err_gpio;
+ }
+
+ return 0;
+
+err_gpio:
+	platform_set_drvdata(pdev, NULL);
+	iounmap(devptr->base);
+err_ioremap:
+	kfree(devptr);
+
+ return ret;
+}
+
+static int scoop_remove(struct platform_device *pdev)
+{
+ struct scoop_dev *sdev = platform_get_drvdata(pdev);
+ int ret;
+
+ if (!sdev)
+ return -EINVAL;
+
+ if (sdev->gpio.base != -1) {
+ ret = gpiochip_remove(&sdev->gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't remove gpio chip: %d\n", ret);
+ return ret;
+ }
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ iounmap(sdev->base);
+ kfree(sdev);
+
+ return 0;
+}
+
+static struct platform_driver scoop_driver = {
+ .probe = scoop_probe,
+ .remove = scoop_remove,
+ .suspend = scoop_suspend,
+ .resume = scoop_resume,
+ .driver = {
+ .name = "sharp-scoop",
+ },
+};
+
+static int __init scoop_init(void)
+{
+ return platform_driver_register(&scoop_driver);
+}
+
+subsys_initcall(scoop_init);
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
new file mode 100644
index 000000000..d56c93258
--- /dev/null
+++ b/arch/arm/common/sharpsl_param.c
@@ -0,0 +1,62 @@
+/*
+ * Hardware parameter area specific to Sharp SL series devices
+ *
+ * Copyright (c) 2005 Richard Purdie
+ *
+ * Based on Sharp's 2.4 kernel patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <asm/mach/sharpsl_param.h>
+
+/*
+ * Certain hardware parameters determined at the time of device manufacture,
+ * typically including LCD parameters, are loaded by the bootloader at the
+ * address PARAM_BASE. As the kernel will overwrite them, we need to store
+ * them early in the boot process, then pass them to the appropriate drivers.
+ * Not all devices use all parameters but the format is common to all.
+ */
+#ifdef CONFIG_ARCH_SA1100
+#define PARAM_BASE 0xe8ffc000
+#else
+#define PARAM_BASE 0xa0000a00
+#endif
+#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
+
+#define COMADJ_MAGIC MAGIC_CHG('C','M','A','D')
+#define UUID_MAGIC MAGIC_CHG('U','U','I','D')
+#define TOUCH_MAGIC MAGIC_CHG('T','U','C','H')
+#define AD_MAGIC MAGIC_CHG('B','V','A','D')
+#define PHAD_MAGIC MAGIC_CHG('P','H','A','D')
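+
+/*
+ * MAGIC_CHG() packs four ASCII characters into a u32; on little-endian ARM,
+ * e.g. COMADJ_MAGIC == MAGIC_CHG('C','M','A','D')
+ *		     == ('D' << 24) | ('A' << 16) | ('M' << 8) | 'C'
+ *		     == 0x44414D43,
+ * i.e. the bytes "CMAD" exactly as the bootloader stores them in memory.
+ */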
+
+struct sharpsl_param_info sharpsl_param;
+EXPORT_SYMBOL(sharpsl_param);
+
+void sharpsl_save_param(void)
+{
+ memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+
+	if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
+		sharpsl_param.comadj = -1;
+
+	if (sharpsl_param.phad_keyword != PHAD_MAGIC)
+		sharpsl_param.phadadj = -1;
+
+	if (sharpsl_param.uuid_keyword != UUID_MAGIC)
+		sharpsl_param.uuid[0] = -1;
+
+	if (sharpsl_param.touch_keyword != TOUCH_MAGIC)
+		sharpsl_param.touch_xp = -1;
+
+	if (sharpsl_param.adadj_keyword != AD_MAGIC)
+		sharpsl_param.adadj = -1;
+}
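+
+/*
+ * A sketch of the intended call flow (the machine hook and the
+ * lcd_set_comadj() consumer are illustrative, not real symbols): early
+ * board code, e.g. the machine's .fixup or .init_machine hook, calls
+ * sharpsl_save_param() before the parameter area can be overwritten.
+ * Drivers then consume the saved values, checking the -1 sentinel set
+ * above for missing or invalid entries:
+ *
+ *	if (sharpsl_param.comadj != -1)
+ *		lcd_set_comadj(sharpsl_param.comadj);
+ */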
+
+
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
new file mode 100644
index 000000000..ddc740769
--- /dev/null
+++ b/arch/arm/common/timer-sp.c
@@ -0,0 +1,299 @@
+/*
+ * linux/arch/arm/common/timer-sp.c
+ *
+ * Copyright (C) 1999 - 2003 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/sched_clock.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/timer-sp.h>
+
+static long __init sp804_get_clock_rate(struct clk *clk)
+{
+ long rate;
+ int err;
+
+ err = clk_prepare(clk);
+ if (err) {
+ pr_err("sp804: clock failed to prepare: %d\n", err);
+ clk_put(clk);
+ return err;
+ }
+
+ err = clk_enable(clk);
+ if (err) {
+ pr_err("sp804: clock failed to enable: %d\n", err);
+ clk_unprepare(clk);
+ clk_put(clk);
+ return err;
+ }
+
+ rate = clk_get_rate(clk);
+ if (rate < 0) {
+ pr_err("sp804: clock failed to get rate: %ld\n", rate);
+ clk_disable(clk);
+ clk_unprepare(clk);
+ clk_put(clk);
+ }
+
+ return rate;
+}
+
+static void __iomem *sched_clock_base;
+
+static u32 sp804_read(void)
+{
+ return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
+}
+
+void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+ const char *name,
+ struct clk *clk,
+ int use_sched_clock)
+{
+ long rate;
+
+ if (!clk) {
+ clk = clk_get_sys("sp804", name);
+ if (IS_ERR(clk)) {
+ pr_err("sp804: clock not found: %d\n",
+ (int)PTR_ERR(clk));
+ return;
+ }
+ }
+
+ rate = sp804_get_clock_rate(clk);
+
+ if (rate < 0)
+ return;
+
+ /* setup timer 0 as free-running clocksource */
+ writel(0, base + TIMER_CTRL);
+ writel(0xffffffff, base + TIMER_LOAD);
+ writel(0xffffffff, base + TIMER_VALUE);
+ writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
+ base + TIMER_CTRL);
+
+ clocksource_mmio_init(base + TIMER_VALUE, name,
+ rate, 200, 32, clocksource_mmio_readl_down);
+
+ if (use_sched_clock) {
+ sched_clock_base = base;
+ setup_sched_clock(sp804_read, 32, rate);
+ }
+}
+
+
+static void __iomem *clkevt_base;
+static unsigned long clkevt_reload;
+
+/*
+ * IRQ handler for the timer
+ */
+static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ /* clear the interrupt */
+ writel(1, clkevt_base + TIMER_INTCLR);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void sp804_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel(clkevt_reload, clkevt_base + TIMER_LOAD);
+ ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* period set, and timer enabled in 'next_event' hook */
+ ctrl |= TIMER_CTRL_ONESHOT;
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ break;
+ }
+
+ writel(ctrl, clkevt_base + TIMER_CTRL);
+}
+
+static int sp804_set_next_event(unsigned long next,
+ struct clock_event_device *evt)
+{
+ unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+
+ writel(next, clkevt_base + TIMER_LOAD);
+ writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+
+ return 0;
+}
+
+static struct clock_event_device sp804_clockevent = {
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = sp804_set_mode,
+ .set_next_event = sp804_set_next_event,
+ .rating = 300,
+};
+
+static struct irqaction sp804_timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = sp804_timer_interrupt,
+ .dev_id = &sp804_clockevent,
+};
+
+void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+{
+ struct clock_event_device *evt = &sp804_clockevent;
+ long rate;
+
+ if (!clk)
+ clk = clk_get_sys("sp804", name);
+ if (IS_ERR(clk)) {
+ pr_err("sp804: %s clock not found: %d\n", name,
+ (int)PTR_ERR(clk));
+ return;
+ }
+
+ rate = sp804_get_clock_rate(clk);
+ if (rate < 0)
+ return;
+
+ clkevt_base = base;
+ clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
+ evt->name = name;
+ evt->irq = irq;
+ evt->cpumask = cpu_possible_mask;
+
+ writel(0, base + TIMER_CTRL);
+
+ setup_irq(irq, &sp804_timer_irq);
+ clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
+}
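+
+/*
+ * Non-DT board code typically wires an SP804 up through the wrappers in
+ * <asm/hardware/timer-sp.h> (a sketch; the base addresses, IRQ number and
+ * names are illustrative):
+ *
+ *	sp804_clocksource_init(TIMER1_VA_BASE, "timer1");
+ *	sp804_clockevents_init(TIMER0_VA_BASE, IRQ_TIMER0, "timer0");
+ *
+ * These end up in __sp804_clocksource_and_sched_clock_init() and
+ * __sp804_clockevents_init() above with a NULL clk, so the clock is
+ * looked up via clk_get_sys("sp804", name).
+ */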
+
+static void __init sp804_of_init(struct device_node *np)
+{
+ static bool initialized = false;
+ void __iomem *base;
+ int irq;
+ u32 irq_num = 0;
+ struct clk *clk1, *clk2;
+ const char *name = of_get_property(np, "compatible", NULL);
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ /* Ensure timers are disabled */
+ writel(0, base + TIMER_CTRL);
+ writel(0, base + TIMER_2_BASE + TIMER_CTRL);
+
+ if (initialized || !of_device_is_available(np))
+ goto err;
+
+ clk1 = of_clk_get(np, 0);
+ if (IS_ERR(clk1))
+ clk1 = NULL;
+
+ /* Get the 2nd clock if the timer has 2 timer clocks */
+ if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
+ clk2 = of_clk_get(np, 1);
+ if (IS_ERR(clk2)) {
+ pr_err("sp804: %s clock not found: %d\n", np->name,
+ (int)PTR_ERR(clk2));
+ goto err;
+ }
+ } else
+ clk2 = clk1;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0)
+ goto err;
+
+ of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
+ if (irq_num == 2) {
+ __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+ __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+ } else {
+		__sp804_clockevents_init(base, irq, clk1, name);
+ __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+ name, clk2, 1);
+ }
+ initialized = true;
+
+ return;
+err:
+ iounmap(base);
+}
+CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
+
+static void __init integrator_cp_of_init(struct device_node *np)
+{
+ static int init_count = 0;
+ void __iomem *base;
+ int irq;
+ const char *name = of_get_property(np, "compatible", NULL);
+
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return;
+
+ /* Ensure timer is disabled */
+ writel(0, base + TIMER_CTRL);
+
+ if (init_count == 2 || !of_device_is_available(np))
+ goto err;
+
+ if (!init_count)
+ sp804_clocksource_init(base, name);
+ else {
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0)
+ goto err;
+
+ sp804_clockevents_init(base, irq, name);
+ }
+
+ init_count++;
+ return;
+err:
+ iounmap(base);
+}
+CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
diff --git a/arch/arm/common/via82c505.c b/arch/arm/common/via82c505.c
new file mode 100644
index 000000000..6cb362e56
--- /dev/null
+++ b/arch/arm/common/via82c505.c
@@ -0,0 +1,83 @@
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+
+
+#include <asm/mach/pci.h>
+
+#define MAX_SLOTS 7
+
+#define CONFIG_CMD(bus, devfn, where) (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
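+
+/*
+ * CONFIG_CMD() builds a PCI type-1 configuration address for the 0xCF8
+ * index port.  For example, bus 0, devfn 0x10 (slot 2, function 0),
+ * where 0x04:
+ *
+ *	0x80000000 | (0 << 16) | (0x10 << 8) | (0x04 & ~3) == 0x80001004
+ *
+ * The data itself is then transferred through the 0xCFC data port,
+ * offset by (where & 3) for sub-word accesses, as below.
+ */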
+
+static int
+via82c505_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+	outl(CONFIG_CMD(bus, devfn, where), 0xCF8);
+	switch (size) {
+	case 1:
+		*value = inb(0xCFC + (where & 3));
+		break;
+	case 2:
+		*value = inw(0xCFC + (where & 2));
+		break;
+	case 4:
+		*value = inl(0xCFC);
+		break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+via82c505_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+	outl(CONFIG_CMD(bus, devfn, where), 0xCF8);
+	switch (size) {
+	case 1:
+		outb(value, 0xCFC + (where & 3));
+		break;
+	case 2:
+		outw(value, 0xCFC + (where & 2));
+		break;
+	case 4:
+		outl(value, 0xCFC);
+		break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops via82c505_ops = {
+ .read = via82c505_read_config,
+ .write = via82c505_write_config,
+};
+
+void __init via82c505_preinit(void)
+{
+ printk(KERN_DEBUG "PCI: VIA 82c505\n");
+	if (!request_region(0xA8, 2, "via config")) {
+		printk(KERN_WARNING "VIA 82c505: Unable to request region 0xA8\n");
+		return;
+	}
+	if (!request_region(0xCF8, 8, "pci config")) {
+		printk(KERN_WARNING "VIA 82c505: Unable to request region 0xCF8\n");
+		release_region(0xA8, 2);
+		return;
+	}
+
+ /* Enable compatible Mode */
+	outb(0x96, 0xA8);
+	outb(0x18, 0xA9);
+	outb(0x93, 0xA8);
+	outb(0xd0, 0xA9);
+
+}
+
+int __init via82c505_setup(int nr, struct pci_sys_data *sys)
+{
+ return (nr == 0);
+}
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
new file mode 100644
index 000000000..ff198583f
--- /dev/null
+++ b/arch/arm/common/vlock.S
@@ -0,0 +1,108 @@
+/*
+ * vlock.S - simple voting lock implementation for ARM
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * This algorithm is described in more detail in
+ * Documentation/arm/vlocks.txt.
+ */
+
+#include <linux/linkage.h>
+#include "vlock.h"
+
+/* Select different code depending on whether the voting flags fit in a single word. */
+#if VLOCK_VOTING_SIZE > 4
+#define FEW(x...)
+#define MANY(x...) x
+#else
+#define FEW(x...) x
+#define MANY(x...)
+#endif
+
+@ voting lock for first-man coordination
+
+.macro voting_begin rbase:req, rcpu:req, rscratch:req
+ mov \rscratch, #1
+ strb \rscratch, [\rbase, \rcpu]
+ dmb
+.endm
+
+.macro voting_end rbase:req, rcpu:req, rscratch:req
+ dmb
+ mov \rscratch, #0
+ strb \rscratch, [\rbase, \rcpu]
+ dsb
+ sev
+.endm
+
+/*
+ * The vlock structure must reside in Strongly-Ordered or Device memory.
+ * This implementation deliberately eliminates most of the barriers which
+ * would be required for other memory types, and assumes that independent
+ * writes to neighbouring locations within a cacheline do not interfere
+ * with one another.
+ */
+
+@ r0: lock structure base
+@ r1: CPU ID (0-based index within cluster)
+ENTRY(vlock_trylock)
+ add r1, r1, #VLOCK_VOTING_OFFSET
+
+ voting_begin r0, r1, r2
+
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
+ cmp r2, #VLOCK_OWNER_NONE
+ bne trylock_fail @ fail if so
+
+ @ Control dependency implies strb not observable before previous ldrb.
+
+ strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
+
+ voting_end r0, r1, r2 @ implies DMB
+
+ @ Wait for the current round of voting to finish:
+
+ MANY( mov r3, #VLOCK_VOTING_OFFSET )
+0:
+ MANY( ldr r2, [r0, r3] )
+ FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
+ cmp r2, #0
+ wfene
+ bne 0b
+ MANY( add r3, r3, #4 )
+ MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
+ MANY( bne 0b )
+
+ @ Check who won:
+
+ dmb
+ ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
+ eor r0, r1, r2 @ zero if I won, else nonzero
+ bx lr
+
+trylock_fail:
+ voting_end r0, r1, r2
+ mov r0, #1 @ nonzero indicates that I lost
+ bx lr
+ENDPROC(vlock_trylock)
+
+@ r0: lock structure base
+ENTRY(vlock_unlock)
+ dmb
+ mov r1, #VLOCK_OWNER_NONE
+ strb r1, [r0, #VLOCK_OWNER_OFFSET]
+ dsb
+ sev
+ bx lr
+ENDPROC(vlock_unlock)
diff --git a/arch/arm/common/vlock.h b/arch/arm/common/vlock.h
new file mode 100644
index 000000000..3b441475a
--- /dev/null
+++ b/arch/arm/common/vlock.h
@@ -0,0 +1,29 @@
+/*
+ * vlock.h - simple voting lock implementation
+ *
+ * Created by: Dave Martin, 2012-08-16
+ * Copyright: (C) 2012-2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __VLOCK_H
+#define __VLOCK_H
+
+#include <asm/mcpm.h>
+
+/* Offsets and sizes are rounded to a word (4 bytes) */
+#define VLOCK_OWNER_OFFSET 0
+#define VLOCK_VOTING_OFFSET 4
+#define VLOCK_VOTING_SIZE ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
+#define VLOCK_SIZE (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
+#define VLOCK_OWNER_NONE 0
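+
+/*
+ * Worked example: with MAX_CPUS_PER_CLUSTER == 4, VLOCK_VOTING_SIZE is
+ * (4 + 3) / 4 * 4 == 4 (one voting byte per CPU, rounded up to a whole
+ * word), giving VLOCK_SIZE == 8: a word holding the owner byte followed
+ * by one word of voting flags, so vlock.S can poll all votes with a
+ * single word load (its "FEW" variant).
+ */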
+
+#endif /* ! __VLOCK_H */