Diffstat (limited to 'target/linux/s3c24xx/files-2.6.30/drivers/mfd')
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Kconfig          41
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Makefile         11
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.c   1301
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.h     67
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-fb.c     1026
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-gpio.c    281
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c     987
-rw-r--r--  target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-regs.h    632
8 files changed, 4346 insertions, 0 deletions
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Kconfig b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Kconfig
new file mode 100644
index 000000000..8c93bcbdb
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Kconfig
@@ -0,0 +1,41 @@
+config MFD_GLAMO
+ bool "Smedia Glamo 336x/337x support"
+ select MFD_CORE
+ help
+ This enables the core driver for the Smedia Glamo 336x/337x
+ multi-function device. It includes irq_chip demultiplex as
+ well as clock / power management and GPIO support.
+
+config MFD_GLAMO_FB
+ tristate "Smedia Glamo 336x/337x framebuffer support"
+ depends on FB && MFD_GLAMO
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ help
+ Frame buffer driver for the LCD controller in the Smedia Glamo
+ 336x/337x.
+
+ This driver is also available as a module ( = code which can be
+ inserted and removed from the running kernel whenever you want). The
+ module will be called glamofb. If you want to compile it as a module,
+ say M here and read <file:Documentation/modules.txt>.
+
+ If unsure, say N.
+
+config MFD_GLAMO_GPIO
+ tristate "Glamo GPIO support"
+ depends on MFD_GLAMO
+ help
+ Enable the GPIO driver for the Smedia Glamo.
+
+config MFD_GLAMO_MCI
+ tristate "Glamo S3C SD/MMC Card Interface support"
+ depends on MFD_GLAMO && MMC && REGULATOR
+ help
+ This selects a driver for the MCI interface found in
+ the S-Media GLAMO chip, as used in Openmoko
+ neo1973 GTA-02.
+
+ If unsure, say N.
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Makefile b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Makefile
new file mode 100644
index 000000000..ebf26f7e4
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Smedia Glamo multi-function device drivers
+#
+
+obj-$(CONFIG_MFD_GLAMO) += glamo-core.o
+obj-$(CONFIG_MFD_GLAMO_GPIO) += glamo-gpio.o
+obj-$(CONFIG_MFD_GLAMO_SPI) += glamo-spi.o
+
+obj-$(CONFIG_MFD_GLAMO_FB) += glamo-fb.o
+obj-$(CONFIG_MFD_GLAMO_MCI) += glamo-mci.o
+
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.c b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.c
new file mode 100644
index 000000000..10f2c7a2a
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.c
@@ -0,0 +1,1301 @@
+/* Smedia Glamo 336x/337x driver
+ *
+ * (C) 2007 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/glamo.h>
+#include <linux/glamofb.h>
+#include <linux/io.h>
+
+#include <asm/div64.h>
+
+#include <linux/pm.h>
+
+#include "glamo-regs.h"
+#include "glamo-core.h"
+
+#define GLAMO_MEM_REFRESH_COUNT 0x100
+
+/*
+ * Glamo internal settings
+ *
+ * We run the memory interface from the faster PLLB on 2.6.28 kernels and
+ * above. A couple of GTA02 users reported trouble with the memory bus when
+ * they upgraded from 2.6.24, so this parameter allows reverting to the
+ * 2.6.24 scheme if their Glamo chip needs it.
+ *
+ * You can override the faster default on the kernel command line with,
+ * for example:
+ *
+ * glamo3362.slow_memory=1
+ */
+
+static int slow_memory = 0;
+module_param(slow_memory, int, 0644);
+
+struct reg_range {
+ int start;
+ int count;
+ char *name;
+ char dump;
+};
+
+struct reg_range reg_range[] = {
+ { 0x0000, 0x76, "General", 1 },
+ { 0x0200, 0x18, "Host Bus", 1 },
+ { 0x0300, 0x38, "Memory", 1 },
+/* { 0x0400, 0x100, "Sensor", 0 }, */
+/* { 0x0500, 0x300, "ISP", 0 }, */
+/* { 0x0800, 0x400, "JPEG", 0 }, */
+/* { 0x0c00, 0xcc, "MPEG", 0 }, */
+ { 0x1100, 0xb2, "LCD 1", 1 },
+ { 0x1200, 0x64, "LCD 2", 1 },
+ { 0x1400, 0x42, "MMC", 1 },
+/* { 0x1500, 0x080, "MPU 0", 0 },
+ { 0x1580, 0x080, "MPU 1", 0 },
+ { 0x1600, 0x080, "Cmd Queue", 0 },
+ { 0x1680, 0x080, "RISC CPU", 0 },*/
+ { 0x1700, 0x400, "2D Unit", 0 },
+/* { 0x1b00, 0x900, "3D Unit", 0 }, */
+};
+
+static inline void __reg_write(struct glamo_core *glamo,
+ u_int16_t reg, u_int16_t val)
+{
+ writew(val, glamo->base + reg);
+}
+
+static inline u_int16_t __reg_read(struct glamo_core *glamo,
+ u_int16_t reg)
+{
+ return readw(glamo->base + reg);
+}
+
+static void __reg_set_bit_mask(struct glamo_core *glamo,
+ u_int16_t reg, u_int16_t mask,
+ u_int16_t val)
+{
+ u_int16_t tmp;
+
+ val &= mask;
+
+ tmp = __reg_read(glamo, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ __reg_write(glamo, reg, tmp);
+}
+
+static void reg_set_bit_mask(struct glamo_core *glamo,
+ u_int16_t reg, u_int16_t mask,
+ u_int16_t val)
+{
+ spin_lock(&glamo->lock);
+ __reg_set_bit_mask(glamo, reg, mask, val);
+ spin_unlock(&glamo->lock);
+}
+
+static inline void __reg_set_bit(struct glamo_core *glamo,
+ u_int16_t reg, u_int16_t bit)
+{
+ __reg_set_bit_mask(glamo, reg, bit, 0xffff);
+}
+
+static inline void __reg_clear_bit(struct glamo_core *glamo,
+ u_int16_t reg, u_int16_t bit)
+{
+ __reg_set_bit_mask(glamo, reg, bit, 0);
+}
+
+/***********************************************************************
+ * resources of sibling devices
+ ***********************************************************************/
+
+static struct resource glamo_fb_resources[] = {
+ {
+ .name = "glamo-fb-regs",
+ .start = GLAMO_REGOFS_LCD,
+ .end = GLAMO_REGOFS_MMC - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "glamo-fb-mem",
+ .start = GLAMO_OFFSET_FB,
+ .end = GLAMO_OFFSET_FB + GLAMO_FB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource glamo_mmc_resources[] = {
+ {
+ .start = GLAMO_REGOFS_MMC,
+ .end = GLAMO_REGOFS_MPROC0 - 1,
+ .flags = IORESOURCE_MEM
+ }, {
+ .start = IRQ_GLAMO_MMC,
+ .end = IRQ_GLAMO_MMC,
+ .flags = IORESOURCE_IRQ,
+ }, { /* our data buffer for MMC transfers */
+ .start = GLAMO_OFFSET_FB + GLAMO_FB_SIZE,
+ .end = GLAMO_OFFSET_FB + GLAMO_FB_SIZE +
+ GLAMO_MMC_BUFFER_SIZE - 1,
+ .flags = IORESOURCE_MEM
+ },
+};
+
+enum glamo_cells {
+ GLAMO_CELL_FB,
+ GLAMO_CELL_MMC,
+ GLAMO_CELL_GPIO,
+};
+
+static struct mfd_cell glamo_cells[] = {
+ [GLAMO_CELL_FB] = {
+ .name = "glamo-fb",
+ .num_resources = ARRAY_SIZE(glamo_fb_resources),
+ .resources = glamo_fb_resources,
+ },
+ [GLAMO_CELL_MMC] = {
+ .name = "glamo-mci",
+ .num_resources = ARRAY_SIZE(glamo_mmc_resources),
+ .resources = glamo_mmc_resources,
+ },
+ [GLAMO_CELL_GPIO] = {
+ .name = "glamo-gpio",
+ },
+};
+
+
+/***********************************************************************
+ * IRQ demultiplexer
+ ***********************************************************************/
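+/* translate a Linux IRQ number into the Glamo's internal interrupt bit index */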
+#define irq2glamo(x) ((x) - IRQ_GLAMO(0))
+
+static void glamo_ack_irq(unsigned int irq)
+{
+ struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
+ /* clear interrupt source */
+ __reg_write(glamo, GLAMO_REG_IRQ_CLEAR,
+ 1 << irq2glamo(irq));
+}
+
+static void glamo_mask_irq(unsigned int irq)
+{
+ struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
+ u_int16_t tmp;
+
+ /* clear bit in enable register */
+ tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
+ tmp &= ~(1 << irq2glamo(irq));
+ __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
+}
+
+static void glamo_unmask_irq(unsigned int irq)
+{
+ struct glamo_core *glamo = (struct glamo_core*)get_irq_chip_data(irq);
+ u_int16_t tmp;
+
+ /* set bit in enable register */
+ tmp = __reg_read(glamo, GLAMO_REG_IRQ_ENABLE);
+ tmp |= (1 << irq2glamo(irq));
+ __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, tmp);
+}
+
+static struct irq_chip glamo_irq_chip = {
+ .name = "glamo",
+ .ack = glamo_ack_irq,
+ .mask = glamo_mask_irq,
+ .unmask = glamo_unmask_irq,
+};
+
+static void glamo_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct glamo_core *glamo = get_irq_desc_chip_data(desc);
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+
+ if (unlikely(desc->status & IRQ_INPROGRESS)) {
+ desc->status |= (IRQ_PENDING | IRQ_MASKED);
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ return;
+ }
+ kstat_incr_irqs_this_cpu(irq, desc);
+
+ desc->chip->ack(irq);
+ desc->status |= IRQ_INPROGRESS;
+
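+ /* dispatch each asserted Glamo interrupt source to its demuxed handler,
+ * repeating while new interrupts are flagged as pending */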
+ do {
+ u_int16_t irqstatus;
+ int i;
+
+ if (unlikely((desc->status &
+ (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
+ (IRQ_PENDING | IRQ_MASKED))) {
+ /* dealing with pending IRQ, unmasking */
+ desc->chip->unmask(irq);
+ desc->status &= ~IRQ_MASKED;
+ }
+
+ desc->status &= ~IRQ_PENDING;
+
+ /* read IRQ status register */
+ irqstatus = __reg_read(glamo, GLAMO_REG_IRQ_STATUS);
+ for (i = 0; i < 9; i++)
+ if (irqstatus & (1 << i))
+ desc_handle_irq(IRQ_GLAMO(i),
+ irq_desc+IRQ_GLAMO(i));
+
+ } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+
+ desc->status &= ~IRQ_INPROGRESS;
+}
+
+
+static ssize_t regs_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
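+ /* expect "<reg> <value>" from userspace, both numbers parsed as decimal */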
+ unsigned long reg = simple_strtoul(buf, NULL, 10);
+ struct glamo_core *glamo = dev_get_drvdata(dev);
+
+ while (*buf && (*buf != ' '))
+ buf++;
+ if (*buf != ' ')
+ return -EINVAL;
+ while (*buf && (*buf == ' '))
+ buf++;
+ if (!*buf)
+ return -EINVAL;
+
+ printk(KERN_INFO"reg 0x%02lX <-- 0x%04lX\n",
+ reg, simple_strtoul(buf, NULL, 10));
+
+ __reg_write(glamo, reg, simple_strtoul(buf, NULL, 10));
+
+ return count;
+}
+
+static ssize_t regs_read(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct glamo_core *glamo = dev_get_drvdata(dev);
+ int n, n1 = 0, r;
+ char * end = buf;
+
+ spin_lock(&glamo->lock);
+
+ for (r = 0; r < ARRAY_SIZE(reg_range); r++) {
+ if (!reg_range[r].dump)
+ continue;
+ n1 = 0;
+ end += sprintf(end, "\n%s\n", reg_range[r].name);
+ for (n = reg_range[r].start;
+ n < reg_range[r].start + reg_range[r].count; n += 2) {
+ if (((n1++) & 7) == 0)
+ end += sprintf(end, "\n%04X: ", n);
+ end += sprintf(end, "%04x ", __reg_read(glamo, n));
+ }
+ end += sprintf(end, "\n");
+ if (!attr) {
+ printk("%s", buf);
+ end = buf;
+ }
+ }
+ spin_unlock(&glamo->lock);
+
+ return end - buf;
+}
+
+static DEVICE_ATTR(regs, 0644, regs_read, regs_write);
+static struct attribute *glamo_sysfs_entries[] = {
+ &dev_attr_regs.attr,
+ NULL
+};
+static struct attribute_group glamo_attr_group = {
+ .name = NULL,
+ .attrs = glamo_sysfs_entries,
+};
+
+
+
+/***********************************************************************
+ * 'engine' support
+ ***********************************************************************/
+
+int __glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ switch (engine) {
+ case GLAMO_ENGINE_LCD:
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_LCD,
+ GLAMO_HOSTBUS2_MMIO_EN_LCD);
+ __reg_write(glamo, GLAMO_REG_CLOCK_LCD,
+ GLAMO_CLOCK_LCD_EN_M5CLK |
+ GLAMO_CLOCK_LCD_EN_DHCLK |
+ GLAMO_CLOCK_LCD_EN_DMCLK |
+ GLAMO_CLOCK_LCD_EN_DCLK |
+ GLAMO_CLOCK_LCD_DG_M5CLK |
+ GLAMO_CLOCK_LCD_DG_DMCLK);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
+ GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
+ GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0xffff);
+ break;
+ case GLAMO_ENGINE_MMC:
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_MMC,
+ GLAMO_HOSTBUS2_MMIO_EN_MMC);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
+ GLAMO_CLOCK_MMC_EN_M9CLK |
+ GLAMO_CLOCK_MMC_EN_TCLK |
+ GLAMO_CLOCK_MMC_DG_M9CLK |
+ GLAMO_CLOCK_MMC_DG_TCLK,
+ 0xffff);
+ /* enable the TCLK divider clk input */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_TCLK,
+ GLAMO_CLOCK_GEN51_EN_DIV_TCLK);
+ break;
+ case GLAMO_ENGINE_2D:
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
+ GLAMO_CLOCK_2D_EN_M7CLK |
+ GLAMO_CLOCK_2D_EN_GCLK |
+ GLAMO_CLOCK_2D_DG_M7CLK |
+ GLAMO_CLOCK_2D_DG_GCLK, 0xffff);
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_2D,
+ GLAMO_HOSTBUS2_MMIO_EN_2D);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
+ 0xffff);
+ break;
+ case GLAMO_ENGINE_CMDQ:
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
+ GLAMO_CLOCK_2D_EN_M6CLK, 0xffff);
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_CQ,
+ GLAMO_HOSTBUS2_MMIO_EN_CQ);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
+ 0xffff);
+ break;
+ /* FIXME: Implementation */
+ default:
+ return -EINVAL;
+ }
+
+ glamo->engine_enabled_bitfield |= 1 << engine;
+
+ return 0;
+}
+
+int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ int ret;
+
+ spin_lock(&glamo->lock);
+
+ ret = __glamo_engine_enable(glamo, engine);
+
+ spin_unlock(&glamo->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(glamo_engine_enable);
+
+int __glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ switch (engine) {
+ case GLAMO_ENGINE_LCD:
+ /* remove pixel clock to LCM */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
+ GLAMO_CLOCK_LCD_EN_DCLK, 0);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
+ GLAMO_CLOCK_LCD_EN_DHCLK |
+ GLAMO_CLOCK_LCD_EN_DMCLK, 0);
+ /* kill memory clock */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_LCD,
+ GLAMO_CLOCK_LCD_EN_M5CLK, 0);
+ /* stop dividing the clocks */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_DHCLK |
+ GLAMO_CLOCK_GEN51_EN_DIV_DMCLK |
+ GLAMO_CLOCK_GEN51_EN_DIV_DCLK, 0);
+ break;
+
+ case GLAMO_ENGINE_MMC:
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_MMC,
+ GLAMO_CLOCK_MMC_EN_M9CLK |
+ GLAMO_CLOCK_MMC_EN_TCLK |
+ GLAMO_CLOCK_MMC_DG_M9CLK |
+ GLAMO_CLOCK_MMC_DG_TCLK, 0);
+ /* disable the TCLK divider clk input */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_TCLK, 0);
+ break;
+ case GLAMO_ENGINE_CMDQ:
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
+ GLAMO_CLOCK_2D_EN_M6CLK,
+ 0);
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_CQ,
+ GLAMO_HOSTBUS2_MMIO_EN_CQ);
+/* __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_MCLK,
+ 0);*/
+ break;
+ case GLAMO_ENGINE_2D:
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_2D,
+ GLAMO_CLOCK_2D_EN_M7CLK |
+ GLAMO_CLOCK_2D_EN_GCLK |
+ GLAMO_CLOCK_2D_DG_M7CLK |
+ GLAMO_CLOCK_2D_DG_GCLK,
+ 0);
+ __reg_set_bit_mask(glamo, GLAMO_REG_HOSTBUS(2),
+ GLAMO_HOSTBUS2_MMIO_EN_2D,
+ GLAMO_HOSTBUS2_MMIO_EN_2D);
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1,
+ GLAMO_CLOCK_GEN51_EN_DIV_GCLK,
+ 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ glamo->engine_enabled_bitfield &= ~(1 << engine);
+
+ return 0;
+}
+int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ int ret;
+
+ spin_lock(&glamo->lock);
+
+ ret = __glamo_engine_disable(glamo, engine);
+
+ spin_unlock(&glamo->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(glamo_engine_disable);
+
+static const u_int16_t engine_clock_regs[__NUM_GLAMO_ENGINES] = {
+ [GLAMO_ENGINE_LCD] = GLAMO_REG_CLOCK_LCD,
+ [GLAMO_ENGINE_MMC] = GLAMO_REG_CLOCK_MMC,
+ [GLAMO_ENGINE_ISP] = GLAMO_REG_CLOCK_ISP,
+ [GLAMO_ENGINE_JPEG] = GLAMO_REG_CLOCK_JPEG,
+ [GLAMO_ENGINE_3D] = GLAMO_REG_CLOCK_3D,
+ [GLAMO_ENGINE_2D] = GLAMO_REG_CLOCK_2D,
+ [GLAMO_ENGINE_MPEG_ENC] = GLAMO_REG_CLOCK_MPEG,
+ [GLAMO_ENGINE_MPEG_DEC] = GLAMO_REG_CLOCK_MPEG,
+};
+
+void glamo_engine_clkreg_set(struct glamo_core *glamo,
+ enum glamo_engine engine,
+ u_int16_t mask, u_int16_t val)
+{
+ reg_set_bit_mask(glamo, engine_clock_regs[engine], mask, val);
+}
+EXPORT_SYMBOL_GPL(glamo_engine_clkreg_set);
+
+u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
+ enum glamo_engine engine)
+{
+ u_int16_t val;
+
+ spin_lock(&glamo->lock);
+ val = __reg_read(glamo, engine_clock_regs[engine]);
+ spin_unlock(&glamo->lock);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(glamo_engine_clkreg_get);
+
+static const struct glamo_script engine_div_regs[__NUM_GLAMO_ENGINES] = {
+ [GLAMO_ENGINE_LCD] = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_DCLK},
+ [GLAMO_ENGINE_MMC] = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_TCLK},
+ [GLAMO_ENGINE_2D] = {GLAMO_REG_CLOCK_GEN5_1, GLAMO_CLOCK_GEN51_EN_DIV_GCLK},
+};
+
+void glamo_engine_div_enable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ uint16_t reg = engine_div_regs[engine].reg;
+ uint16_t bit = engine_div_regs[engine].val;
+ uint16_t val;
+
+ spin_lock(&glamo->lock);
+ val = __reg_read(glamo, reg);
+ __reg_write(glamo, reg, val | bit);
+ spin_unlock(&glamo->lock);
+ mdelay(5);
+}
+EXPORT_SYMBOL_GPL(glamo_engine_div_enable);
+
+void glamo_engine_div_disable(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ uint16_t reg = engine_div_regs[engine].reg;
+ uint16_t bit = engine_div_regs[engine].val;
+ uint16_t val;
+
+ spin_lock(&glamo->lock);
+ val = __reg_read(glamo, reg);
+ __reg_write(glamo, reg, val & ~bit);
+ spin_unlock(&glamo->lock);
+}
+EXPORT_SYMBOL_GPL(glamo_engine_div_disable);
+
+static const struct glamo_script reset_regs[] = {
+ [GLAMO_ENGINE_LCD] = {
+ GLAMO_REG_CLOCK_LCD, GLAMO_CLOCK_LCD_RESET
+ },
+#if 0
+ [GLAMO_ENGINE_HOST] = {
+ GLAMO_REG_CLOCK_HOST, GLAMO_CLOCK_HOST_RESET
+ },
+ [GLAMO_ENGINE_MEM] = {
+ GLAMO_REG_CLOCK_MEM, GLAMO_CLOCK_MEM_RESET
+ },
+#endif
+ [GLAMO_ENGINE_MMC] = {
+ GLAMO_REG_CLOCK_MMC, GLAMO_CLOCK_MMC_RESET
+ },
+ [GLAMO_ENGINE_CMDQ] = {
+ GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_CQ_RESET
+ },
+ [GLAMO_ENGINE_2D] = {
+ GLAMO_REG_CLOCK_2D, GLAMO_CLOCK_2D_RESET
+ },
+ [GLAMO_ENGINE_JPEG] = {
+ GLAMO_REG_CLOCK_JPEG, GLAMO_CLOCK_JPEG_RESET
+ },
+};
+
+void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine)
+{
+ uint16_t reg;
+ uint16_t val;
+
+ if (engine >= ARRAY_SIZE(reset_regs)) {
+ dev_warn(&glamo->pdev->dev, "unknown engine %u\n", engine);
+ return;
+ }
+
+ reg = reset_regs[engine].reg;
+ val = reset_regs[engine].val;
+
+ spin_lock(&glamo->lock);
+ __reg_set_bit(glamo, reg, val);
+ __reg_clear_bit(glamo, reg, val);
+ spin_unlock(&glamo->lock);
+}
+EXPORT_SYMBOL_GPL(glamo_engine_reset);
+
+int glamo_pll_rate(struct glamo_core *glamo,
+ enum glamo_pll pll)
+{
+ u_int16_t reg;
+ unsigned int osci = glamo->pdata->osci_clock_rate;
+
+ switch (pll) {
+ case GLAMO_PLL1:
+ reg = __reg_read(glamo, GLAMO_REG_PLL_GEN1);
+ break;
+ case GLAMO_PLL2:
+ reg = __reg_read(glamo, GLAMO_REG_PLL_GEN3);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return osci*reg;
+}
+EXPORT_SYMBOL_GPL(glamo_pll_rate);
+
+int glamo_engine_reclock(struct glamo_core *glamo,
+ enum glamo_engine engine,
+ int hz)
+{
+ int pll;
+ u_int16_t reg, mask, div;
+
+ if (!hz)
+ return -EINVAL;
+
+ switch (engine) {
+ case GLAMO_ENGINE_LCD:
+ pll = GLAMO_PLL1;
+ reg = GLAMO_REG_CLOCK_GEN7;
+ mask = 0xff;
+ break;
+ case GLAMO_ENGINE_MMC:
+ pll = GLAMO_PLL1;
+ reg = GLAMO_REG_CLOCK_GEN8;
+ mask = 0xff;
+ break;
+ default:
+ dev_warn(&glamo->pdev->dev,
+ "reclock of engine 0x%x not supported\n", engine);
+ return -EINVAL;
+ }
+
+ pll = glamo_pll_rate(glamo, pll);
+
+ div = pll / hz;
+
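+ /* the resulting rate is pll / (div + 1), so pick the smallest divider
+ * whose output does not exceed the requested rate */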
+ if (div != 0 && pll / div <= hz)
+ --div;
+
+ if (div > mask)
+ div = mask;
+
+ dev_dbg(&glamo->pdev->dev,
+ "PLL %d, kHZ %d, div %d\n", pll, hz / 1000, div);
+
+ reg_set_bit_mask(glamo, reg, mask, div);
+ mdelay(5); /* wait some time to stabilize */
+
+ return pll / (div + 1);
+}
+EXPORT_SYMBOL_GPL(glamo_engine_reclock);
+
+/***********************************************************************
+ * script support
+ ***********************************************************************/
+
+int glamo_run_script(struct glamo_core *glamo, const struct glamo_script *script,
+ int len, int may_sleep)
+{
+ int i;
+ const struct glamo_script *line = script;
+
+ for (i = 0; i < len; ++i, ++line) {
+ switch (line->reg) {
+ case 0xffff:
+ return 0;
+ case 0xfffe:
+ if (may_sleep)
+ msleep(line->val);
+ else
+ mdelay(line->val * 4);
+ break;
+ case 0xfffd:
+ /* spin until PLLs lock */
+ while ((__reg_read(glamo, GLAMO_REG_PLL_GEN5) & 3) != 3)
+ ;
+ break;
+
+ /*
+ * A couple of people reported artefacts with the 2.6.28 changes;
+ * this allows reversion to the 2.6.24 settings.
+ */
+
+ case 0x200:
+ switch (slow_memory) {
+ /* choice 1 is the most conservative */
+ case 1: /* 3 waits on Async BB R & W, Use PLL 1 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xef0);
+ break;
+ case 2: /* 2 waits on Async BB R & W, Use PLL 1 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xea0);
+ break;
+ case 3: /* 1 waits on Async BB R & W, Use PLL 1 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xe50);
+ break;
+ case 4: /* 0 waits on Async BB R & W, Use PLL 1 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xe00);
+ break;
+
+ /* using PLL2 for memory bus increases CPU bandwidth significantly */
+ case 5: /* 3 waits on Async BB R & W, Use PLL 2 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xef3);
+ break;
+ case 6: /* 2 waits on Async BB R & W, Use PLL 2 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xea3);
+ break;
+ case 7: /* 1 waits on Async BB R & W, Use PLL 2 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xe53);
+ break;
+ /* default of 0 or >7 is fastest */
+ default: /* 0 waits on Async BB R & W, Use PLL 2 for mem bus */
+ __reg_write(glamo, script[i].reg, 0xe03);
+ break;
+ }
+ break;
+
+ default:
+ __reg_write(glamo, script[i].reg, script[i].val);
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(glamo_run_script);
+
+static const struct glamo_script glamo_init_script[] = {
+ { GLAMO_REG_CLOCK_HOST, 0x1000 },
+ { 0xfffe, 2 },
+ { GLAMO_REG_CLOCK_MEMORY, 0x1000 },
+ { GLAMO_REG_CLOCK_MEMORY, 0x2000 },
+ { GLAMO_REG_CLOCK_LCD, 0x1000 },
+ { GLAMO_REG_CLOCK_MMC, 0x1000 },
+ { GLAMO_REG_CLOCK_ISP, 0x1000 },
+ { GLAMO_REG_CLOCK_ISP, 0x3000 },
+ { GLAMO_REG_CLOCK_JPEG, 0x1000 },
+ { GLAMO_REG_CLOCK_3D, 0x1000 },
+ { GLAMO_REG_CLOCK_3D, 0x3000 },
+ { GLAMO_REG_CLOCK_2D, 0x1000 },
+ { GLAMO_REG_CLOCK_2D, 0x3000 },
+ { GLAMO_REG_CLOCK_RISC1, 0x1000 },
+ { GLAMO_REG_CLOCK_MPEG, 0x3000 },
+ { GLAMO_REG_CLOCK_MPEG, 0x3000 },
+ { GLAMO_REG_CLOCK_MPROC, 0x1000 /*0x100f*/ },
+ { 0xfffe, 2 },
+ { GLAMO_REG_CLOCK_HOST, 0x0000 },
+ { GLAMO_REG_CLOCK_MEMORY, 0x0000 },
+ { GLAMO_REG_CLOCK_LCD, 0x0000 },
+ { GLAMO_REG_CLOCK_MMC, 0x0000 },
+#if 0
+/* unused engines must be left in reset to stop MMC block read "blackouts" */
+ { GLAMO_REG_CLOCK_ISP, 0x0000 },
+ { GLAMO_REG_CLOCK_ISP, 0x0000 },
+ { GLAMO_REG_CLOCK_JPEG, 0x0000 },
+ { GLAMO_REG_CLOCK_3D, 0x0000 },
+ { GLAMO_REG_CLOCK_3D, 0x0000 },
+ { GLAMO_REG_CLOCK_2D, 0x0000 },
+ { GLAMO_REG_CLOCK_2D, 0x0000 },
+ { GLAMO_REG_CLOCK_RISC1, 0x0000 },
+ { GLAMO_REG_CLOCK_MPEG, 0x0000 },
+ { GLAMO_REG_CLOCK_MPEG, 0x0000 },
+#endif
+ { GLAMO_REG_PLL_GEN1, 0x05db }, /* 48MHz */
+ { GLAMO_REG_PLL_GEN3, 0x0aba }, /* 90MHz */
+ { 0xfffd, 0 },
+ /*
+ * b9 of this register MUST be zero to get any interrupts on INT#
+ * the other set bits enable all the engine interrupt sources
+ */
+ { GLAMO_REG_IRQ_ENABLE, 0x01ff },
+ { GLAMO_REG_CLOCK_GEN6, 0x2000 },
+ { GLAMO_REG_CLOCK_GEN7, 0x0101 },
+ { GLAMO_REG_CLOCK_GEN8, 0x0100 },
+ { GLAMO_REG_CLOCK_HOST, 0x000d },
+ /*
+ * b7..b4 = 0 = no wait states on read or write
+ * b0 = 1 select PLL2 for Host interface, b1 = enable it
+ */
+ { 0x200, 0x0e03 /* this is replaced by script parser */ },
+ { 0x202, 0x07ff },
+ { 0x212, 0x0000 },
+ { 0x214, 0x4000 },
+ { 0x216, 0xf00e },
+
+ /* S-Media recommended "set tiling mode to 512 mode for memory access
+ * more efficiency when 640x480" */
+ { GLAMO_REG_MEM_TYPE, 0x0c74 }, /* 8MB, 16 word pg wr+rd */
+ { GLAMO_REG_MEM_GEN, 0xafaf }, /* 63 grants min + max */
+
+ { GLAMO_REGOFS_HOSTBUS + 2, 0xffff }, /* enable on MMIO*/
+
+ { GLAMO_REG_MEM_TIMING1, 0x0108 },
+ { GLAMO_REG_MEM_TIMING2, 0x0010 }, /* Taa = 3 MCLK */
+ { GLAMO_REG_MEM_TIMING3, 0x0000 },
+ { GLAMO_REG_MEM_TIMING4, 0x0000 }, /* CE1# delay fall/rise */
+ { GLAMO_REG_MEM_TIMING5, 0x0000 }, /* UB# LB# */
+ { GLAMO_REG_MEM_TIMING6, 0x0000 }, /* OE# */
+ { GLAMO_REG_MEM_TIMING7, 0x0000 }, /* WE# */
+ { GLAMO_REG_MEM_TIMING8, 0x1002 }, /* MCLK delay, was 0x1000 */
+ { GLAMO_REG_MEM_TIMING9, 0x6006 },
+ { GLAMO_REG_MEM_TIMING10, 0x00ff },
+ { GLAMO_REG_MEM_TIMING11, 0x0001 },
+ { GLAMO_REG_MEM_POWER1, 0x0020 },
+ { GLAMO_REG_MEM_POWER2, 0x0000 },
+ { GLAMO_REG_MEM_DRAM1, 0x0000 },
+ { 0xfffe, 1 },
+ { GLAMO_REG_MEM_DRAM1, 0xc100 },
+ { 0xfffe, 1 },
+ { GLAMO_REG_MEM_DRAM1, 0xe100 },
+ { GLAMO_REG_MEM_DRAM2, 0x01d6 },
+ { GLAMO_REG_CLOCK_MEMORY, 0x000b },
+};
+#if 0
+static struct glamo_script glamo_resume_script[] = {
+
+ { GLAMO_REG_PLL_GEN1, 0x05db }, /* 48MHz */
+ { GLAMO_REG_PLL_GEN3, 0x0aba }, /* 90MHz */
+ { GLAMO_REG_DFT_GEN6, 1 },
+ { 0xfffe, 100 },
+ { 0xfffd, 0 },
+ { 0x200, 0x0e03 },
+
+ /*
+ * b9 of this register MUST be zero to get any interrupts on INT#
+ * the other set bits enable all the engine interrupt sources
+ */
+ { GLAMO_REG_IRQ_ENABLE, 0x01ff },
+ { GLAMO_REG_CLOCK_HOST, 0x0018 },
+ { GLAMO_REG_CLOCK_GEN5_1, 0x18b1 },
+
+ { GLAMO_REG_MEM_DRAM1, 0x0000 },
+ { 0xfffe, 1 },
+ { GLAMO_REG_MEM_DRAM1, 0xc100 },
+ { 0xfffe, 1 },
+ { GLAMO_REG_MEM_DRAM1, 0xe100 },
+ { GLAMO_REG_MEM_DRAM2, 0x01d6 },
+ { GLAMO_REG_CLOCK_MEMORY, 0x000b },
+};
+#endif
+
+enum glamo_power {
+ GLAMO_POWER_ON,
+ GLAMO_POWER_SUSPEND,
+};
+
+static void glamo_power(struct glamo_core *glamo,
+ enum glamo_power new_state)
+{
+ int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&glamo->lock, flags);
+
+ dev_info(&glamo->pdev->dev, "***** glamo_power -> %d\n", new_state);
+
+ /*
+Power management
+static const REG_VALUE_MASK_TYPE reg_powerOn[] =
+{
+ { REG_GEN_DFT6, REG_BIT_ALL, REG_DATA(1u << 0) },
+ { REG_GEN_PLL3, 0u, REG_DATA(1u << 13) },
+ { REG_GEN_MEM_CLK, REG_BIT_ALL, REG_BIT_EN_MOCACLK },
+ { REG_MEM_DRAM2, 0u, REG_BIT_EN_DEEP_POWER_DOWN },
+ { REG_MEM_DRAM1, 0u, REG_BIT_SELF_REFRESH }
+};
+
+static const REG_VALUE_MASK_TYPE reg_powerStandby[] =
+{
+ { REG_MEM_DRAM1, REG_BIT_ALL, REG_BIT_SELF_REFRESH },
+ { REG_GEN_MEM_CLK, 0u, REG_BIT_EN_MOCACLK },
+ { REG_GEN_PLL3, REG_BIT_ALL, REG_DATA(1u << 13) },
+ { REG_GEN_DFT5, REG_BIT_ALL, REG_DATA(1u << 0) }
+};
+
+static const REG_VALUE_MASK_TYPE reg_powerSuspend[] =
+{
+ { REG_MEM_DRAM2, REG_BIT_ALL, REG_BIT_EN_DEEP_POWER_DOWN },
+ { REG_GEN_MEM_CLK, 0u, REG_BIT_EN_MOCACLK },
+ { REG_GEN_PLL3, REG_BIT_ALL, REG_DATA(1u << 13) },
+ { REG_GEN_DFT5, REG_BIT_ALL, REG_DATA(1u << 0) }
+};
+*/
+
+ switch (new_state) {
+ case GLAMO_POWER_ON:
+
+ /*
+ * The Glamo state on resume is nondeterministic in some
+ * fundamental way; it has also been observed that the
+ * Glamo reset pin can get asserted by, e.g., touching it with
+ * a scope probe. So the only answer is to roll with it and
+ * force an external reset of the Glamo during resume.
+ */
+
+ (glamo->pdata->glamo_external_reset)(0);
+ udelay(10);
+ (glamo->pdata->glamo_external_reset)(1);
+ mdelay(5);
+
+ glamo_run_script(glamo, glamo_init_script,
+ ARRAY_SIZE(glamo_init_script), 0);
+
+ break;
+
+ case GLAMO_POWER_SUSPEND:
+
+ /* nuke interrupts */
+ __reg_write(glamo, GLAMO_REG_IRQ_ENABLE, 0x200);
+
+ /* stash a copy of which engines were running */
+ glamo->engine_enabled_bitfield_suspend =
+ glamo->engine_enabled_bitfield;
+
+ /* take down each engine before we kill mem and pll */
+ for (n = 0; n < __NUM_GLAMO_ENGINES; n++)
+ if (glamo->engine_enabled_bitfield & (1 << n))
+ __glamo_engine_disable(glamo, n);
+
+ /* enable self-refresh */
+
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
+ GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
+ GLAMO_MEM_DRAM1_EN_GATE_CKE |
+ GLAMO_MEM_DRAM1_SELF_REFRESH |
+ GLAMO_MEM_REFRESH_COUNT);
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1,
+ GLAMO_MEM_DRAM1_EN_MODEREG_SET |
+ GLAMO_MEM_DRAM1_EN_DRAM_REFRESH |
+ GLAMO_MEM_DRAM1_EN_GATE_CKE |
+ GLAMO_MEM_DRAM1_SELF_REFRESH |
+ GLAMO_MEM_REFRESH_COUNT);
+
+ /* force RAM into deep powerdown */
+
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM2,
+ GLAMO_MEM_DRAM2_DEEP_PWRDOWN |
+ (7 << 6) | /* tRC */
+ (1 << 4) | /* tRP */
+ (1 << 2) | /* tRCD */
+ 2); /* CAS latency */
+
+ /* disable clocks to memory */
+ __reg_write(glamo, GLAMO_REG_CLOCK_MEMORY, 0);
+
+ /* all dividers from OSCI */
+ __reg_set_bit_mask(glamo, GLAMO_REG_CLOCK_GEN5_1, 0x400, 0x400);
+
+ /* PLL2 into bypass */
+ __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 12, 1 << 12);
+
+ __reg_write(glamo, 0x200, 0x0e00);
+
+
+ /* kill PLLS 1 then 2 */
+ __reg_write(glamo, GLAMO_REG_DFT_GEN5, 0x0001);
+ __reg_set_bit_mask(glamo, GLAMO_REG_PLL_GEN3, 1 << 13, 1 << 13);
+
+ break;
+ }
+
+ spin_unlock_irqrestore(&glamo->lock, flags);
+}
+
+#if 0
+#define MEMDETECT_RETRY 6
+static unsigned int detect_memsize(struct glamo_core *glamo)
+{
+ int i;
+
+ /*static const u_int16_t pattern[] = {
+ 0x1111, 0x8a8a, 0x2222, 0x7a7a,
+ 0x3333, 0x6a6a, 0x4444, 0x5a5a,
+ 0x5555, 0x4a4a, 0x6666, 0x3a3a,
+ 0x7777, 0x2a2a, 0x8888, 0x1a1a
+ }; */
+
+ for (i = 0; i < MEMDETECT_RETRY; i++) {
+ switch (glamo->type) {
+ case 3600:
+ __reg_write(glamo, GLAMO_REG_MEM_TYPE, 0x0072);
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
+ break;
+ case 3650:
+ switch (glamo->revision) {
+ case GLAMO_CORE_REV_A0:
+ if (i & 1)
+ __reg_write(glamo, GLAMO_REG_MEM_TYPE,
+ 0x097a);
+ else
+ __reg_write(glamo, GLAMO_REG_MEM_TYPE,
+ 0x0173);
+
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
+ msleep(1);
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xc100);
+ break;
+ default:
+ if (i & 1)
+ __reg_write(glamo, GLAMO_REG_MEM_TYPE,
+ 0x0972);
+ else
+ __reg_write(glamo, GLAMO_REG_MEM_TYPE,
+ 0x0872);
+
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0x0000);
+ msleep(1);
+ __reg_write(glamo, GLAMO_REG_MEM_DRAM1, 0xe100);
+ break;
+ }
+ break;
+ case 3700:
+ /* FIXME */
+ default:
+ break;
+ }
+
+#if 0
+ /* FIXME: finish implementation */
+ for (j = 0; j < 8; j++) {
+ __
+#endif
+ }
+
+ return 0;
+}
+#endif
+
+/* Find out if we can support this version of the Glamo chip */
+static int glamo_supported(struct glamo_core *glamo)
+{
+ u_int16_t dev_id, rev_id; /*, memsize; */
+
+ dev_id = __reg_read(glamo, GLAMO_REG_DEVICE_ID);
+ rev_id = __reg_read(glamo, GLAMO_REG_REVISION_ID);
+
+ switch (dev_id) {
+ case 0x3650:
+ switch (rev_id) {
+ case GLAMO_CORE_REV_A2:
+ break;
+ case GLAMO_CORE_REV_A0:
+ case GLAMO_CORE_REV_A1:
+ case GLAMO_CORE_REV_A3:
+ dev_warn(&glamo->pdev->dev, "untested core revision "
+ "%04x, your mileage may vary\n", rev_id);
+ break;
+ default:
+ dev_warn(&glamo->pdev->dev, "unknown glamo revision "
+ "%04x, your mileage may vary\n", rev_id);
+ /* maybe should abort ? */
+ }
+ break;
+ case 0x3600:
+ case 0x3700:
+ default:
+ dev_err(&glamo->pdev->dev, "unsupported Glamo device %04x\n",
+ dev_id);
+ return 0;
+ }
+
+ dev_dbg(&glamo->pdev->dev, "Detected Glamo core %04x Revision %04x "
+ "(%uHz CPU / %uHz Memory)\n", dev_id, rev_id,
+ glamo_pll_rate(glamo, GLAMO_PLL1),
+ glamo_pll_rate(glamo, GLAMO_PLL2));
+
+ return 1;
+}
+
+static int __init glamo_probe(struct platform_device *pdev)
+{
+ int rc = 0, irq;
+ struct glamo_core *glamo;
+
+ glamo = kzalloc(sizeof(*glamo), GFP_KERNEL);
+ if (!glamo)
+ return -ENOMEM;
+
+ spin_lock_init(&glamo->lock);
+ glamo->pdev = pdev;
+ glamo->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ glamo->irq = platform_get_irq(pdev, 0);
+ glamo->pdata = pdev->dev.platform_data;
+ if (!glamo->mem || !glamo->pdata) {
+ dev_err(&pdev->dev, "platform device with no MEM/PDATA ?\n");
+ rc = -ENOENT;
+ goto bail_free;
+ }
+
+ /* register a number of sibling devices whose IOMEM resources
+ * are siblings of pdev's IOMEM resource */
+
+ /* only remap the generic, hostbus and memory controller registers */
+ glamo->base = ioremap(glamo->mem->start, 0x4000 /*GLAMO_REGOFS_VIDCAP*/);
+ if (!glamo->base) {
+ dev_err(&pdev->dev, "failed to ioremap() memory region\n");
+ rc = -ENOMEM;
+ goto bail_free;
+ }
+
+ platform_set_drvdata(pdev, glamo);
+
+ (glamo->pdata->glamo_external_reset)(0);
+ udelay(10);
+ (glamo->pdata->glamo_external_reset)(1);
+ mdelay(10);
+
+ /*
+ * finally set the mfd interrupts up
+ * can't do them earlier or sibling probes blow up
+ */
+
+ for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
+ set_irq_chip_and_handler(irq, &glamo_irq_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ set_irq_chip_data(irq, glamo);
+ }
+
+ if (glamo->pdata->glamo_irq_is_wired &&
+ !glamo->pdata->glamo_irq_is_wired()) {
+ set_irq_chained_handler(glamo->irq, glamo_irq_demux_handler);
+ set_irq_type(glamo->irq, IRQ_TYPE_EDGE_FALLING);
+ set_irq_chip_data(glamo->irq, glamo);
+ dev_info(&pdev->dev, "Glamo interrupt registered\n");
+ glamo->irq_works = 1;
+ } else {
+ dev_err(&pdev->dev, "Glamo interrupt not used\n");
+ glamo->irq_works = 0;
+ }
+
+ /* confirm it isn't insane version */
+ if (!glamo_supported(glamo)) {
+ dev_err(&pdev->dev, "This Glamo is not supported\n");
+ rc = -ENODEV;
+ goto bail_irq;
+ }
+
+ /* sysfs */
+ rc = sysfs_create_group(&pdev->dev.kobj, &glamo_attr_group);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "cannot create sysfs group\n");
+ goto bail_irq;
+ }
+
+ /* init the chip with canned register set */
+
+ dev_dbg(&glamo->pdev->dev, "running init script\n");
+ glamo_run_script(glamo, glamo_init_script,
+ ARRAY_SIZE(glamo_init_script), 1);
+
+ dev_info(&glamo->pdev->dev, "Glamo core PLL1: %uHz, PLL2: %uHz\n",
+ glamo_pll_rate(glamo, GLAMO_PLL1),
+ glamo_pll_rate(glamo, GLAMO_PLL2));
+
+ mfd_add_devices(&pdev->dev, pdev->id, glamo_cells,
+ ARRAY_SIZE(glamo_cells),
+ glamo->mem, 0);
+
+ /* only request the generic, hostbus and memory controller MMIO */
+ glamo->mem = request_mem_region(glamo->mem->start,
+ GLAMO_REGOFS_VIDCAP, "glamo-core");
+ if (!glamo->mem) {
+ dev_err(&pdev->dev, "failed to request memory region\n");
+ rc = -EBUSY;
+ goto bail_irq;
+ }
+
+ return 0;
+
+bail_irq:
+ disable_irq(glamo->irq);
+ set_irq_chained_handler(glamo->irq, NULL);
+ set_irq_chip_data(glamo->irq, NULL);
+
+ for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
+ set_irq_flags(irq, 0);
+ set_irq_chip(irq, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+
+ iounmap(glamo->base);
+bail_free:
+ platform_set_drvdata(pdev, NULL);
+ kfree(glamo);
+
+ return rc;
+}
+
+static int glamo_remove(struct platform_device *pdev)
+{
+ struct glamo_core *glamo = platform_get_drvdata(pdev);
+ int irq;
+
+ disable_irq(glamo->irq);
+ set_irq_chained_handler(glamo->irq, NULL);
+ set_irq_chip_data(glamo->irq, NULL);
+
+ for (irq = IRQ_GLAMO(0); irq <= IRQ_GLAMO(8); irq++) {
+ set_irq_flags(irq, 0);
+ set_irq_chip(irq, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ mfd_remove_devices(&pdev->dev);
+ iounmap(glamo->base);
+ release_mem_region(glamo->mem->start, GLAMO_REGOFS_VIDCAP);
+ kfree(glamo);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int glamo_suspend(struct device *dev)
+{
+ struct glamo_core *glamo = dev_get_drvdata(dev);
+ glamo->suspending = 1;
+ glamo_power(glamo, GLAMO_POWER_SUSPEND);
+
+ return 0;
+}
+
+static int glamo_resume(struct device *dev)
+{
+ struct glamo_core *glamo = dev_get_drvdata(dev);
+ glamo_power(glamo, GLAMO_POWER_ON);
+ glamo->suspending = 0;
+ return 0;
+}
+
+static struct dev_pm_ops glamo_pm_ops = {
+ .suspend = glamo_suspend,
+ .resume = glamo_resume,
+};
+
+#define GLAMO_PM_OPS (&glamo_pm_ops)
+
+#else
+#define GLAMO_PM_OPS NULL
+#endif
+
+static struct platform_driver glamo_driver = {
+ .probe = glamo_probe,
+ .remove = glamo_remove,
+ .driver = {
+ .name = "glamo3362",
+ .owner = THIS_MODULE,
+ .pm = GLAMO_PM_OPS,
+ },
+};
+
+static int __init glamo_init(void)
+{
+ return platform_driver_register(&glamo_driver);
+}
+
+static void __exit glamo_cleanup(void)
+{
+ platform_driver_unregister(&glamo_driver);
+}
+
+module_init(glamo_init);
+module_exit(glamo_cleanup);
+
+MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
+MODULE_DESCRIPTION("Smedia Glamo 336x/337x core/resource driver");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.h b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.h
new file mode 100644
index 000000000..f0c2ec3ee
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-core.h
@@ -0,0 +1,67 @@
+#ifndef __GLAMO_CORE_H
+#define __GLAMO_CORE_H
+
+#include <asm/system.h>
+#include <linux/mfd/glamo.h>
+
+/* for the time being, we put the on-screen framebuffer into the lowest
+ * VRAM space. This should make the code easily compatible with the various
+ * 2MB/4MB/8MB variants of the Smedia chips */
+#define GLAMO_OFFSET_VRAM 0x800000
+#define GLAMO_OFFSET_FB (GLAMO_OFFSET_VRAM)
+
+/* we only allocate the minimum possible size for the framebuffer to make
+ * sure we have sufficient memory for other functions of the chip */
+//#define GLAMO_FB_SIZE (640*480*4) /* == 0x12c000 */
+#define GLAMO_INTERNAL_RAM_SIZE 0x800000
+#define GLAMO_MMC_BUFFER_SIZE (64 * 1024)
+#define GLAMO_FB_SIZE (GLAMO_INTERNAL_RAM_SIZE - GLAMO_MMC_BUFFER_SIZE)
+
+enum glamo_pll {
+ GLAMO_PLL1,
+ GLAMO_PLL2,
+};
+
+struct glamo_core {
+ int irq;
+ int irq_works; /* 0 means PCB does not support Glamo IRQ */
+ struct resource *mem;
+ struct resource *mem_core;
+ void __iomem *base;
+ struct platform_device *pdev;
+ struct glamo_platform_data *pdata;
+ u_int16_t type;
+ u_int16_t revision;
+ spinlock_t lock;
+ u32 engine_enabled_bitfield;
+ u32 engine_enabled_bitfield_suspend;
+ int suspending;
+};
+
+struct glamo_script {
+ u_int16_t reg;
+ u_int16_t val;
+};
+
+void glamo_engine_div_enable(struct glamo_core *glamo, enum glamo_engine engine);
+void glamo_engine_div_disable(struct glamo_core *glamo, enum glamo_engine engine);
+
+
+int glamo_pll_rate(struct glamo_core *glamo, enum glamo_pll pll);
+
+int glamo_run_script(struct glamo_core *glamo,
+ const struct glamo_script *script, int len, int may_sleep);
+
+int glamo_engine_enable(struct glamo_core *glamo, enum glamo_engine engine);
+int glamo_engine_disable(struct glamo_core *glamo, enum glamo_engine engine);
+void glamo_engine_reset(struct glamo_core *glamo, enum glamo_engine engine);
+int glamo_engine_reclock(struct glamo_core *glamo,
+ enum glamo_engine engine, int ps);
+
+void glamo_engine_clkreg_set(struct glamo_core *glamo,
+ enum glamo_engine engine,
+ u_int16_t mask, u_int16_t val);
+
+u_int16_t glamo_engine_clkreg_get(struct glamo_core *glamo,
+ enum glamo_engine engine);
+#endif /* __GLAMO_CORE_H */
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-fb.c b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-fb.c
new file mode 100644
index 000000000..7bb2201f6
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-fb.c
@@ -0,0 +1,1026 @@
+/* Smedia Glamo 336x/337x driver
+ *
+ * (C) 2007-2008 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/mfd/glamo.h>
+
+#include <asm/div64.h>
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+#endif
+
+#include <linux/glamofb.h>
+
+#include "glamo-regs.h"
+#include "glamo-core.h"
+
+static void glamofb_program_mode(struct glamofb_handle* glamo);
+
+struct glamofb_handle {
+ struct glamo_core *core;
+ struct fb_info *fb;
+ struct device *dev;
+ struct resource *reg;
+ struct resource *fb_res;
+ char __iomem *base;
+ struct glamo_fb_platform_data *mach_info;
+ char __iomem *cursor_addr;
+ int cursor_on;
+ u_int32_t pseudo_pal[16];
+ spinlock_t lock_cmd;
+ int blank_mode;
+ int mode_set; /* 0 if the current display mode hasn't been set on the glamo */
+ int output_enabled; /* 0 if the video output is disabled */
+};
+
+static void glamo_output_enable(struct glamofb_handle *gfb) {
+ struct glamo_core *gcore = gfb->core;
+
+ if (gfb->output_enabled)
+ return;
+
+ /* enable the pixel clock if off */
+ glamo_engine_clkreg_set(gcore,
+ GLAMO_ENGINE_LCD,
+ GLAMO_CLOCK_LCD_EN_DCLK,
+ GLAMO_CLOCK_LCD_EN_DCLK);
+
+ gfb->output_enabled = 1;
+ if (!gfb->mode_set)
+ glamofb_program_mode(gfb);
+}
+
+static void glamo_output_disable(struct glamofb_handle *gfb) {
+ struct glamo_core *gcore = gfb->core;
+
+ if (!gfb->output_enabled)
+ return;
+
+ /* disable the pixel clock */
+ glamo_engine_clkreg_set(gcore,
+ GLAMO_ENGINE_LCD,
+ GLAMO_CLOCK_LCD_EN_DCLK,
+ 0);
+
+ gfb->output_enabled = 0;
+}
+
+
+static int reg_read(struct glamofb_handle *glamo,
+ u_int16_t reg)
+{
+ int i = 0;
+
+ for (i = 0; i != 2; i++)
+ nop();
+
+ return readw(glamo->base + reg);
+}
+
+static void reg_write(struct glamofb_handle *glamo,
+ u_int16_t reg, u_int16_t val)
+{
+ int i = 0;
+
+ for (i = 0; i != 2; i++)
+ nop();
+
+ writew(val, glamo->base + reg);
+}
+
+static struct glamo_script glamo_regs[] = {
+ { GLAMO_REG_LCD_MODE1, 0x0020 },
+ /* no display rotation, no hardware cursor, no dither, no gamma,
+ * no retrace flip, vsync low-active, hsync low active,
+ * no TVCLK, no partial display, hw dest color from fb,
+ * no partial display mode, LCD1, software flip, */
+ { GLAMO_REG_LCD_MODE2, 0x9020 },
+ /* video flip, no ptr, no ptr, dhclk off,
+ * normal mode, no cpuif,
+ * res, serial msb first, single fb, no fr ctrl,
+ * cpu if bits all zero, no crc
+ * 0000 0000 0010 0000 */
+ { GLAMO_REG_LCD_MODE3, 0x0b40 },
+ /* src data rgb565, res, 18bit rgb666
+ * 000 01 011 0100 0000 */
+ { GLAMO_REG_LCD_POLARITY, 0x440c },
+ /* DE high active, no cpu/lcd if, cs0 force low, a0 low active,
+ * np cpu if, 9bit serial data, sclk rising edge latch data
+ * 01 00 0 100 0 000 01 0 0 */
+ /* The following values assume 640*480@16bpp */
+ { GLAMO_REG_LCD_A_BASE1, 0x0000 }, /* display A base address 15:0 */
+ { GLAMO_REG_LCD_A_BASE2, 0x0000 }, /* display A base address 22:16 */
+ { GLAMO_REG_LCD_CURSOR_BASE1, 0xC000 }, /* cursor base address 15:0 */
+ { GLAMO_REG_LCD_CURSOR_BASE2, 0x0012 }, /* cursor base address 22:16 */
+ { GLAMO_REG_LCD_COMMAND2, 0x0000 }, /* display page A */
+};
+
+static int glamofb_run_script(struct glamofb_handle *glamo,
+ struct glamo_script *script, int len)
+{
+ int i;
+
+ if (glamo->core->suspending) {
+ dev_err(&glamo->core->pdev->dev,
+ "IGNORING glamofb_run_script while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < len; i++) {
+ struct glamo_script *line = &script[i];
+
+ if (line->reg == 0xffff)
+ return 0;
+ else if (line->reg == 0xfffe)
+ msleep(line->val);
+ else
+ reg_write(glamo, script[i].reg, script[i].val);
+ }
+
+ return 0;
+}
+
+static int glamofb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct glamofb_handle *glamo = info->par;
+
+ if (glamo->core->suspending) {
+ dev_err(&glamo->core->pdev->dev,
+ "IGNORING glamofb_check_var while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ if (var->bits_per_pixel != 16)
+ var->bits_per_pixel = 16;
+
+ var->height = glamo->mach_info->height;
+ var->width = glamo->mach_info->width;
+
+ /* FIXME: set rgb positions */
+ switch (var->bits_per_pixel) {
+ case 16:
+ switch (reg_read(glamo, GLAMO_REG_LCD_MODE3) & 0xc000) {
+ case GLAMO_LCD_SRC_RGB565:
+ var->red.offset = 11;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->red.length = 5;
+ var->green.length = 6;
+ var->blue.length = 5;
+ var->transp.length = 0;
+ break;
+ case GLAMO_LCD_SRC_ARGB1555:
+ var->transp.offset = 15;
+ var->red.offset = 10;
+ var->green.offset = 5;
+ var->blue.offset = 0;
+ var->transp.length = 1;
+ var->red.length = 5;
+ var->green.length = 5;
+ var->blue.length = 5;
+ break;
+ case GLAMO_LCD_SRC_ARGB4444:
+ var->transp.offset = 12;
+ var->red.offset = 8;
+ var->green.offset = 4;
+ var->blue.offset = 0;
+ var->transp.length = 4;
+ var->red.length = 4;
+ var->green.length = 4;
+ var->blue.length = 4;
+ break;
+ }
+ break;
+ case 24:
+ case 32:
+ default:
+ /* The Smedia Glamo doesn't support anything but 16bit color */
+ printk(KERN_ERR
+ "Smedia driver does not [yet?] support 24/32bpp\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void reg_set_bit_mask(struct glamofb_handle *glamo,
+ u_int16_t reg, u_int16_t mask,
+ u_int16_t val)
+{
+ u_int16_t tmp;
+
+ val &= mask;
+
+ tmp = reg_read(glamo, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ reg_write(glamo, reg, tmp);
+}
+
+#define GLAMO_LCD_WIDTH_MASK 0x03FF
+#define GLAMO_LCD_HEIGHT_MASK 0x03FF
+#define GLAMO_LCD_PITCH_MASK 0x07FE
+#define GLAMO_LCD_HV_TOTAL_MASK 0x03FF
+#define GLAMO_LCD_HV_RETR_START_MASK 0x03FF
+#define GLAMO_LCD_HV_RETR_END_MASK 0x03FF
+#define GLAMO_LCD_HV_RETR_DISP_START_MASK 0x03FF
+#define GLAMO_LCD_HV_RETR_DISP_END_MASK 0x03FF
+
+/* the caller has to ensure lock_cmd is held and we are in cmd mode */
+static void __rotate_lcd(struct glamofb_handle *glamo, __u32 rotation)
+{
+ int glamo_rot;
+
+ if (glamo->core->suspending) {
+ dev_err(&glamo->core->pdev->dev,
+ "IGNORING rotate_lcd while "
+ "suspended\n");
+ return;
+ }
+
+ switch (rotation) {
+ case FB_ROTATE_CW:
+ glamo_rot = GLAMO_LCD_ROT_MODE_90;
+ break;
+ case FB_ROTATE_UD:
+ glamo_rot = GLAMO_LCD_ROT_MODE_180;
+ break;
+ case FB_ROTATE_CCW:
+ glamo_rot = GLAMO_LCD_ROT_MODE_270;
+ break;
+ default:
+ glamo_rot = GLAMO_LCD_ROT_MODE_0;
+ break;
+ }
+
+ reg_set_bit_mask(glamo,
+ GLAMO_REG_LCD_WIDTH,
+ GLAMO_LCD_ROT_MODE_MASK,
+ glamo_rot);
+ reg_set_bit_mask(glamo,
+ GLAMO_REG_LCD_MODE1,
+ GLAMO_LCD_MODE1_ROTATE_EN,
+ (glamo_rot != GLAMO_LCD_ROT_MODE_0) ?
+ GLAMO_LCD_MODE1_ROTATE_EN : 0);
+}
+
+static void glamofb_program_mode(struct glamofb_handle* gfb) {
+ int sync, bp, disp, fp, total;
+ unsigned long flags;
+ struct glamo_core *gcore = gfb->core;
+ struct fb_var_screeninfo *var = &gfb->fb->var;
+
+ dev_dbg(&gcore->pdev->dev,
+ "glamofb_program_mode spin_lock_irqsave\n");
+ spin_lock_irqsave(&gfb->lock_cmd, flags);
+
+ if (glamofb_cmd_mode(gfb, 1))
+ goto out_unlock;
+
+ if (var->pixclock)
+ glamo_engine_reclock(gcore,
+ GLAMO_ENGINE_LCD,
+ (1000000000UL / gfb->fb->var.pixclock) * 1000);
+
+ reg_set_bit_mask(gfb,
+ GLAMO_REG_LCD_WIDTH,
+ GLAMO_LCD_WIDTH_MASK,
+ var->xres);
+ reg_set_bit_mask(gfb,
+ GLAMO_REG_LCD_HEIGHT,
+ GLAMO_LCD_HEIGHT_MASK,
+ var->yres);
+ reg_set_bit_mask(gfb,
+ GLAMO_REG_LCD_PITCH,
+ GLAMO_LCD_PITCH_MASK,
+ gfb->fb->fix.line_length);
+
+ /* honour the rotation request */
+ __rotate_lcd(gfb, var->rotate);
+
+ /* update scanout timings */
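+ /* horizontal: hsync_len, then the back porch (left_margin), the active
+ * width and the front porch (right_margin) add up to the line total */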
+ sync = 0;
+ bp = sync + var->hsync_len;
+ disp = bp + var->left_margin;
+ fp = disp + var->xres;
+ total = fp + var->right_margin;
+
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_TOTAL,
+ GLAMO_LCD_HV_TOTAL_MASK, total);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_RETR_START,
+ GLAMO_LCD_HV_RETR_START_MASK, sync);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_RETR_END,
+ GLAMO_LCD_HV_RETR_END_MASK, bp);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_DISP_START,
+ GLAMO_LCD_HV_RETR_DISP_START_MASK, disp);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_HORIZ_DISP_END,
+ GLAMO_LCD_HV_RETR_DISP_END_MASK, fp);
+
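+ /* vertical timings are built the same way, in lines */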
+ sync = 0;
+ bp = sync + var->vsync_len;
+ disp = bp + var->upper_margin;
+ fp = disp + var->yres;
+ total = fp + var->lower_margin;
+
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_TOTAL,
+ GLAMO_LCD_HV_TOTAL_MASK, total);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_RETR_START,
+ GLAMO_LCD_HV_RETR_START_MASK, sync);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_RETR_END,
+ GLAMO_LCD_HV_RETR_END_MASK, bp);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_DISP_START,
+ GLAMO_LCD_HV_RETR_DISP_START_MASK, disp);
+ reg_set_bit_mask(gfb, GLAMO_REG_LCD_VERT_DISP_END,
+ GLAMO_LCD_HV_RETR_DISP_END_MASK, fp);
+
+ glamofb_cmd_mode(gfb, 0);
+
+ gfb->mode_set = 1;
+
+out_unlock:
+ dev_dbg(&gcore->pdev->dev,
+ "glamofb_program_mode spin_unlock_irqrestore\n");
+ spin_unlock_irqrestore(&gfb->lock_cmd, flags);
+}
+
+
+static int glamofb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return 0;
+}
+
+static struct fb_videomode *glamofb_find_mode(struct fb_info *info,
+ struct fb_var_screeninfo *var) {
+ struct glamofb_handle *glamo = info->par;
+ struct glamo_fb_platform_data *mach_info = glamo->mach_info;
+ struct fb_videomode *mode;
+ int i;
+
+ for(i = mach_info->num_modes, mode = mach_info->modes; i > 0; --i, ++mode) {
+ if (mode->xres == var->xres &&
+ mode->yres == var->yres)
+ return mode;
+ }
+
+ return NULL;
+}
+
+static int glamofb_set_par(struct fb_info *info)
+{
+ struct glamofb_handle *glamo = info->par;
+ struct fb_var_screeninfo *var = &info->var;
+ struct fb_videomode *mode;
+
+ if (glamo->core->suspending) {
+ dev_err(&glamo->core->pdev->dev,
+ "IGNORING glamofb_set_par while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ mode = glamofb_find_mode(info, var);
+ if (!mode)
+ return -EINVAL;
+
+ fb_videomode_to_var(var, mode);
+
+ info->mode = mode;
+
+ glamo->mode_set = 0;
+
+ switch(var->rotate) {
+ case FB_ROTATE_CW:
+ case FB_ROTATE_CCW:
+ info->fix.line_length = (var->yres * var->bits_per_pixel) / 8;
+ /* FIXME: Limit pixelclock */
+ var->pixclock *= 2;
+ break;
+ default:
+ info->fix.line_length = (var->xres * var->bits_per_pixel) / 8;
+ break;
+ }
+
+ if(glamo->output_enabled)
+ glamofb_program_mode(glamo);
+
+ return 0;
+}
+
+static int glamofb_blank(int blank_mode, struct fb_info *info)
+{
+ struct glamofb_handle *gfb = info->par;
+
+ dev_dbg(gfb->dev, "glamofb_blank(%u)\n", blank_mode);
+
+ switch (blank_mode) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ /* FIXME: add pdata hook/flag to indicate whether
+ * we should already switch off pixel clock here */
+ break;
+ case FB_BLANK_POWERDOWN:
+ /* disable the pixel clock */
+ glamo_output_disable(gfb);
+ gfb->blank_mode = blank_mode;
+ break;
+ case FB_BLANK_UNBLANK:
+ case FB_BLANK_NORMAL:
+ glamo_output_enable(gfb);
+ gfb->blank_mode = blank_mode;
+ break;
+ }
+
+ /* FIXME: once we have proper clock management in glamo-core,
+ * we can determine if other units need MCLK1 or the PLL, and
+ * disable it if not used. */
+ return 0;
+}
+
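+/* scale a 16-bit colour component down to the width of its bitfield and
+ * shift it into position for the pseudo-palette */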
+static inline unsigned int chan_to_field(unsigned int chan,
+ struct fb_bitfield *bf)
+{
+ chan &= 0xffff;
+ chan >>= 16 - bf->length;
+ return chan << bf->offset;
+}
+
+static int glamofb_setcolreg(unsigned regno,
+ unsigned red, unsigned green, unsigned blue,
+ unsigned transp, struct fb_info *info)
+{
+ struct glamofb_handle *glamo = info->par;
+ unsigned int val;
+
+ if (glamo->core->suspending) {
+ dev_err(&glamo->core->pdev->dev,
+ "IGNORING glamofb_set_par while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ switch (glamo->fb->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ case FB_VISUAL_DIRECTCOLOR:
+ /* true-colour, use pseudo-palette */
+
+ if (regno < 16) {
+ u32 *pal = glamo->fb->pseudo_palette;
+
+ val = chan_to_field(red, &glamo->fb->var.red);
+ val |= chan_to_field(green, &glamo->fb->var.green);
+ val |= chan_to_field(blue, &glamo->fb->var.blue);
+
+ pal[regno] = val;
+ }
+ break;
+ default:
+ return 1; /* unknown type */
+ }
+
+ return 0;
+}
+
+static int glamofb_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg) {
+ struct glamofb_handle *gfb = (struct glamofb_handle*)info->par;
+ struct glamo_core *gcore = gfb->core;
+ int retval = -ENOTTY;
+
+ switch (cmd) {
+ case GLAMOFB_ENGINE_ENABLE:
+ retval = glamo_engine_enable(gcore, arg);
+ break;
+ case GLAMOFB_ENGINE_DISABLE:
+ retval = glamo_engine_disable(gcore, arg);
+ break;
+ case GLAMOFB_ENGINE_RESET:
+ glamo_engine_reset(gcore, arg);
+ retval = 0;
+ break;
+ default:
+ break;
+ }
+
+ return retval;
+}
+
+
+#ifdef CONFIG_MFD_GLAMO_HWACCEL
+static inline void glamofb_vsync_wait(struct glamofb_handle *glamo,
+ int line, int size, int range)
+{
+ int count[2];
+
+ do {
+ count[0] = reg_read(glamo, GLAMO_REG_LCD_STATUS2) & 0x3ff;
+ count[1] = reg_read(glamo, GLAMO_REG_LCD_STATUS2) & 0x3ff;
+ } while (count[0] != count[1] ||
+ (line < count[0] + range &&
+ size > count[0] - range) ||
+ count[0] < range * 2);
+}
+
+/*
+ * Enable/disable the hardware cursor mode altogether
+ * (for blinking and such, use glamofb_cursor()).
+ */
+static void glamofb_cursor_onoff(struct glamofb_handle *glamo, int on)
+{
+ int y, size;
+
+ if (glamo->cursor_on) {
+ y = reg_read(glamo, GLAMO_REG_LCD_CURSOR_Y_POS);
+ size = reg_read(glamo, GLAMO_REG_LCD_CURSOR_Y_SIZE);
+
+ glamofb_vsync_wait(glamo, y, size, 30);
+ }
+
+ reg_set_bit_mask(glamo, GLAMO_REG_LCD_MODE1,
+ GLAMO_LCD_MODE1_CURSOR_EN,
+ on ? GLAMO_LCD_MODE1_CURSOR_EN : 0);
+ glamo->cursor_on = on;
+
+ /* Hide the cursor by default */
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_SIZE, 0);
+}
+
+static int glamofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
+{
+ struct glamofb_handle *glamo = info->par;
+ unsigned long flags;
+
+ spin_lock_irqsave(&glamo->lock_cmd, flags);
+
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_SIZE,
+ cursor->enable ? cursor->image.width : 0);
+
+ if (cursor->set & FB_CUR_SETPOS) {
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_X_POS,
+ cursor->image.dx);
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_Y_POS,
+ cursor->image.dy);
+ }
+
+ if (cursor->set & FB_CUR_SETCMAP) {
+ uint16_t fg = glamo->pseudo_pal[cursor->image.fg_color];
+ uint16_t bg = glamo->pseudo_pal[cursor->image.bg_color];
+
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_FG_COLOR, fg);
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_BG_COLOR, bg);
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_DST_COLOR, fg);
+ }
+
+ if (cursor->set & FB_CUR_SETHOT)
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_PRESET,
+ (cursor->hot.x << 8) | cursor->hot.y);
+
+ if ((cursor->set & FB_CUR_SETSIZE) ||
+ (cursor->set & (FB_CUR_SETIMAGE | FB_CUR_SETSHAPE))) {
+ int x, y, pitch, op;
+ const uint8_t *pcol = cursor->image.data;
+ const uint8_t *pmsk = cursor->mask;
+ uint8_t __iomem *dst = glamo->cursor_addr;
+ uint8_t dcol = 0;
+ uint8_t dmsk = 0;
+ uint8_t byte = 0;
+
+ if (cursor->image.depth > 1) {
+ spin_unlock_irqrestore(&glamo->lock_cmd, flags);
+ return -EINVAL;
+ }
+
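+ /* the cursor image is 2 bits per pixel (colour + mask), so each line
+ * takes width / 4 bytes, rounded up to an even number of bytes */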
+ pitch = ((cursor->image.width + 7) >> 2) & ~1;
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_PITCH,
+ pitch);
+ reg_write(glamo, GLAMO_REG_LCD_CURSOR_Y_SIZE,
+ cursor->image.height);
+
+ for (y = 0; y < cursor->image.height; y++) {
+ byte = 0;
+ for (x = 0; x < cursor->image.width; x++) {
+ if ((x % 8) == 0) {
+ dcol = *pcol++;
+ dmsk = *pmsk++;
+ } else {
+ dcol >>= 1;
+ dmsk >>= 1;
+ }
+
+ if (cursor->rop == ROP_COPY)
+ op = (dmsk & 1) ?
+ (dcol & 1) ? 1 : 3 : 0;
+ else
+ op = ((dmsk & 1) << 1) |
+ ((dcol & 1) << 0);
+ byte |= op << ((x & 3) << 1);
+
+ if (x % 4 == 3) {
+ writeb(byte, dst + x / 4);
+ byte = 0;
+ }
+ }
+ if (x % 4) {
+ writeb(byte, dst + x / 4);
+ byte = 0;
+ }
+
+ dst += pitch;
+ }
+ }
+
+ spin_unlock_irqrestore(&glamo->lock_cmd, flags);
+
+ return 0;
+}
+#endif
+
+static inline int glamofb_cmdq_empty(struct glamofb_handle *gfb)
+{
+ /* DGCMdQempty -- 1 == command queue is empty */
+ return reg_read(gfb, GLAMO_REG_LCD_STATUS1) & (1 << 15);
+}
+
+/* if locking is needed, the caller must hold gfb->lock_cmd across this call and until it unlocks */
+int glamofb_cmd_mode(struct glamofb_handle *gfb, int on)
+{
+ int timeout = 2000000;
+
+ if (gfb->core->suspending) {
+ dev_err(&gfb->core->pdev->dev,
+ "IGNORING glamofb_cmd_mode while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ dev_dbg(gfb->dev, "glamofb_cmd_mode(gfb=%p, on=%d)\n", gfb, on);
+ if (on) {
+ dev_dbg(gfb->dev, "%s: waiting for cmdq empty: ",
+ __func__);
+ while ((!glamofb_cmdq_empty(gfb)) && (timeout--))
+ cpu_relax();
+ if (timeout < 0) {
+ printk(KERN_ERR"*************"
+ "glamofb cmd_queue never got empty"
+ "*************\n");
+ return -EIO;
+ }
+ dev_dbg(gfb->dev, "empty!\n");
+
+ /* display the entire frame then switch to command */
+ reg_write(gfb, GLAMO_REG_LCD_COMMAND1,
+ GLAMO_LCD_CMD_TYPE_DISP |
+ GLAMO_LCD_CMD_DATA_FIRE_VSYNC);
+
+ /* wait until lcd idle */
+ dev_dbg(gfb->dev, "waiting for lcd idle: ");
+ timeout = 2000000;
+ while ((!(reg_read(gfb, GLAMO_REG_LCD_STATUS2) & (1 << 12))) &&
+ (timeout--))
+ cpu_relax();
+ if (timeout < 0) {
+ printk(KERN_ERR"*************"
+ "glamofb lcd never idle"
+ "*************\n");
+ return -EIO;
+ }
+
+ mdelay(100);
+
+ dev_dbg(gfb->dev, "cmd mode entered\n");
+
+ } else {
+ /* RGB interface needs vsync/hsync */
+ if (reg_read(gfb, GLAMO_REG_LCD_MODE3) & GLAMO_LCD_MODE3_RGB)
+ reg_write(gfb, GLAMO_REG_LCD_COMMAND1,
+ GLAMO_LCD_CMD_TYPE_DISP |
+ GLAMO_LCD_CMD_DATA_DISP_SYNC);
+
+ reg_write(gfb, GLAMO_REG_LCD_COMMAND1,
+ GLAMO_LCD_CMD_TYPE_DISP |
+ GLAMO_LCD_CMD_DATA_DISP_FIRE);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(glamofb_cmd_mode);
+
+
+int glamofb_cmd_write(struct glamofb_handle *gfb, u_int16_t val)
+{
+ int timeout = 200000;
+
+ if (gfb->core->suspending) {
+ dev_err(&gfb->core->pdev->dev,
+ "IGNORING glamofb_cmd_write while "
+ "suspended\n");
+ return -EBUSY;
+ }
+
+ dev_dbg(gfb->dev, "%s: waiting for cmdq empty\n", __func__);
+ while ((!glamofb_cmdq_empty(gfb)) && (timeout--))
+ yield();
+ if (timeout < 0) {
+ printk(KERN_ERR"*************"
+ "glamofb cmd_queue never got empty"
+ "*************\n");
+ return 1;
+ }
+ dev_dbg(gfb->dev, "idle, writing 0x%04x\n", val);
+
+ reg_write(gfb, GLAMO_REG_LCD_COMMAND1, val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(glamofb_cmd_write);
+
+static struct fb_ops glamofb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = glamofb_check_var,
+ .fb_pan_display = glamofb_pan_display,
+ .fb_set_par = glamofb_set_par,
+ .fb_blank = glamofb_blank,
+ .fb_setcolreg = glamofb_setcolreg,
+ .fb_ioctl = glamofb_ioctl,
+#ifdef CONFIG_MFD_GLAMO_HWACCEL
+ .fb_cursor = glamofb_cursor,
+#endif
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+};
+
+static int glamofb_init_regs(struct glamofb_handle *glamo)
+{
+ struct fb_info *info = glamo->fb;
+
+ glamofb_check_var(&info->var, info);
+ glamofb_run_script(glamo, glamo_regs, ARRAY_SIZE(glamo_regs));
+ glamofb_set_par(info);
+
+ return 0;
+}
+
+static int __devinit glamofb_probe(struct platform_device *pdev)
+{
+ int rc = -EIO;
+ struct fb_info *fbinfo;
+ struct glamofb_handle *glamofb;
+ struct glamo_core *core = dev_get_drvdata(pdev->dev.parent);
+ struct glamo_fb_platform_data *mach_info;
+
+ printk(KERN_INFO "SMEDIA Glamo frame buffer driver (C) 2007 "
+ "Openmoko, Inc.\n");
+
+ if (!core->pdata || !core->pdata->fb_data)
+ return -ENOENT;
+
+
+ fbinfo = framebuffer_alloc(sizeof(struct glamofb_handle), &pdev->dev);
+ if (!fbinfo)
+ return -ENOMEM;
+
+
+ glamofb = fbinfo->par;
+ glamofb->fb = fbinfo;
+ glamofb->dev = &pdev->dev;
+
+ glamofb->blank_mode = FB_BLANK_POWERDOWN;
+
+ strcpy(fbinfo->fix.id, "SMedia Glamo");
+
+ glamofb->reg = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "glamo-fb-regs");
+ if (!glamofb->reg) {
+ dev_err(&pdev->dev, "platform device with no registers?\n");
+ rc = -ENOENT;
+ goto out_free;
+ }
+
+ glamofb->fb_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "glamo-fb-mem");
+ if (!glamofb->fb_res) {
+ dev_err(&pdev->dev, "platform device with no memory ?\n");
+ rc = -ENOENT;
+ goto out_free;
+ }
+
+ glamofb->reg = request_mem_region(glamofb->reg->start,
+ resource_size(glamofb->reg), pdev->name);
+ if (!glamofb->reg) {
+ dev_err(&pdev->dev, "failed to request mmio region\n");
+ goto out_free;
+ }
+
+ glamofb->fb_res = request_mem_region(glamofb->fb_res->start,
+ resource_size(glamofb->fb_res),
+ pdev->name);
+ if (!glamofb->fb_res) {
+ dev_err(&pdev->dev, "failed to request vram region\n");
+ goto out_release_reg;
+ }
+
+ /* we want to remap only the registers required for this core
+ * driver. */
+ glamofb->base = ioremap_nocache(glamofb->reg->start, resource_size(glamofb->reg));
+ if (!glamofb->base) {
+ dev_err(&pdev->dev, "failed to ioremap() mmio memory\n");
+ goto out_release_fb;
+ }
+
+ fbinfo->fix.smem_start = (unsigned long) glamofb->fb_res->start;
+ fbinfo->fix.smem_len = (__u32) resource_size(glamofb->fb_res);
+
+ fbinfo->screen_base = ioremap(glamofb->fb_res->start,
+ resource_size(glamofb->fb_res));
+ if (!fbinfo->screen_base) {
+ dev_err(&pdev->dev, "failed to ioremap() vram memory\n");
+ goto out_unmap_reg;
+ }
+ glamofb->cursor_addr = fbinfo->screen_base + 0x12C000;
+
+ platform_set_drvdata(pdev, glamofb);
+
+ mach_info = core->pdata->fb_data;
+ glamofb->core = core;
+ glamofb->mach_info = mach_info;
+
+ fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
+ fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
+ fbinfo->fix.type_aux = 0;
+ fbinfo->fix.xpanstep = 0;
+ fbinfo->fix.ypanstep = 0;
+ fbinfo->fix.ywrapstep = 0;
+ fbinfo->fix.accel = FB_ACCEL_GLAMO;
+
+
+ fbinfo->fbops = &glamofb_ops;
+ fbinfo->flags = FBINFO_FLAG_DEFAULT;
+ fbinfo->pseudo_palette = &glamofb->pseudo_pal;
+
+ fbinfo->mode = mach_info->modes;
+ fb_videomode_to_var(&fbinfo->var, fbinfo->mode);
+ fbinfo->var.bits_per_pixel = 16;
+ fbinfo->var.nonstd = 0;
+ fbinfo->var.activate = FB_ACTIVATE_NOW;
+ fbinfo->var.height = mach_info->height;
+ fbinfo->var.width = mach_info->width;
+ fbinfo->var.accel_flags = 0;
+ fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
+
+ glamo_engine_enable(core, GLAMO_ENGINE_LCD);
+ glamo_engine_reset(core, GLAMO_ENGINE_LCD);
+ glamofb->output_enabled = 1;
+ glamofb->mode_set = 1;
+
+ dev_info(&pdev->dev, "spin_lock_init\n");
+ spin_lock_init(&glamofb->lock_cmd);
+ glamofb_init_regs(glamofb);
+#ifdef CONFIG_MFD_GLAMO_HWACCEL
+ glamofb_cursor_onoff(glamofb, 1);
+#endif
+
+ fb_videomode_to_modelist(mach_info->modes, mach_info->num_modes,
+ &fbinfo->modelist);
+
+ rc = register_framebuffer(fbinfo);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "failed to register framebuffer\n");
+ goto out_unmap_fb;
+ }
+
+ printk(KERN_INFO "fb%d: %s frame buffer device\n",
+ fbinfo->node, fbinfo->fix.id);
+
+ return 0;
+
+out_unmap_fb:
+ iounmap(fbinfo->screen_base);
+out_unmap_reg:
+ iounmap(glamofb->base);
+out_release_fb:
+ release_mem_region(glamofb->fb_res->start, resource_size(glamofb->fb_res));
+out_release_reg:
+ release_mem_region(glamofb->reg->start, resource_size(glamofb->reg));
+out_free:
+ framebuffer_release(fbinfo);
+ return rc;
+}
+
+static int glamofb_remove(struct platform_device *pdev)
+{
+ struct glamofb_handle *glamofb = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_framebuffer(glamofb->fb);
+ iounmap(glamofb->fb->screen_base);
+ iounmap(glamofb->base);
+ release_mem_region(glamofb->fb_res->start, resource_size(glamofb->fb_res));
+ release_mem_region(glamofb->reg->start, resource_size(glamofb->reg));
+ framebuffer_release(glamofb->fb);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int glamofb_suspend(struct device *dev)
+{
+ struct glamofb_handle *gfb = dev_get_drvdata(dev);
+
+ /* we need to stop anything touching our framebuffer */
+ fb_set_suspend(gfb->fb, 1);
+
+ /* seriously -- nobody is allowed to touch glamo memory when we
+ * are suspended or we lock on nWAIT
+ */
+ /* iounmap(gfb->fb->screen_base); */
+
+ return 0;
+}
+
+static int glamofb_resume(struct device *dev)
+{
+ struct glamofb_handle *gfb = dev_get_drvdata(dev);
+
+ /* OK let's allow framebuffer ops again */
+ /* gfb->fb->screen_base = ioremap(gfb->fb_res->start,
+ resource_size(gfb->fb_res)); */
+ glamo_engine_enable(gfb->core, GLAMO_ENGINE_LCD);
+ glamo_engine_reset(gfb->core, GLAMO_ENGINE_LCD);
+
+ glamofb_init_regs(gfb);
+#ifdef CONFIG_MFD_GLAMO_HWACCEL
+ glamofb_cursor_onoff(gfb, 1);
+#endif
+
+ fb_set_suspend(gfb->fb, 0);
+
+ return 0;
+}
+
+static struct dev_pm_ops glamofb_pm_ops = {
+ .suspend = glamofb_suspend,
+ .resume = glamofb_resume,
+};
+
+#define GLAMOFB_PM_OPS (&glamofb_pm_ops)
+
+#else
+#define GLAMOFB_PM_OPS NULL
+#endif
+
+static struct platform_driver glamofb_driver = {
+ .probe = glamofb_probe,
+ .remove = glamofb_remove,
+ .driver = {
+ .name = "glamo-fb",
+ .owner = THIS_MODULE,
+ .pm = GLAMOFB_PM_OPS
+ },
+};
+
+static int __init glamofb_init(void)
+{
+ return platform_driver_register(&glamofb_driver);
+}
+
+static void __exit glamofb_cleanup(void)
+{
+ platform_driver_unregister(&glamofb_driver);
+}
+
+module_init(glamofb_init);
+module_exit(glamofb_cleanup);
+
+MODULE_AUTHOR("Harald Welte <laforge@openmoko.org>");
+MODULE_DESCRIPTION("Smedia Glamo 336x/337x framebuffer driver");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-gpio.c b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-gpio.c
new file mode 100644
index 000000000..6978e6223
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-gpio.c
@@ -0,0 +1,281 @@
+/* Smedia Glamo 336x/337x gpio driver
+ *
+ * (C) 2009 Lars-Peter Clausen
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include <linux/gpio.h>
+#include <linux/mfd/glamo.h>
+
+#include "glamo-core.h"
+#include "glamo-regs.h"
+
+#define GLAMO_NR_GPIO 21
+#define GLAMO_NR_GPIO_REGS DIV_ROUND_UP(GLAMO_NR_GPIO, 4)
+
+#define GLAMO_REG_GPIO(x) (((x) * 2) + GLAMO_REG_GPIO_GEN1)
+
+struct glamo_gpio {
+ struct glamo_core *glamo;
+ struct gpio_chip chip;
+ uint16_t saved_regs[GLAMO_NR_GPIO_REGS];
+};
+
+#define REG_OF_GPIO(gpio) (GLAMO_REG_GPIO((gpio) >> 2))
+#define NUM_OF_GPIO(gpio) ((gpio) & 0x3)
+#define DIRECTION_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 0))
+#define OUTPUT_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 4))
+#define INPUT_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 8))
+#define FUNC_BIT(gpio) (1 << (NUM_OF_GPIO(gpio) + 12))
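+
+/*
+ * Added note (not part of the original patch): each 16-bit GPIO register
+ * packs four pins -- bits 0..3 are direction (set = input, see
+ * glamo_gpio_direction_input() below), bits 4..7 the output level,
+ * bits 8..11 the input level and bits 12..15 the pin-function select
+ * claimed by glamo_gpio_request().  For example, per the macros above,
+ * GPIO 10 lives in GLAMO_REG_GPIO_GEN3 (offset 0x54) and uses direction
+ * bit 2, output bit 6, input bit 10 and function bit 14.
+ */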
+
+
+static inline struct glamo_core *chip_to_glamo(struct gpio_chip *chip)
+{
+ return container_of(chip, struct glamo_gpio, chip)->glamo;
+}
+
+static void glamo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ unsigned int reg = REG_OF_GPIO(offset);
+ u_int16_t tmp;
+
+ spin_lock(&glamo->lock);
+ tmp = readw(glamo->base + reg);
+ if (value)
+ tmp |= OUTPUT_BIT(offset);
+ else
+ tmp &= ~OUTPUT_BIT(offset);
+ writew(tmp, glamo->base + reg);
+ spin_unlock(&glamo->lock);
+}
+
+static int glamo_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ return readw(glamo->base + REG_OF_GPIO(offset)) & INPUT_BIT(offset) ? 1 : 0;
+}
+
+static int glamo_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ unsigned int reg = REG_OF_GPIO(offset);
+ u_int16_t tmp;
+
+ spin_lock(&glamo->lock);
+ tmp = readw(glamo->base + reg);
+ if ((tmp & FUNC_BIT(offset)) == 0) {
+ tmp |= FUNC_BIT(offset);
+ writew(tmp, glamo->base + reg);
+ }
+ spin_unlock(&glamo->lock);
+
+ return 0;
+}
+
+static void glamo_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ unsigned int reg = REG_OF_GPIO(offset);
+ u_int16_t tmp;
+
+ spin_lock(&glamo->lock);
+ tmp = readw(glamo->base + reg);
+ if (tmp & FUNC_BIT(offset)) {
+ tmp &= ~FUNC_BIT(offset);
+ writew(tmp, glamo->base + reg);
+ }
+ spin_unlock(&glamo->lock);
+}
+
+static int glamo_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ unsigned int reg = REG_OF_GPIO(offset);
+ u_int16_t tmp;
+
+ spin_lock(&glamo->lock);
+ tmp = readw(glamo->base + reg);
+ tmp &= ~DIRECTION_BIT(offset);
+
+ if (value)
+ tmp |= OUTPUT_BIT(offset);
+ else
+ tmp &= ~OUTPUT_BIT(offset);
+
+ writew(tmp, glamo->base + reg);
+ spin_unlock(&glamo->lock);
+
+ return 0;
+}
+
+static int glamo_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct glamo_core *glamo = chip_to_glamo(chip);
+ unsigned int reg = REG_OF_GPIO(offset);
+ u_int16_t tmp;
+
+ spin_lock(&glamo->lock);
+ tmp = readw(glamo->base + reg);
+ if ((tmp & DIRECTION_BIT(offset)) == 0) {
+ tmp |= DIRECTION_BIT(offset);
+ writew(tmp, glamo->base + reg);
+ }
+ spin_unlock(&glamo->lock);
+
+ return 0;
+}
+
+static const struct gpio_chip glamo_gpio_chip = {
+ .label = "glamo",
+ .request = glamo_gpio_request,
+ .free = glamo_gpio_free,
+ .direction_input = glamo_gpio_direction_input,
+ .get = glamo_gpio_get,
+ .direction_output = glamo_gpio_direction_output,
+ .set = glamo_gpio_set,
+ .base = -1,
+ .ngpio = GLAMO_NR_GPIO,
+ .can_sleep = 0,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit glamo_gpio_probe(struct platform_device *pdev)
+{
+ struct glamo_platform_data *pdata = pdev->dev.parent->platform_data;
+ struct glamo_gpio *glamo_gpio;
+ int ret;
+
+ glamo_gpio = kzalloc(sizeof(struct glamo_gpio), GFP_KERNEL);
+ if (!glamo_gpio)
+ return -ENOMEM;
+
+ glamo_gpio->glamo = dev_get_drvdata(pdev->dev.parent);
+ glamo_gpio->chip = glamo_gpio_chip;
+ glamo_gpio->chip.dev = &pdev->dev;
+ if (pdata)
+ glamo_gpio->chip.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&glamo_gpio->chip);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register gpio chip: %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, glamo_gpio);
+ return 0;
+err:
+ kfree(glamo_gpio);
+ return ret;
+}
+
+static int __devexit glamo_gpio_remove(struct platform_device *pdev)
+{
+ struct glamo_gpio *glamo_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&glamo_gpio->chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(glamo_gpio);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int glamo_gpio_suspend(struct device *dev)
+{
+ struct glamo_gpio *glamo_gpio = dev_get_drvdata(dev);
+ struct glamo_core *glamo = glamo_gpio->glamo;
+ uint16_t *saved_regs = glamo_gpio->saved_regs;
+ int i;
+
+ spin_lock(&glamo->lock);
+ for (i = 0; i < GLAMO_NR_GPIO_REGS; ++i)
+ saved_regs[i] = readw(glamo->base + GLAMO_REG_GPIO(i));
+ spin_unlock(&glamo->lock);
+
+ return 0;
+}
+
+static int glamo_gpio_resume(struct device *dev)
+{
+ struct glamo_gpio *glamo_gpio = dev_get_drvdata(dev);
+ struct glamo_core *glamo = glamo_gpio->glamo;
+ uint16_t *saved_regs = glamo_gpio->saved_regs;
+ int i;
+
+ spin_lock(&glamo->lock);
+ for (i = 0; i < GLAMO_NR_GPIO_REGS; ++i)
+ writew(saved_regs[i], glamo->base + GLAMO_REG_GPIO(i));
+ spin_unlock(&glamo->lock);
+ return 0;
+}
+
+static const struct dev_pm_ops glamo_pm_ops = {
+ .suspend = glamo_gpio_suspend,
+ .resume = glamo_gpio_resume,
+ .freeze = glamo_gpio_suspend,
+ .thaw = glamo_gpio_resume,
+};
+
+#define GLAMO_GPIO_PM_OPS (&glamo_pm_ops)
+
+#else
+#define GLAMO_GPIO_PM_OPS NULL
+#endif
+
+static struct platform_driver glamo_gpio_driver = {
+ .driver = {
+ .name = "glamo-gpio",
+ .owner = THIS_MODULE,
+ .pm = GLAMO_GPIO_PM_OPS,
+ },
+ .probe = glamo_gpio_probe,
+ .remove = __devexit_p(glamo_gpio_remove),
+};
+
+static int __init glamo_gpio_init(void)
+{
+ return platform_driver_register(&glamo_gpio_driver);
+}
+
+static void __exit glamo_gpio_exit(void)
+{
+ platform_driver_unregister(&glamo_gpio_driver);
+}
+
+module_init(glamo_gpio_init);
+module_exit(glamo_gpio_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GPIO interface for the Glamo multimedia device");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:glamo-gpio");
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c
new file mode 100644
index 000000000..b728c8745
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-mci.c
@@ -0,0 +1,987 @@
+/*
+ * drivers/mfd/glamo/glamo-mci.c - Glamo MMC driver
+ *
+ * Copyright (C) 2007 Openmoko, Inc, Andy Green <andy@openmoko.com>
+ * Based on S3C MMC driver that was:
+ * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/crc7.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/glamo.h>
+
+#include "glamo-core.h"
+#include "glamo-regs.h"
+
+#define DRIVER_NAME "glamo-mci"
+
+struct glamo_mci_host {
+ struct glamo_mmc_platform_data *pdata;
+ struct platform_device *pdev;
+ struct glamo_core *core;
+ struct mmc_host *mmc;
+ struct resource *mmio_mem;
+ struct resource *data_mem;
+ void __iomem *mmio_base;
+ u16 __iomem *data_base;
+
+ struct regulator *regulator;
+ struct mmc_request *mrq;
+
+ unsigned int clk_rate;
+
+ unsigned short vdd;
+ char power_mode;
+
+ unsigned char request_counter;
+
+ struct timer_list disable_timer;
+
+ struct work_struct irq_work;
+ struct work_struct read_work;
+
+ unsigned clk_enabled : 1;
+
+};
+
+static void glamo_mci_send_request(struct mmc_host *mmc, struct mmc_request* mrq);
+static void glamo_mci_send_command(struct glamo_mci_host *host,
+ struct mmc_command *cmd);
+
+/*
+ * Max SD clock rate
+ *
+ * Held at /(3 + 1) due to concerns about the recommended 100R series
+ * resistor; that allows 16MHz @ 4-bit --> 8MBytes/sec raw.
+ *
+ * You can override this on the kernel command line using, for example,
+ *
+ * glamo_mci.sd_max_clk=10000000
+ */
+
+static int sd_max_clk = 21000000;
+module_param(sd_max_clk, int, 0644);
+
+/*
+ * Slow SD clock ratio
+ *
+ * You can override this on the kernel command line using, for example,
+ *
+ * glamo_mci.sd_slow_ratio=8
+ *
+ * A platform callback decides the effective clock rate: if it is not
+ * defined the maximum is used; if it is defined and returns nonzero, the
+ * rate is divided by this factor.
+ */
+
+static int sd_slow_ratio = 8;
+module_param(sd_slow_ratio, int, 0644);
+
+/*
+ * Post-power SD clock rate
+ *
+ * You can override this on the kernel command line using, for example,
+ *
+ * glamo_mci.sd_post_power_clock=1000000
+ *
+ * After changing power to the card, the clock is held at this rate until
+ * the first bulk transfer completes.
+ */
+
+static int sd_post_power_clock = 1000000;
+module_param(sd_post_power_clock, int, 0644);
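+
+/*
+ * Illustrative sketch (added for this write-up, not part of the driver):
+ * how the three module parameters above are meant to combine into an
+ * effective card clock.  The "slow" decision really comes from a platform
+ * callback; glamo_mci_use_slow_clock() here is a hypothetical stand-in.
+ */
+#if 0
+static int glamo_mci_pick_clock(int requested, bool after_power_change)
+{
+	int rate = min(requested, sd_max_clk);
+
+	if (after_power_change && sd_post_power_clock)
+		rate = min(rate, sd_post_power_clock);	/* until first bulk transfer */
+
+	if (glamo_mci_use_slow_clock() && sd_slow_ratio)
+		rate /= sd_slow_ratio;			/* platform asked for slow mode */
+
+	return rate;
+}
+#endif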
+
+
+static inline void glamo_reg_write(struct glamo_mci_host *glamo,
+ u_int16_t reg, u_int16_t val)
+{
+ writew(val, glamo->mmio_base + reg);
+}
+
+static inline u_int16_t glamo_reg_read(struct glamo_mci_host *glamo,
+ u_int16_t reg)
+{
+ return readw(glamo->mmio_base + reg);
+}
+
+static void glamo_reg_set_bit_mask(struct glamo_mci_host *glamo,
+ u_int16_t reg, u_int16_t mask,
+ u_int16_t val)
+{
+ u_int16_t tmp;
+
+ val &= mask;
+
+ tmp = glamo_reg_read(glamo, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ glamo_reg_write(glamo, reg, tmp);
+}
+
+static void glamo_mci_clock_disable(struct glamo_mci_host *host)
+{
+ if (host->clk_enabled) {
+ glamo_engine_div_disable(host->core, GLAMO_ENGINE_MMC);
+ host->clk_enabled = 0;
+ }
+}
+
+static void glamo_mci_clock_enable(struct glamo_mci_host *host)
+{
+ del_timer_sync(&host->disable_timer);
+
+ if (!host->clk_enabled) {
+ glamo_engine_div_enable(host->core, GLAMO_ENGINE_MMC);
+ host->clk_enabled = 1;
+ }
+}
+
+static void glamo_mci_disable_timer(unsigned long data)
+{
+ struct glamo_mci_host *host = (struct glamo_mci_host *)data;
+ glamo_mci_clock_disable(host);
+}
+
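+/*
+ * Added note (not part of the original patch): the card clock is gated
+ * lazily.  glamo_mci_request_done() arms disable_timer for HZ/16 (about
+ * 60 ms); if no new request arrives in that window the timer callback
+ * above gates the MMC divider off, while glamo_mci_clock_enable()
+ * cancels any pending timer before re-enabling it for the next request.
+ */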
+
+static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ u16 __iomem *from_ptr = host->data_base;
+ void *sg_pointer;
+
+ dev_dbg(&host->pdev->dev, "pio_read():\n");
+ for (sg = data->sg; sg; sg = sg_next(sg)) {
+ sg_pointer = page_address(sg_page(sg)) + sg->offset;
+
+
+ memcpy(sg_pointer, from_ptr, sg->length);
+ from_ptr += sg->length >> 1;
+
+ data->bytes_xfered += sg->length;
+ }
+
+ dev_dbg(&host->pdev->dev, "pio_read(): "
+ "complete (no more data).\n");
+}
+
+static void do_pio_write(struct glamo_mci_host *host, struct mmc_data *data)
+{
+ struct scatterlist *sg;
+ u16 __iomem *to_ptr = host->data_base;
+ void *sg_pointer;
+
+ dev_dbg(&host->pdev->dev, "pio_write():\n");
+ for (sg = data->sg; sg; sg = sg_next(sg)) {
+ sg_pointer = page_address(sg_page(sg)) + sg->offset;
+
+ data->bytes_xfered += sg->length;
+
+ memcpy(to_ptr, sg_pointer, sg->length);
+ to_ptr += sg->length >> 1;
+ }
+
+ dev_dbg(&host->pdev->dev, "pio_write(): complete\n");
+}
+
+static int glamo_mci_set_card_clock(struct glamo_mci_host *host, int freq)
+{
+ int real_rate = 0;
+
+ if (freq) {
+ glamo_mci_clock_enable(host);
+ real_rate = glamo_engine_reclock(host->core, GLAMO_ENGINE_MMC, freq);
+ } else {
+ glamo_mci_clock_disable(host);
+ }
+
+ return real_rate;
+}
+
+static void glamo_mci_request_done(struct glamo_mci_host *host,
+ struct mmc_request *mrq)
+{
+ mod_timer(&host->disable_timer, jiffies + HZ / 16);
+ mmc_request_done(host->mmc, mrq);
+}
+
+
+static void glamo_mci_irq_worker(struct work_struct *work)
+{
+ struct glamo_mci_host *host = container_of(work, struct glamo_mci_host,
+ irq_work);
+ struct mmc_command *cmd;
+ uint16_t status;
+ if (!host->mrq || !host->mrq->cmd)
+ return;
+
+ cmd = host->mrq->cmd;
+
+#if 0
+ if (cmd->data->flags & MMC_DATA_READ) {
+ return;
+ }
+#endif
+
+ status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
+ dev_dbg(&host->pdev->dev, "status = 0x%04x\n", status);
+
+ /* we ignore a data timeout report if we are also told the data came */
+ if (status & GLAMO_STAT1_MMC_RB_DRDY)
+ status &= ~GLAMO_STAT1_MMC_DTOUT;
+
+ if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT))
+ cmd->error = -ETIMEDOUT;
+ if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) {
+ cmd->error = -EILSEQ;
+ }
+ if (cmd->error) {
+ dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
+ goto done;
+ }
+
+ /* issue STOP if we have been given one to use */
+ if (host->mrq->stop) {
+ glamo_mci_send_command(host, host->mrq->stop);
+ }
+
+ if (cmd->data->flags & MMC_DATA_READ)
+ do_pio_read(host, cmd->data);
+
+done:
+ host->mrq = NULL;
+ glamo_mci_request_done(host, cmd->mrq);
+}
+
+static void glamo_mci_read_worker(struct work_struct *work)
+{
+ struct glamo_mci_host *host = container_of(work, struct glamo_mci_host,
+ read_work);
+ struct mmc_command *cmd;
+ uint16_t status;
+ uint16_t blocks_ready;
+ size_t data_read = 0;
+ size_t data_ready;
+ struct scatterlist *sg;
+ u16 __iomem *from_ptr = host->data_base;
+ void *sg_pointer;
+
+
+ cmd = host->mrq->cmd;
+ sg = cmd->data->sg;
+ do {
+ status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
+
+ if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT))
+ cmd->error = -ETIMEDOUT;
+ if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR))
+ cmd->error = -EILSEQ;
+ if (cmd->error) {
+ dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n", status);
+ goto done;
+ }
+
+ blocks_ready = glamo_reg_read(host, GLAMO_REG_MMC_RB_BLKCNT);
+ data_ready = blocks_ready * cmd->data->blksz;
+
+ if (data_ready == data_read)
+ yield();
+
+ while(sg && data_read + sg->length <= data_ready) {
+ sg_pointer = page_address(sg_page(sg)) + sg->offset;
+ memcpy(sg_pointer, from_ptr, sg->length);
+ from_ptr += sg->length >> 1;
+
+ data_read += sg->length;
+ sg = sg_next(sg);
+ }
+
+ } while(sg);
+ cmd->data->bytes_xfered = data_read;
+
+ do {
+ status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
+ } while (!(status & GLAMO_STAT1_MMC_IDLE));
+
+ if (host->mrq->stop)
+ glamo_mci_send_command(host, host->mrq->stop);
+
+ do {
+ status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
+ } while (!(status & GLAMO_STAT1_MMC_IDLE));
+done:
+ host->mrq = NULL;
+ glamo_mci_request_done(host, cmd->mrq);
+}
+
+static irqreturn_t glamo_mci_irq(int irq, void *devid)
+{
+ struct glamo_mci_host *host = (struct glamo_mci_host*)devid;
+ schedule_work(&host->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void glamo_mci_send_command(struct glamo_mci_host *host,
+ struct mmc_command *cmd)
+{
+ u8 u8a[6];
+ u16 fire = 0;
+ unsigned int timeout = 1000000;
+ u16 * reg_resp = (u16 *)(host->mmio_base + GLAMO_REG_MMC_CMD_RSP1);
+ u16 status;
+ int triggers_int = 1;
+
+ /* if we can't do it, reject as busy */
+ if (!(glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1) &
+ GLAMO_STAT1_MMC_IDLE)) {
+ cmd->error = -EBUSY;
+ return;
+ }
+
+ /* create an array in wire order for CRC computation */
+ u8a[0] = 0x40 | (cmd->opcode & 0x3f);
+ u8a[1] = (u8)(cmd->arg >> 24);
+ u8a[2] = (u8)(cmd->arg >> 16);
+ u8a[3] = (u8)(cmd->arg >> 8);
+ u8a[4] = (u8)cmd->arg;
+ u8a[5] = (crc7(0, u8a, 5) << 1) | 0x01; /* crc7 on first 5 bytes of packet */
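+ /*
+ * Added note (not part of the original patch): u8a[] now holds the
+ * standard 48-bit MMC/SD command frame in wire order -- start bit 0,
+ * transmission bit 1 and the 6-bit command index in u8a[0], the
+ * 32-bit argument in u8a[1..4], and the 7-bit CRC plus end bit in
+ * u8a[5] -- which is then split across the three 16-bit command
+ * registers below.
+ */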
+
+ /* issue the wire-order array including CRC in register order */
+ glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG1, ((u8a[4] << 8) | u8a[5]));
+ glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG2, ((u8a[2] << 8) | u8a[3]));
+ glamo_reg_write(host, GLAMO_REG_MMC_CMD_REG3, ((u8a[0] << 8) | u8a[1]));
+
+ /* command index toggle */
+ fire |= (host->request_counter & 1) << 12;
+
+ /* set type of command */
+ switch (mmc_cmd_type(cmd)) {
+ case MMC_CMD_BC:
+ fire |= GLAMO_FIRE_MMC_CMDT_BNR;
+ break;
+ case MMC_CMD_BCR:
+ fire |= GLAMO_FIRE_MMC_CMDT_BR;
+ break;
+ case MMC_CMD_AC:
+ fire |= GLAMO_FIRE_MMC_CMDT_AND;
+ break;
+ case MMC_CMD_ADTC:
+ fire |= GLAMO_FIRE_MMC_CMDT_AD;
+ break;
+ }
+ /*
+ * if it expects a response, set the type expected
+ *
+ * R1, Length : 48bit, Normal response
+ * R1b, Length : 48bit, same R1, but added card busy status
+ * R2, Length : 136bit (really 128 bits with CRC snipped)
+ * R3, Length : 48bit (OCR register value)
+ * R4, Length : 48bit, SDIO_OP_CONDITION, Reverse SDIO Card
+ * R5, Length : 48bit, IO_RW_DIRECTION, Reverse SDIO Card
+ * R6, Length : 48bit (RCA register)
+ * R7, Length : 48bit (interface condition, VHS(voltage supplied),
+ * check pattern, CRC7)
+ */
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1: /* same index as R6 and R7 */
+ fire |= GLAMO_FIRE_MMC_RSPT_R1;
+ break;
+ case MMC_RSP_R1B:
+ fire |= GLAMO_FIRE_MMC_RSPT_R1b;
+ break;
+ case MMC_RSP_R2:
+ fire |= GLAMO_FIRE_MMC_RSPT_R2;
+ break;
+ case MMC_RSP_R3:
+ fire |= GLAMO_FIRE_MMC_RSPT_R3;
+ break;
+ /* R4 and R5 supported by chip not defined in linux/mmc/core.h (sdio) */
+ }
+ /*
+ * From the command index, set up the command class in the host
+ * controller.
+ *
+ * Classes the chip provides but this driver does not use yet:
+ * 0x0 "stream read"
+ * 0x9 "cancel running command"
+ */
+ switch (cmd->opcode) {
+ case MMC_READ_SINGLE_BLOCK:
+ fire |= GLAMO_FIRE_MMC_CC_SBR; /* single block read */
+ break;
+ case MMC_SWITCH: /* 64 byte payload */
+ case SD_APP_SEND_SCR:
+ case MMC_READ_MULTIPLE_BLOCK:
+ /* we will get an interrupt off this */
+ if (!cmd->mrq->stop)
+ /* multiblock no stop */
+ fire |= GLAMO_FIRE_MMC_CC_MBRNS;
+ else
+ /* multiblock with stop */
+ fire |= GLAMO_FIRE_MMC_CC_MBRS;
+ break;
+ case MMC_WRITE_BLOCK:
+ fire |= GLAMO_FIRE_MMC_CC_SBW; /* single block write */
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ if (cmd->mrq->stop)
+ /* multiblock with stop */
+ fire |= GLAMO_FIRE_MMC_CC_MBWS;
+ else
+ /* multiblock NO stop-- 'RESERVED'? */
+ fire |= GLAMO_FIRE_MMC_CC_MBWNS;
+ break;
+ case MMC_STOP_TRANSMISSION:
+ fire |= GLAMO_FIRE_MMC_CC_STOP; /* STOP */
+ triggers_int = 0;
+ break;
+ default:
+ fire |= GLAMO_FIRE_MMC_CC_BASIC; /* "basic command" */
+ triggers_int = 0;
+ break;
+ }
+
+ if (cmd->data)
+ host->mrq = cmd->mrq;
+
+ /* always largest timeout */
+ glamo_reg_write(host, GLAMO_REG_MMC_TIMEOUT, 0xfff);
+
+ /* Generate interrupt on txfer */
+ glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC, 0xff36,
+ 0x0800 |
+ GLAMO_BASIC_MMC_NO_CLK_RD_WAIT |
+ GLAMO_BASIC_MMC_EN_COMPL_INT |
+ GLAMO_BASIC_MMC_EN_DATA_PUPS |
+ GLAMO_BASIC_MMC_EN_CMD_PUP);
+
+ /* send the command out on the wire */
+ /* dev_info(&host->pdev->dev, "Using FIRE %04X\n", fire); */
+ glamo_reg_write(host, GLAMO_REG_MMC_CMD_FIRE, fire);
+
+ /* deselecting the card (CMD7 with RCA 0): it will not respond, so don't wait */
+ if ((cmd->opcode == 7) && (cmd->arg == 0))
+ return;
+
+ /*
+ * we must spin until response is ready or timed out
+ * -- we don't get interrupts unless there is a bulk rx
+ */
+ do
+ status = glamo_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
+ while (((((status >> 15) & 1) != (host->request_counter & 1)) ||
+ (!(status & (GLAMO_STAT1_MMC_RB_RRDY |
+ GLAMO_STAT1_MMC_RTOUT |
+ GLAMO_STAT1_MMC_DTOUT |
+ GLAMO_STAT1_MMC_BWERR |
+ GLAMO_STAT1_MMC_BRERR)))) && (timeout--));
+
+ if ((status & (GLAMO_STAT1_MMC_RTOUT |
+ GLAMO_STAT1_MMC_DTOUT)) ||
+ (timeout < 0)) {
+ cmd->error = -ETIMEDOUT;
+ } else if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR)) {
+ cmd->error = -EILSEQ;
+ }
+
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ cmd->resp[3] = readw(&reg_resp[0]) |
+ (readw(&reg_resp[1]) << 16);
+ cmd->resp[2] = readw(&reg_resp[2]) |
+ (readw(&reg_resp[3]) << 16);
+ cmd->resp[1] = readw(&reg_resp[4]) |
+ (readw(&reg_resp[5]) << 16);
+ cmd->resp[0] = readw(&reg_resp[6]) |
+ (readw(&reg_resp[7]) << 16);
+ } else {
+ cmd->resp[0] = (readw(&reg_resp[0]) >> 8) |
+ (readw(&reg_resp[1]) << 8) |
+ ((readw(&reg_resp[2])) << 24);
+ }
+ }
+
+#if 0
+ /* We only get an interrupt once all data has been transferred.
+ By starting to copy data as soon as it is available we could increase
+ throughput by up to 30%. */
+ if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
+ schedule_work(&host->read_work);
+#endif
+
+}
+
+static int glamo_mci_prepare_pio(struct glamo_mci_host *host,
+ struct mmc_data *data)
+{
+ /* set up the block info */
+ glamo_reg_write(host, GLAMO_REG_MMC_DATBLKLEN, data->blksz);
+ glamo_reg_write(host, GLAMO_REG_MMC_DATBLKCNT, data->blocks);
+
+ data->bytes_xfered = 0;
+
+ /* if write, prep the write into the shared RAM before the command */
+ if (data->flags & MMC_DATA_WRITE) {
+ do_pio_write(host, data);
+ }
+
+ dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
+ data->blksz, data->blocks);
+ return 0;
+}
+
+static int glamo_mci_irq_poll(struct glamo_mci_host *host,
+ struct mmc_command *cmd)
+{
+ int timeout = 1000000;
+ /*
+ * if the glamo INT# line isn't wired (*cough* it can happen)
+ * I'm afraid we have to spin on the IRQ status bit and "be
+ * our own INT# line"
+ */
+ /*
+ * we have faith we will get an "interrupt"...
+ * but something insane like suspend problems can mean
+ * we spin here forever, so we timeout after a LONG time
+ */
+ while ((!(readw(host->core->base +
+ GLAMO_REG_IRQ_STATUS) & GLAMO_IRQ_MMC)) &&
+ (timeout--));
+
+ if (timeout < 0) {
+ if (!cmd->data->error)
+ cmd->data->error = -ETIMEDOUT;
+ dev_err(&host->pdev->dev, "Payload timeout\n");
+ return -ETIMEDOUT;
+ }
+ /* ack this interrupt source */
+ writew(GLAMO_IRQ_MMC, host->core->base +
+ GLAMO_REG_IRQ_CLEAR);
+
+ /* yay we are an interrupt controller! -- call the ISR
+ * it will stop clock to card
+ */
+ glamo_mci_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
+
+ return 0;
+}
+
+static void glamo_mci_send_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct glamo_mci_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ glamo_mci_clock_enable(host);
+ host->request_counter++;
+ if (cmd->data) {
+ if(glamo_mci_prepare_pio(host, cmd->data)) {
+ cmd->error = -EIO;
+ cmd->data->error = -EIO;
+ goto done;
+ }
+ }
+
+ dev_dbg(&host->pdev->dev,"cmd 0x%x, "
+ "arg 0x%x data=%p mrq->stop=%p flags 0x%x\n",
+ cmd->opcode, cmd->arg, cmd->data, cmd->mrq->stop,
+ cmd->flags);
+
+ glamo_mci_send_command(host, cmd);
+
+ /*
+ * if we don't have bulk data to take care of, we're done
+ */
+ if (!cmd->data || cmd->error)
+ goto done;
+
+
+ if (!host->core->irq_works) {
+ if (glamo_mci_irq_poll(host, mrq->cmd))
+ goto done;
+ }
+
+ /*
+ * Otherwise we can use the interrupt as async completion --
+ * if there is read data coming, or we wait for write data to complete,
+ * exit without mmc_request_done() as the payload interrupt
+ * will service it
+ */
+ dev_dbg(&host->pdev->dev, "Waiting for payload data\n");
+ return;
+done:
+ glamo_mci_request_done(host, mrq);
+}
+
+static void glamo_mci_set_power_mode(struct glamo_mci_host *host,
+ unsigned char power_mode)
+{
+ int ret;
+
+ if (power_mode == host->power_mode)
+ return;
+
+ switch(power_mode) {
+ case MMC_POWER_UP:
+ if (host->power_mode == MMC_POWER_OFF) {
+ ret = regulator_enable(host->regulator);
+ if (ret)
+ dev_err(&host->pdev->dev, "Failed to enable regulator: %d\n", ret);
+ }
+ break;
+ case MMC_POWER_ON:
+ break;
+ case MMC_POWER_OFF:
+ default:
+ glamo_engine_disable(host->core,
+ GLAMO_ENGINE_MMC);
+
+ ret = regulator_disable(host->regulator);
+ if (ret)
+ dev_warn(&host->pdev->dev, "Failed to disable regulator: %d\n", ret);
+ break;
+ }
+ host->power_mode = power_mode;
+}
+
+static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct glamo_mci_host *host = mmc_priv(mmc);
+ int bus_width = 0;
+ int rate;
+ int sd_drive;
+ int ret;
+
+ /* Set power */
+ glamo_mci_set_power_mode(host, ios->power_mode);
+
+ if (host->vdd != ios->vdd) {
+ ret = mmc_regulator_set_ocr(host->regulator, ios->vdd);
+ if (ret)
+ dev_err(&host->pdev->dev, "Failed to set regulator voltage: %d\n", ret);
+ else
+ host->vdd = ios->vdd;
+ }
+ rate = glamo_mci_set_card_clock(host, ios->clock);
+
+ if ((ios->power_mode == MMC_POWER_ON) ||
+ (ios->power_mode == MMC_POWER_UP)) {
+ dev_info(&host->pdev->dev,
+ "powered (vdd = %hu) clk: %dkHz div=%hu (req: %ukHz). "
+ "Bus width=%d\n", ios->vdd,
+ rate / 1000, 0,
+ ios->clock / 1000, (int)ios->bus_width);
+ } else {
+ dev_info(&host->pdev->dev, "glamo_mci_set_ios: power down.\n");
+ }
+
+ /* set bus width */
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ bus_width = GLAMO_BASIC_MMC_EN_4BIT_DATA;
+
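+ /*
+ * Added note (not part of the original patch): bits 7:6 of the BASIC
+ * register are the pad drive strength (00 = weakest, 11 = strongest,
+ * see glamo-regs.h); scale it with the achieved card clock relative
+ * to the PLL-derived base clock, capped at the maximum of 3.
+ */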
+ sd_drive = (rate * 4) / host->clk_rate;
+ if (sd_drive > 3)
+ sd_drive = 3;
+
+ glamo_reg_set_bit_mask(host, GLAMO_REG_MMC_BASIC,
+ GLAMO_BASIC_MMC_EN_4BIT_DATA | 0xb0,
+ bus_width | sd_drive << 6);
+}
+
+
+/*
+ * no physical write protect supported by us
+ */
+static int glamo_mci_get_ro(struct mmc_host *mmc)
+{
+ return 0;
+}
+
+static struct mmc_host_ops glamo_mci_ops = {
+ .request = glamo_mci_send_request,
+ .set_ios = glamo_mci_set_ios,
+ .get_ro = glamo_mci_get_ro,
+};
+
+static int glamo_mci_probe(struct platform_device *pdev)
+{
+ struct mmc_host *mmc;
+ struct glamo_mci_host *host;
+ struct glamo_core *core = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ dev_info(&pdev->dev, "glamo_mci driver (C)2007 Openmoko, Inc\n");
+
+ mmc = mmc_alloc_host(sizeof(struct glamo_mci_host), &pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto probe_out;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->pdev = pdev;
+ if (core->pdata)
+ host->pdata = core->pdata->mmc_data;
+ host->power_mode = MMC_POWER_OFF;
+ host->clk_enabled = 0;
+ host->core = core;
+
+ INIT_WORK(&host->irq_work, glamo_mci_irq_worker);
+ INIT_WORK(&host->read_work, glamo_mci_read_worker);
+
+ host->regulator = regulator_get(pdev->dev.parent, "SD_3V3");
+ if (IS_ERR(host->regulator)) {
+ dev_err(&pdev->dev, "Cannot proceed without regulator.\n");
+ ret = PTR_ERR(host->regulator);
+ goto probe_free_host;
+ }
+
+ host->mmio_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!host->mmio_mem) {
+ dev_err(&pdev->dev,
+ "failed to get io memory region resouce.\n");
+ ret = -ENOENT;
+ goto probe_regulator_put;
+ }
+
+ host->mmio_mem = request_mem_region(host->mmio_mem->start,
+ resource_size(host->mmio_mem),
+ pdev->name);
+
+ if (!host->mmio_mem) {
+ dev_err(&pdev->dev, "failed to request io memory region.\n");
+ ret = -ENOENT;
+ goto probe_regulator_put;
+ }
+
+ host->mmio_base = ioremap(host->mmio_mem->start,
+ resource_size(host->mmio_mem));
+ if (!host->mmio_base) {
+ dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+ ret = -EINVAL;
+ goto probe_free_mem_region_mmio;
+ }
+
+
+ /* Get ahold of our data buffer we use for data in and out on MMC */
+ host->data_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!host->data_mem) {
+ dev_err(&pdev->dev,
+ "failed to get io memory region resource.\n");
+ ret = -ENOENT;
+ goto probe_iounmap_mmio;
+ }
+
+ host->data_mem = request_mem_region(host->data_mem->start,
+ resource_size(host->data_mem),
+ pdev->name);
+
+ if (!host->data_mem) {
+ dev_err(&pdev->dev, "failed to request io memory region.\n");
+ ret = -ENOENT;
+ goto probe_iounmap_mmio;
+ }
+ host->data_base = ioremap(host->data_mem->start,
+ resource_size(host->data_mem));
+
+ if (!host->data_base) {
+ dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
+ ret = -EINVAL;
+ goto probe_free_mem_region_data;
+ }
+
+ ret = request_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), glamo_mci_irq, IRQF_SHARED,
+ pdev->name, host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register irq.\n");
+ goto probe_iounmap_data;
+ }
+
+
+ host->vdd = 0;
+ host->clk_rate = glamo_pll_rate(host->core, GLAMO_PLL1);
+
+ /* explain our host controller capabilities */
+ mmc->ops = &glamo_mci_ops;
+ ret = mmc_regulator_get_ocrmask(host->regulator);
+ mmc->caps = MMC_CAP_4_BIT_DATA |
+ MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED;
+ mmc->f_min = host->clk_rate / 256;
+ mmc->f_max = sd_max_clk;
+
+ mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
+ mmc->max_blk_size = (1 << 12) - 1; /* GLAMO_REG_MMC_RB_BLKLEN */
+ mmc->max_req_size = resource_size(host->data_mem);
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_phys_segs = 128;
+ mmc->max_hw_segs = 128;
+
+ if (ret < 0) {
+ dev_warn(&pdev->dev, "Failed to get ocr list for regulator: %d.\n",
+ ret);
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ } else {
+ mmc->ocr_avail = ret;
+ }
+
+ platform_set_drvdata(pdev, mmc);
+
+ glamo_engine_enable(host->core, GLAMO_ENGINE_MMC);
+ glamo_engine_reset(host->core, GLAMO_ENGINE_MMC);
+
+ glamo_reg_write(host, GLAMO_REG_MMC_WDATADS1,
+ (u16)(host->data_mem->start));
+ glamo_reg_write(host, GLAMO_REG_MMC_WDATADS2,
+ (u16)(host->data_mem->start >> 16));
+
+ glamo_reg_write(host, GLAMO_REG_MMC_RDATADS1,
+ (u16)(host->data_mem->start));
+ glamo_reg_write(host, GLAMO_REG_MMC_RDATADS2,
+ (u16)(host->data_mem->start >> 16));
+
+ setup_timer(&host->disable_timer, glamo_mci_disable_timer,
+ (unsigned long)host);
+
+ if ((ret = mmc_add_host(mmc))) {
+ dev_err(&pdev->dev, "failed to add mmc host.\n");
+ goto probe_freeirq;
+ }
+
+ dev_info(&pdev->dev,"initialisation done.\n");
+ return 0;
+
+probe_freeirq:
+ free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
+probe_iounmap_data:
+ iounmap(host->data_base);
+probe_free_mem_region_data:
+ release_mem_region(host->data_mem->start, resource_size(host->data_mem));
+probe_iounmap_mmio:
+ iounmap(host->mmio_base);
+probe_free_mem_region_mmio:
+ release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
+probe_regulator_put:
+ regulator_put(host->regulator);
+probe_free_host:
+ mmc_free_host(mmc);
+probe_out:
+ return ret;
+}
+
+static int glamo_mci_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct glamo_mci_host *host = mmc_priv(mmc);
+
+ free_irq(IRQ_GLAMO(GLAMO_IRQIDX_MMC), host);
+
+ mmc_remove_host(mmc);
+ iounmap(host->mmio_base);
+ iounmap(host->data_base);
+ release_mem_region(host->mmio_mem->start, resource_size(host->mmio_mem));
+ release_mem_region(host->data_mem->start, resource_size(host->data_mem));
+
+ regulator_put(host->regulator);
+
+ mmc_free_host(mmc);
+
+ glamo_engine_disable(host->core, GLAMO_ENGINE_MMC);
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int glamo_mci_suspend(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct glamo_mci_host *host = mmc_priv(mmc);
+ int ret;
+
+ cancel_work_sync(&host->irq_work);
+
+ ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
+ glamo_mci_clock_enable(host);
+
+ return ret;
+}
+
+static int glamo_mci_resume(struct device *dev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(dev);
+ struct glamo_mci_host *host = mmc_priv(mmc);
+ int ret;
+
+ glamo_engine_enable(host->core, GLAMO_ENGINE_MMC);
+ glamo_engine_reset(host->core, GLAMO_ENGINE_MMC);
+
+ glamo_reg_write(host, GLAMO_REG_MMC_WDATADS1,
+ (u16)(host->data_mem->start));
+ glamo_reg_write(host, GLAMO_REG_MMC_WDATADS2,
+ (u16)(host->data_mem->start >> 16));
+
+ glamo_reg_write(host, GLAMO_REG_MMC_RDATADS1,
+ (u16)(host->data_mem->start));
+ glamo_reg_write(host, GLAMO_REG_MMC_RDATADS2,
+ (u16)(host->data_mem->start >> 16));
+ mdelay(5);
+
+ ret = mmc_resume_host(host->mmc);
+/* glamo_mci_clock_disable(host);*/
+
+ return ret;
+}
+
+static struct dev_pm_ops glamo_mci_pm_ops = {
+ .suspend = glamo_mci_suspend,
+ .resume = glamo_mci_resume,
+};
+#define GLAMO_MCI_PM_OPS (&glamo_mci_pm_ops)
+
+#else /* CONFIG_PM */
+#define GLAMO_MCI_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+
+static struct platform_driver glamo_mci_driver =
+{
+ .probe = glamo_mci_probe,
+ .remove = glamo_mci_remove,
+ .driver = {
+ .name = "glamo-mci",
+ .owner = THIS_MODULE,
+ .pm = GLAMO_MCI_PM_OPS,
+ },
+};
+
+static int __init glamo_mci_init(void)
+{
+ return platform_driver_register(&glamo_mci_driver);
+}
+
+static void __exit glamo_mci_exit(void)
+{
+ platform_driver_unregister(&glamo_mci_driver);
+}
+
+module_init(glamo_mci_init);
+module_exit(glamo_mci_exit);
+
+MODULE_DESCRIPTION("Glamo MMC/SD Card Interface driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andy Green <andy@openmoko.com>");
diff --git a/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-regs.h b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-regs.h
new file mode 100644
index 000000000..2328b8ac9
--- /dev/null
+++ b/target/linux/s3c24xx/files-2.6.30/drivers/mfd/glamo/glamo-regs.h
@@ -0,0 +1,632 @@
+#ifndef _GLAMO_REGS_H
+#define _GLAMO_REGS_H
+
+/* Smedia Glamo 336x/337x driver
+ *
+ * (C) 2007 by Openmoko, Inc.
+ * Author: Harald Welte <laforge@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+enum glamo_register_offsets {
+ GLAMO_REGOFS_GENERIC = 0x0000,
+ GLAMO_REGOFS_HOSTBUS = 0x0200,
+ GLAMO_REGOFS_MEMORY = 0x0300,
+ GLAMO_REGOFS_VIDCAP = 0x0400,
+ GLAMO_REGOFS_ISP = 0x0500,
+ GLAMO_REGOFS_JPEG = 0x0800,
+ GLAMO_REGOFS_MPEG = 0x0c00,
+ GLAMO_REGOFS_LCD = 0x1100,
+ GLAMO_REGOFS_MMC = 0x1400,
+ GLAMO_REGOFS_MPROC0 = 0x1500,
+ GLAMO_REGOFS_MPROC1 = 0x1580,
+ GLAMO_REGOFS_CMDQUEUE = 0x1600,
+ GLAMO_REGOFS_RISC = 0x1680,
+ GLAMO_REGOFS_2D = 0x1700,
+ GLAMO_REGOFS_3D = 0x1b00,
+ GLAMO_REGOFS_END = 0x2400,
+};
+
+
+enum glamo_register_generic {
+ GLAMO_REG_GCONF1 = 0x0000,
+ GLAMO_REG_GCONF2 = 0x0002,
+#define GLAMO_REG_DEVICE_ID GLAMO_REG_GCONF2
+ GLAMO_REG_GCONF3 = 0x0004,
+#define GLAMO_REG_REVISION_ID GLAMO_REG_GCONF3
+ GLAMO_REG_IRQ_GEN1 = 0x0006,
+#define GLAMO_REG_IRQ_ENABLE GLAMO_REG_IRQ_GEN1
+ GLAMO_REG_IRQ_GEN2 = 0x0008,
+#define GLAMO_REG_IRQ_SET GLAMO_REG_IRQ_GEN2
+ GLAMO_REG_IRQ_GEN3 = 0x000a,
+#define GLAMO_REG_IRQ_CLEAR GLAMO_REG_IRQ_GEN3
+ GLAMO_REG_IRQ_GEN4 = 0x000c,
+#define GLAMO_REG_IRQ_STATUS GLAMO_REG_IRQ_GEN4
+ GLAMO_REG_CLOCK_HOST = 0x0010,
+ GLAMO_REG_CLOCK_MEMORY = 0x0012,
+ GLAMO_REG_CLOCK_LCD = 0x0014,
+ GLAMO_REG_CLOCK_MMC = 0x0016,
+ GLAMO_REG_CLOCK_ISP = 0x0018,
+ GLAMO_REG_CLOCK_JPEG = 0x001a,
+ GLAMO_REG_CLOCK_3D = 0x001c,
+ GLAMO_REG_CLOCK_2D = 0x001e,
+ GLAMO_REG_CLOCK_RISC1 = 0x0020, /* 3365 only? */
+ GLAMO_REG_CLOCK_RISC2 = 0x0022, /* 3365 only? */
+ GLAMO_REG_CLOCK_MPEG = 0x0024,
+ GLAMO_REG_CLOCK_MPROC = 0x0026,
+
+ GLAMO_REG_CLOCK_GEN5_1 = 0x0030,
+ GLAMO_REG_CLOCK_GEN5_2 = 0x0032,
+ GLAMO_REG_CLOCK_GEN6 = 0x0034,
+ GLAMO_REG_CLOCK_GEN7 = 0x0036,
+ GLAMO_REG_CLOCK_GEN8 = 0x0038,
+ GLAMO_REG_CLOCK_GEN9 = 0x003a,
+ GLAMO_REG_CLOCK_GEN10 = 0x003c,
+ GLAMO_REG_CLOCK_GEN11 = 0x003e,
+ GLAMO_REG_PLL_GEN1 = 0x0040,
+ GLAMO_REG_PLL_GEN2 = 0x0042,
+ GLAMO_REG_PLL_GEN3 = 0x0044,
+ GLAMO_REG_PLL_GEN4 = 0x0046,
+ GLAMO_REG_PLL_GEN5 = 0x0048,
+ GLAMO_REG_GPIO_GEN1 = 0x0050,
+ GLAMO_REG_GPIO_GEN2 = 0x0052,
+ GLAMO_REG_GPIO_GEN3 = 0x0054,
+ GLAMO_REG_GPIO_GEN4 = 0x0056,
+ GLAMO_REG_GPIO_GEN5 = 0x0058,
+ GLAMO_REG_GPIO_GEN6 = 0x005a,
+ GLAMO_REG_GPIO_GEN7 = 0x005c,
+ GLAMO_REG_GPIO_GEN8 = 0x005e,
+ GLAMO_REG_GPIO_GEN9 = 0x0060,
+ GLAMO_REG_GPIO_GEN10 = 0x0062,
+ GLAMO_REG_DFT_GEN1 = 0x0070,
+ GLAMO_REG_DFT_GEN2 = 0x0072,
+ GLAMO_REG_DFT_GEN3 = 0x0074,
+ GLAMO_REG_DFT_GEN4 = 0x0076,
+
+ GLAMO_REG_DFT_GEN5 = 0x01e0,
+ GLAMO_REG_DFT_GEN6 = 0x01f0,
+};
+
+#define GLAMO_REG_HOSTBUS(x) (GLAMO_REGOFS_HOSTBUS-2+(x*2))
+
+#define REG_MEM(x) (GLAMO_REGOFS_MEMORY+(x))
+#define GLAMO_REG_MEM_TIMING(x) (GLAMO_REG_MEM_TIMING1-2+(x*2))
+
+enum glamo_register_mem {
+ GLAMO_REG_MEM_TYPE = REG_MEM(0x00),
+ GLAMO_REG_MEM_GEN = REG_MEM(0x02),
+ GLAMO_REG_MEM_TIMING1 = REG_MEM(0x04),
+ GLAMO_REG_MEM_TIMING2 = REG_MEM(0x06),
+ GLAMO_REG_MEM_TIMING3 = REG_MEM(0x08),
+ GLAMO_REG_MEM_TIMING4 = REG_MEM(0x0a),
+ GLAMO_REG_MEM_TIMING5 = REG_MEM(0x0c),
+ GLAMO_REG_MEM_TIMING6 = REG_MEM(0x0e),
+ GLAMO_REG_MEM_TIMING7 = REG_MEM(0x10),
+ GLAMO_REG_MEM_TIMING8 = REG_MEM(0x12),
+ GLAMO_REG_MEM_TIMING9 = REG_MEM(0x14),
+ GLAMO_REG_MEM_TIMING10 = REG_MEM(0x16),
+ GLAMO_REG_MEM_TIMING11 = REG_MEM(0x18),
+ GLAMO_REG_MEM_POWER1 = REG_MEM(0x1a),
+ GLAMO_REG_MEM_POWER2 = REG_MEM(0x1c),
+ GLAMO_REG_MEM_LCD_BUF1 = REG_MEM(0x1e),
+ GLAMO_REG_MEM_LCD_BUF2 = REG_MEM(0x20),
+ GLAMO_REG_MEM_LCD_BUF3 = REG_MEM(0x22),
+ GLAMO_REG_MEM_LCD_BUF4 = REG_MEM(0x24),
+ GLAMO_REG_MEM_BIST1 = REG_MEM(0x26),
+ GLAMO_REG_MEM_BIST2 = REG_MEM(0x28),
+ GLAMO_REG_MEM_BIST3 = REG_MEM(0x2a),
+ GLAMO_REG_MEM_BIST4 = REG_MEM(0x2c),
+ GLAMO_REG_MEM_BIST5 = REG_MEM(0x2e),
+ GLAMO_REG_MEM_MAH1 = REG_MEM(0x30),
+ GLAMO_REG_MEM_MAH2 = REG_MEM(0x32),
+ GLAMO_REG_MEM_DRAM1 = REG_MEM(0x34),
+ GLAMO_REG_MEM_DRAM2 = REG_MEM(0x36),
+ GLAMO_REG_MEM_CRC = REG_MEM(0x38),
+};
+
+#define GLAMO_MEM_TYPE_MASK 0x03
+
+enum glamo_reg_mem_dram1 {
+ /* b0 - b10 == refresh period, 1 -> 2048 clocks */
+ GLAMO_MEM_DRAM1_EN_GATE_CLK = (1 << 11),
+ GLAMO_MEM_DRAM1_SELF_REFRESH = (1 << 12),
+ GLAMO_MEM_DRAM1_EN_GATE_CKE = (1 << 13),
+ GLAMO_MEM_DRAM1_EN_DRAM_REFRESH = (1 << 14),
+ GLAMO_MEM_DRAM1_EN_MODEREG_SET = (1 << 15),
+};
+
+enum glamo_reg_mem_dram2 {
+ GLAMO_MEM_DRAM2_DEEP_PWRDOWN = (1 << 12),
+};
+
+enum glamo_irq_index {
+ GLAMO_IRQIDX_HOSTBUS = 0,
+ GLAMO_IRQIDX_JPEG = 1,
+ GLAMO_IRQIDX_MPEG = 2,
+ GLAMO_IRQIDX_MPROC1 = 3,
+ GLAMO_IRQIDX_MPROC0 = 4,
+ GLAMO_IRQIDX_CMDQUEUE = 5,
+ GLAMO_IRQIDX_2D = 6,
+ GLAMO_IRQIDX_MMC = 7,
+ GLAMO_IRQIDX_RISC = 8,
+};
+
+enum glamo_irq {
+ GLAMO_IRQ_HOSTBUS = (1 << GLAMO_IRQIDX_HOSTBUS),
+ GLAMO_IRQ_JPEG = (1 << GLAMO_IRQIDX_JPEG),
+ GLAMO_IRQ_MPEG = (1 << GLAMO_IRQIDX_MPEG),
+ GLAMO_IRQ_MPROC1 = (1 << GLAMO_IRQIDX_MPROC1),
+ GLAMO_IRQ_MPROC0 = (1 << GLAMO_IRQIDX_MPROC0),
+ GLAMO_IRQ_CMDQUEUE = (1 << GLAMO_IRQIDX_CMDQUEUE),
+ GLAMO_IRQ_2D = (1 << GLAMO_IRQIDX_2D),
+ GLAMO_IRQ_MMC = (1 << GLAMO_IRQIDX_MMC),
+ GLAMO_IRQ_RISC = (1 << GLAMO_IRQIDX_RISC),
+};
+
+enum glamo_reg_clock_host {
+ GLAMO_CLOCK_HOST_DG_BCLK = 0x0001,
+ GLAMO_CLOCK_HOST_DG_M0CLK = 0x0004,
+ GLAMO_CLOCK_HOST_RESET = 0x1000,
+};
+
+enum glamo_reg_clock_mem {
+ GLAMO_CLOCK_MEM_DG_M1CLK = 0x0001,
+ GLAMO_CLOCK_MEM_EN_M1CLK = 0x0002,
+ GLAMO_CLOCK_MEM_DG_MOCACLK = 0x0004,
+ GLAMO_CLOCK_MEM_EN_MOCACLK = 0x0008,
+ GLAMO_CLOCK_MEM_RESET = 0x1000,
+ GLAMO_CLOCK_MOCA_RESET = 0x2000,
+};
+
+enum glamo_reg_clock_lcd {
+ GLAMO_CLOCK_LCD_DG_DCLK = 0x0001,
+ GLAMO_CLOCK_LCD_EN_DCLK = 0x0002,
+ GLAMO_CLOCK_LCD_DG_DMCLK = 0x0004,
+ GLAMO_CLOCK_LCD_EN_DMCLK = 0x0008,
+ //
+ GLAMO_CLOCK_LCD_EN_DHCLK = 0x0020,
+ GLAMO_CLOCK_LCD_DG_M5CLK = 0x0040,
+ GLAMO_CLOCK_LCD_EN_M5CLK = 0x0080,
+ GLAMO_CLOCK_LCD_RESET = 0x1000,
+};
+
+enum glamo_reg_clock_mmc {
+ GLAMO_CLOCK_MMC_DG_TCLK = 0x0001,
+ GLAMO_CLOCK_MMC_EN_TCLK = 0x0002,
+ GLAMO_CLOCK_MMC_DG_M9CLK = 0x0004,
+ GLAMO_CLOCK_MMC_EN_M9CLK = 0x0008,
+ GLAMO_CLOCK_MMC_RESET = 0x1000,
+};
+
+enum glamo_reg_basic_mmc {
+ /* set to disable CRC error rejection */
+ GLAMO_BASIC_MMC_DISABLE_CRC = 0x0001,
+ /* enable completion interrupt */
+ GLAMO_BASIC_MMC_EN_COMPL_INT = 0x0002,
+ /* stop MMC clock while enforced idle waiting for data from card */
+ GLAMO_BASIC_MMC_NO_CLK_RD_WAIT = 0x0004,
+ /* 0 = 1-bit bus to card, 1 = use 4-bit bus (has to be negotiated) */
+ GLAMO_BASIC_MMC_EN_4BIT_DATA = 0x0008,
+ /* enable 75K pullups on D3..D0 */
+ GLAMO_BASIC_MMC_EN_DATA_PUPS = 0x0010,
+ /* enable 75K pullup on CMD */
+ GLAMO_BASIC_MMC_EN_CMD_PUP = 0x0020,
+ /* IO drive strength 00=weak -> 11=strongest */
+ GLAMO_BASIC_MMC_EN_DR_STR0 = 0x0040,
+ GLAMO_BASIC_MMC_EN_DR_STR1 = 0x0080,
+ /* TCLK delay stage A, 0000 = 500ps --> 1111 = 8ns */
+ GLAMO_BASIC_MMC_EN_TCLK_DLYA0 = 0x0100,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYA1 = 0x0200,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYA2 = 0x0400,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYA3 = 0x0800,
+ /* TCLK delay stage B (cumulative), 0000 = 500ps --> 1111 = 8ns */
+ GLAMO_BASIC_MMC_EN_TCLK_DLYB0 = 0x1000,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYB1 = 0x2000,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYB2 = 0x4000,
+ GLAMO_BASIC_MMC_EN_TCLK_DLYB3 = 0x8000,
+};
+
+enum glamo_reg_stat1_mmc {
+ /* command "counter" (really: toggle) */
+ GLAMO_STAT1_MMC_CMD_CTR = 0x8000,
+ /* engine is idle */
+ GLAMO_STAT1_MMC_IDLE = 0x4000,
+ /* readback response is ready */
+ GLAMO_STAT1_MMC_RB_RRDY = 0x0200,
+ /* readback data is ready */
+ GLAMO_STAT1_MMC_RB_DRDY = 0x0100,
+ /* no response timeout */
+ GLAMO_STAT1_MMC_RTOUT = 0x0020,
+ /* no data timeout */
+ GLAMO_STAT1_MMC_DTOUT = 0x0010,
+ /* CRC error on block write */
+ GLAMO_STAT1_MMC_BWERR = 0x0004,
+ /* CRC error on block read */
+ GLAMO_STAT1_MMC_BRERR = 0x0002
+};
+
+enum glamo_reg_fire_mmc {
+ /* command "counter" (really: toggle)
+ * the STAT1 register reflects this so you can ensure you don't look
+ * at status for previous command
+ */
+ GLAMO_FIRE_MMC_CMD_CTR = 0x8000,
+ /* sets kind of response expected */
+ GLAMO_FIRE_MMC_RES_MASK = 0x0700,
+ /* sets command type */
+ GLAMO_FIRE_MMC_TYP_MASK = 0x00C0,
+ /* sets command class */
+ GLAMO_FIRE_MMC_CLS_MASK = 0x000F,
+};
+
+enum glamo_fire_mmc_response_types {
+ GLAMO_FIRE_MMC_RSPT_R1 = 0x0000,
+ GLAMO_FIRE_MMC_RSPT_R1b = 0x0100,
+ GLAMO_FIRE_MMC_RSPT_R2 = 0x0200,
+ GLAMO_FIRE_MMC_RSPT_R3 = 0x0300,
+ GLAMO_FIRE_MMC_RSPT_R4 = 0x0400,
+ GLAMO_FIRE_MMC_RSPT_R5 = 0x0500,
+};
+
+enum glamo_fire_mmc_command_types {
+ /* broadcast, no response */
+ GLAMO_FIRE_MMC_CMDT_BNR = 0x0000,
+ /* broadcast, with response */
+ GLAMO_FIRE_MMC_CMDT_BR = 0x0040,
+ /* addressed, no data */
+ GLAMO_FIRE_MMC_CMDT_AND = 0x0080,
+ /* addressed, with data */
+ GLAMO_FIRE_MMC_CMDT_AD = 0x00C0,
+};
+
+enum glamo_fire_mmc_command_class {
+ /* "Stream Read" */
+ GLAMO_FIRE_MMC_CC_STRR = 0x0000,
+ /* Single Block Read */
+ GLAMO_FIRE_MMC_CC_SBR = 0x0001,
+ /* Multiple Block Read With Stop */
+ GLAMO_FIRE_MMC_CC_MBRS = 0x0002,
+ /* Multiple Block Read No Stop */
+ GLAMO_FIRE_MMC_CC_MBRNS = 0x0003,
+ /* RESERVED for "Stream Write" */
+ GLAMO_FIRE_MMC_CC_STRW = 0x0004,
+ /* "Stream Write" */
+ GLAMO_FIRE_MMC_CC_SBW = 0x0005,
+ /* RESERVED for Multiple Block Write With Stop */
+ GLAMO_FIRE_MMC_CC_MBWS = 0x0006,
+ /* Multiple Block Write No Stop */
+ GLAMO_FIRE_MMC_CC_MBWNS = 0x0007,
+ /* STOP command */
+ GLAMO_FIRE_MMC_CC_STOP = 0x0008,
+ /* Cancel on Running Command */
+ GLAMO_FIRE_MMC_CC_CANCL = 0x0009,
+ /* "Basic Command" */
+ GLAMO_FIRE_MMC_CC_BASIC = 0x000a,
+};
+
+/* these are offsets from the start of the MMC register region */
+enum glamo_register_mmc {
+ /* MMC command, b15..8 = cmd arg b7..0; b7..1 = CRC; b0 = end bit */
+ GLAMO_REG_MMC_CMD_REG1 = 0x00,
+ /* MMC command, b15..0 = cmd arg b23 .. 8 */
+ GLAMO_REG_MMC_CMD_REG2 = 0x02,
+ /* MMC command, b15=start, b14=transmission,
+ * b13..8=cmd idx, b7..0=cmd arg b31..24
+ */
+ GLAMO_REG_MMC_CMD_REG3 = 0x04,
+ GLAMO_REG_MMC_CMD_FIRE = 0x06,
+ GLAMO_REG_MMC_CMD_RSP1 = 0x10,
+ GLAMO_REG_MMC_CMD_RSP2 = 0x12,
+ GLAMO_REG_MMC_CMD_RSP3 = 0x14,
+ GLAMO_REG_MMC_CMD_RSP4 = 0x16,
+ GLAMO_REG_MMC_CMD_RSP5 = 0x18,
+ GLAMO_REG_MMC_CMD_RSP6 = 0x1a,
+ GLAMO_REG_MMC_CMD_RSP7 = 0x1c,
+ GLAMO_REG_MMC_CMD_RSP8 = 0x1e,
+ GLAMO_REG_MMC_RB_STAT1 = 0x20,
+ GLAMO_REG_MMC_RB_BLKCNT = 0x22,
+ GLAMO_REG_MMC_RB_BLKLEN = 0x24,
+ GLAMO_REG_MMC_BASIC = 0x30,
+ GLAMO_REG_MMC_RDATADS1 = 0x34,
+ GLAMO_REG_MMC_RDATADS2 = 0x36,
+ GLAMO_REG_MMC_WDATADS1 = 0x38,
+ GLAMO_REG_MMC_WDATADS2 = 0x3a,
+ GLAMO_REG_MMC_DATBLKCNT = 0x3c,
+ GLAMO_REG_MMC_DATBLKLEN = 0x3e,
+ GLAMO_REG_MMC_TIMEOUT = 0x40,
+
+};
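+
+/*
+ * Added note (not part of the original patch): combined with the region
+ * table at the top of this file, GLAMO_REG_MMC_RB_STAT1 (0x20), for
+ * instance, sits at 0x1400 + 0x20 = 0x1420 in the chip's register space;
+ * the MCI driver reaches it through its own mapping of the MMC register
+ * window, so only the 0x20 offset appears in glamo-mci.c.
+ */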
+
+enum glamo_reg_clock_isp {
+ GLAMO_CLOCK_ISP_DG_I1CLK = 0x0001,
+ GLAMO_CLOCK_ISP_EN_I1CLK = 0x0002,
+ GLAMO_CLOCK_ISP_DG_CCLK = 0x0004,
+ GLAMO_CLOCK_ISP_EN_CCLK = 0x0008,
+ //
+ GLAMO_CLOCK_ISP_EN_SCLK = 0x0020,
+ GLAMO_CLOCK_ISP_DG_M2CLK = 0x0040,
+ GLAMO_CLOCK_ISP_EN_M2CLK = 0x0080,
+ GLAMO_CLOCK_ISP_DG_M15CLK = 0x0100,
+ GLAMO_CLOCK_ISP_EN_M15CLK = 0x0200,
+ GLAMO_CLOCK_ISP1_RESET = 0x1000,
+ GLAMO_CLOCK_ISP2_RESET = 0x2000,
+};
+
+enum glamo_reg_clock_jpeg {
+ GLAMO_CLOCK_JPEG_DG_JCLK = 0x0001,
+ GLAMO_CLOCK_JPEG_EN_JCLK = 0x0002,
+ GLAMO_CLOCK_JPEG_DG_M3CLK = 0x0004,
+ GLAMO_CLOCK_JPEG_EN_M3CLK = 0x0008,
+ GLAMO_CLOCK_JPEG_RESET = 0x1000,
+};
+
+enum glamo_reg_clock_2d {
+ GLAMO_CLOCK_2D_DG_GCLK = 0x0001,
+ GLAMO_CLOCK_2D_EN_GCLK = 0x0002,
+ GLAMO_CLOCK_2D_DG_M7CLK = 0x0004,
+ GLAMO_CLOCK_2D_EN_M7CLK = 0x0008,
+ GLAMO_CLOCK_2D_DG_M6CLK = 0x0010,
+ GLAMO_CLOCK_2D_EN_M6CLK = 0x0020,
+ GLAMO_CLOCK_2D_RESET = 0x1000,
+ GLAMO_CLOCK_2D_CQ_RESET = 0x2000,
+};
+
+enum glamo_reg_clock_3d {
+ GLAMO_CLOCK_3D_DG_ECLK = 0x0001,
+ GLAMO_CLOCK_3D_EN_ECLK = 0x0002,
+ GLAMO_CLOCK_3D_DG_RCLK = 0x0004,
+ GLAMO_CLOCK_3D_EN_RCLK = 0x0008,
+ GLAMO_CLOCK_3D_DG_M8CLK = 0x0010,
+ GLAMO_CLOCK_3D_EN_M8CLK = 0x0020,
+ GLAMO_CLOCK_3D_BACK_RESET = 0x1000,
+ GLAMO_CLOCK_3D_FRONT_RESET = 0x2000,
+};
+
+enum glamo_reg_clock_mpeg {
+ GLAMO_CLOCK_MPEG_DG_X0CLK = 0x0001,
+ GLAMO_CLOCK_MPEG_EN_X0CLK = 0x0002,
+ GLAMO_CLOCK_MPEG_DG_X1CLK = 0x0004,
+ GLAMO_CLOCK_MPEG_EN_X1CLK = 0x0008,
+ GLAMO_CLOCK_MPEG_DG_X2CLK = 0x0010,
+ GLAMO_CLOCK_MPEG_EN_X2CLK = 0x0020,
+ GLAMO_CLOCK_MPEG_DG_X3CLK = 0x0040,
+ GLAMO_CLOCK_MPEG_EN_X3CLK = 0x0080,
+ GLAMO_CLOCK_MPEG_DG_X4CLK = 0x0100,
+ GLAMO_CLOCK_MPEG_EN_X4CLK = 0x0200,
+ GLAMO_CLOCK_MPEG_DG_X6CLK = 0x0400,
+ GLAMO_CLOCK_MPEG_EN_X6CLK = 0x0800,
+ GLAMO_CLOCK_MPEG_ENC_RESET = 0x1000,
+ GLAMO_CLOCK_MPEG_DEC_RESET = 0x2000,
+};
+
+enum glamo_reg_clock51 {
+ GLAMO_CLOCK_GEN51_EN_DIV_MCLK = 0x0001,
+ GLAMO_CLOCK_GEN51_EN_DIV_SCLK = 0x0002,
+ GLAMO_CLOCK_GEN51_EN_DIV_JCLK = 0x0004,
+ GLAMO_CLOCK_GEN51_EN_DIV_DCLK = 0x0008,
+ GLAMO_CLOCK_GEN51_EN_DIV_DMCLK = 0x0010,
+ GLAMO_CLOCK_GEN51_EN_DIV_DHCLK = 0x0020,
+ GLAMO_CLOCK_GEN51_EN_DIV_GCLK = 0x0040,
+ GLAMO_CLOCK_GEN51_EN_DIV_TCLK = 0x0080,
+ /* FIXME: higher bits */
+};
+
+enum glamo_reg_hostbus2 {
+ GLAMO_HOSTBUS2_MMIO_EN_ISP = 0x0001,
+ GLAMO_HOSTBUS2_MMIO_EN_JPEG = 0x0002,
+ GLAMO_HOSTBUS2_MMIO_EN_MPEG = 0x0004,
+ GLAMO_HOSTBUS2_MMIO_EN_LCD = 0x0008,
+ GLAMO_HOSTBUS2_MMIO_EN_MMC = 0x0010,
+ GLAMO_HOSTBUS2_MMIO_EN_MICROP0 = 0x0020,
+ GLAMO_HOSTBUS2_MMIO_EN_MICROP1 = 0x0040,
+ GLAMO_HOSTBUS2_MMIO_EN_CQ = 0x0080,
+ GLAMO_HOSTBUS2_MMIO_EN_RISC = 0x0100,
+ GLAMO_HOSTBUS2_MMIO_EN_2D = 0x0200,
+ GLAMO_HOSTBUS2_MMIO_EN_3D = 0x0400,
+};
+
+/* LCD Controller */
+
+#define REG_LCD(x) (x)
+enum glamo_reg_lcd {
+ GLAMO_REG_LCD_MODE1 = REG_LCD(0x00),
+ GLAMO_REG_LCD_MODE2 = REG_LCD(0x02),
+ GLAMO_REG_LCD_MODE3 = REG_LCD(0x04),
+ GLAMO_REG_LCD_WIDTH = REG_LCD(0x06),
+ GLAMO_REG_LCD_HEIGHT = REG_LCD(0x08),
+ GLAMO_REG_LCD_POLARITY = REG_LCD(0x0a),
+ GLAMO_REG_LCD_A_BASE1 = REG_LCD(0x0c),
+ GLAMO_REG_LCD_A_BASE2 = REG_LCD(0x0e),
+ GLAMO_REG_LCD_B_BASE1 = REG_LCD(0x10),
+ GLAMO_REG_LCD_B_BASE2 = REG_LCD(0x12),
+ GLAMO_REG_LCD_C_BASE1 = REG_LCD(0x14),
+ GLAMO_REG_LCD_C_BASE2 = REG_LCD(0x16),
+ GLAMO_REG_LCD_PITCH = REG_LCD(0x18),
+ /* RES */
+ GLAMO_REG_LCD_HORIZ_TOTAL = REG_LCD(0x1c),
+ /* RES */
+ GLAMO_REG_LCD_HORIZ_RETR_START = REG_LCD(0x20),
+ /* RES */
+ GLAMO_REG_LCD_HORIZ_RETR_END = REG_LCD(0x24),
+ /* RES */
+ GLAMO_REG_LCD_HORIZ_DISP_START = REG_LCD(0x28),
+ /* RES */
+ GLAMO_REG_LCD_HORIZ_DISP_END = REG_LCD(0x2c),
+ /* RES */
+ GLAMO_REG_LCD_VERT_TOTAL = REG_LCD(0x30),
+ /* RES */
+ GLAMO_REG_LCD_VERT_RETR_START = REG_LCD(0x34),
+ /* RES */
+ GLAMO_REG_LCD_VERT_RETR_END = REG_LCD(0x38),
+ /* RES */
+ GLAMO_REG_LCD_VERT_DISP_START = REG_LCD(0x3c),
+ /* RES */
+ GLAMO_REG_LCD_VERT_DISP_END = REG_LCD(0x40),
+ /* RES */
+ GLAMO_REG_LCD_POL = REG_LCD(0x44),
+ GLAMO_REG_LCD_DATA_START = REG_LCD(0x46),
+ GLAMO_REG_LCD_FRATE_CONTRO = REG_LCD(0x48),
+ GLAMO_REG_LCD_DATA_CMD_HDR = REG_LCD(0x4a),
+ GLAMO_REG_LCD_SP_START = REG_LCD(0x4c),
+ GLAMO_REG_LCD_SP_END = REG_LCD(0x4e),
+ GLAMO_REG_LCD_CURSOR_BASE1 = REG_LCD(0x50),
+ GLAMO_REG_LCD_CURSOR_BASE2 = REG_LCD(0x52),
+ GLAMO_REG_LCD_CURSOR_PITCH = REG_LCD(0x54),
+ GLAMO_REG_LCD_CURSOR_X_SIZE = REG_LCD(0x56),
+ GLAMO_REG_LCD_CURSOR_Y_SIZE = REG_LCD(0x58),
+ GLAMO_REG_LCD_CURSOR_X_POS = REG_LCD(0x5a),
+ GLAMO_REG_LCD_CURSOR_Y_POS = REG_LCD(0x5c),
+ GLAMO_REG_LCD_CURSOR_PRESET = REG_LCD(0x5e),
+ GLAMO_REG_LCD_CURSOR_FG_COLOR = REG_LCD(0x60),
+ /* RES */
+ GLAMO_REG_LCD_CURSOR_BG_COLOR = REG_LCD(0x64),
+ /* RES */
+ GLAMO_REG_LCD_CURSOR_DST_COLOR = REG_LCD(0x68),
+ /* RES */
+ GLAMO_REG_LCD_STATUS1 = REG_LCD(0x80),
+ GLAMO_REG_LCD_STATUS2 = REG_LCD(0x82),
+ GLAMO_REG_LCD_STATUS3 = REG_LCD(0x84),
+ GLAMO_REG_LCD_STATUS4 = REG_LCD(0x86),
+ /* RES */
+ GLAMO_REG_LCD_COMMAND1 = REG_LCD(0xa0),
+ GLAMO_REG_LCD_COMMAND2 = REG_LCD(0xa2),
+ /* RES */
+ GLAMO_REG_LCD_WFORM_DELAY1 = REG_LCD(0xb0),
+ GLAMO_REG_LCD_WFORM_DELAY2 = REG_LCD(0xb2),
+ /* RES */
+ GLAMO_REG_LCD_GAMMA_CORR = REG_LCD(0x100),
+ /* RES */
+ GLAMO_REG_LCD_GAMMA_R_ENTRY01 = REG_LCD(0x110),
+ GLAMO_REG_LCD_GAMMA_R_ENTRY23 = REG_LCD(0x112),
+ GLAMO_REG_LCD_GAMMA_R_ENTRY45 = REG_LCD(0x114),
+ GLAMO_REG_LCD_GAMMA_R_ENTRY67 = REG_LCD(0x116),
+ GLAMO_REG_LCD_GAMMA_R_ENTRY8 = REG_LCD(0x118),
+ /* RES */
+ GLAMO_REG_LCD_GAMMA_G_ENTRY01 = REG_LCD(0x130),
+ GLAMO_REG_LCD_GAMMA_G_ENTRY23 = REG_LCD(0x132),
+ GLAMO_REG_LCD_GAMMA_G_ENTRY45 = REG_LCD(0x134),
+ GLAMO_REG_LCD_GAMMA_G_ENTRY67 = REG_LCD(0x136),
+ GLAMO_REG_LCD_GAMMA_G_ENTRY8 = REG_LCD(0x138),
+ /* RES */
+ GLAMO_REG_LCD_GAMMA_B_ENTRY01 = REG_LCD(0x150),
+ GLAMO_REG_LCD_GAMMA_B_ENTRY23 = REG_LCD(0x152),
+ GLAMO_REG_LCD_GAMMA_B_ENTRY45 = REG_LCD(0x154),
+ GLAMO_REG_LCD_GAMMA_B_ENTRY67 = REG_LCD(0x156),
+ GLAMO_REG_LCD_GAMMA_B_ENTRY8 = REG_LCD(0x158),
+ /* RES */
+ GLAMO_REG_LCD_SRAM_DRIVING1 = REG_LCD(0x160),
+ GLAMO_REG_LCD_SRAM_DRIVING2 = REG_LCD(0x162),
+ GLAMO_REG_LCD_SRAM_DRIVING3 = REG_LCD(0x164),
+};
+
+enum glamo_reg_lcd_mode1 {
+ GLAMO_LCD_MODE1_PWRSAVE = 0x0001,
+ GLAMO_LCD_MODE1_PARTIAL_PRT = 0x0002,
+ GLAMO_LCD_MODE1_HWFLIP = 0x0004,
+ GLAMO_LCD_MODE1_LCD2 = 0x0008,
+ /* RES */
+ GLAMO_LCD_MODE1_PARTIAL_MODE = 0x0020,
+ GLAMO_LCD_MODE1_CURSOR_DSTCOLOR = 0x0040,
+ GLAMO_LCD_MODE1_PARTIAL_ENABLE = 0x0080,
+ GLAMO_LCD_MODE1_TVCLK_IN_ENABLE = 0x0100,
+ GLAMO_LCD_MODE1_HSYNC_HIGH_ACT = 0x0200,
+ GLAMO_LCD_MODE1_VSYNC_HIGH_ACT = 0x0400,
+ GLAMO_LCD_MODE1_HSYNC_FLIP = 0x0800,
+ GLAMO_LCD_MODE1_GAMMA_COR_EN = 0x1000,
+ GLAMO_LCD_MODE1_DITHER_EN = 0x2000,
+ GLAMO_LCD_MODE1_CURSOR_EN = 0x4000,
+ GLAMO_LCD_MODE1_ROTATE_EN = 0x8000,
+};
+
+enum glamo_reg_lcd_mode2 {
+ GLAMO_LCD_MODE2_CRC_CHECK_EN = 0x0001,
+ GLAMO_LCD_MODE2_DCMD_PER_LINE = 0x0002,
+ GLAMO_LCD_MODE2_NOUSE_BDEF = 0x0004,
+ GLAMO_LCD_MODE2_OUT_POS_MODE = 0x0008,
+ GLAMO_LCD_MODE2_FRATE_CTRL_EN = 0x0010,
+ GLAMO_LCD_MODE2_SINGLE_BUFFER = 0x0020,
+ GLAMO_LCD_MODE2_SER_LSB_TO_MSB = 0x0040,
+ /* FIXME */
+};
+
+enum glamo_reg_lcd_mode3 {
+	/* LCD color source data format; the groups in this enum are separate
+	 * bit fields of MODE3 and combine by OR (see the example after the
+	 * enum) */
+ GLAMO_LCD_SRC_RGB565 = 0x0000,
+ GLAMO_LCD_SRC_ARGB1555 = 0x4000,
+ GLAMO_LCD_SRC_ARGB4444 = 0x8000,
+ /* interface type */
+ GLAMO_LCD_MODE3_LCD = 0x1000,
+ GLAMO_LCD_MODE3_RGB = 0x0800,
+ GLAMO_LCD_MODE3_CPU = 0x0000,
+ /* mode */
+ GLAMO_LCD_MODE3_RGB332 = 0x0000,
+ GLAMO_LCD_MODE3_RGB444 = 0x0100,
+ GLAMO_LCD_MODE3_RGB565 = 0x0200,
+ GLAMO_LCD_MODE3_RGB666 = 0x0300,
+ /* depth */
+ GLAMO_LCD_MODE3_6BITS = 0x0000,
+ GLAMO_LCD_MODE3_8BITS = 0x0010,
+ GLAMO_LCD_MODE3_9BITS = 0x0020,
+ GLAMO_LCD_MODE3_16BITS = 0x0030,
+ GLAMO_LCD_MODE3_18BITS = 0x0040,
+};
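+
+/* The groups above (source format, interface type, output mode, bus depth)
+ * occupy disjoint bit fields of MODE3, so a register value is built by
+ * OR'ing one choice from each group.  The value below only sketches the
+ * idiom and is not a claim about what any particular panel needs: an
+ * RGB565 source scanned out over the parallel RGB interface in RGB565 mode
+ * on a 16-bit bus.
+ */
+#define GLAMO_LCD_MODE3_EXAMPLE		(GLAMO_LCD_SRC_RGB565 |	\
+					 GLAMO_LCD_MODE3_RGB |	\
+					 GLAMO_LCD_MODE3_RGB565 | \
+					 GLAMO_LCD_MODE3_16BITS)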
+
+enum glamo_lcd_rot_mode {
+ GLAMO_LCD_ROT_MODE_0 = 0x0000,
+ GLAMO_LCD_ROT_MODE_180 = 0x2000,
+ GLAMO_LCD_ROT_MODE_MIRROR = 0x4000,
+ GLAMO_LCD_ROT_MODE_FLIP = 0x6000,
+ GLAMO_LCD_ROT_MODE_90 = 0x8000,
+ GLAMO_LCD_ROT_MODE_270 = 0xa000,
+};
+#define GLAMO_LCD_ROT_MODE_MASK 0xe000
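+
+/* Rotation occupies the top three bits covered by the mask above, so it is
+ * updated with the usual mask-and-or idiom.  A minimal sketch, assuming u16
+ * from <linux/types.h>; the helper name is illustrative and which LCD
+ * register carries this field is not stated here.
+ */
+static inline u16 glamo_lcd_set_rotation(u16 regval, enum glamo_lcd_rot_mode rot)
+{
+	/* replace only the rotation field, keep all other bits */
+	return (regval & ~GLAMO_LCD_ROT_MODE_MASK) | rot;
+}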
+
+enum glamo_lcd_cmd_type {
+ GLAMO_LCD_CMD_TYPE_DISP = 0x0000,
+ GLAMO_LCD_CMD_TYPE_PARALLEL = 0x4000,
+ GLAMO_LCD_CMD_TYPE_SERIAL = 0x8000,
+	GLAMO_LCD_CMD_TYPE_SERIAL_DIRECT = 0xc000,
+};
+#define GLAMO_LCD_CMD_TYPE_MASK 0xc000
+
+enum glamo_lcd_cmds {
+ GLAMO_LCD_CMD_DATA_DISP_FIRE = 0x00,
+ GLAMO_LCD_CMD_DATA_DISP_SYNC = 0x01, /* RGB only */
+ /* switch to command mode, no display */
+ GLAMO_LCD_CMD_DATA_FIRE_NO_DISP = 0x02,
+ /* display until VSYNC, switch to command */
+ GLAMO_LCD_CMD_DATA_FIRE_VSYNC = 0x11,
+ /* display until HSYNC, switch to command */
+ GLAMO_LCD_CMD_DATA_FIRE_HSYNC = 0x12,
+ /* display until VSYNC, 1 black frame, VSYNC, switch to command */
+ GLAMO_LCD_CMD_DATA_FIRE_VSYNC_B = 0x13,
+ /* don't care about display and switch to command */
+ GLAMO_LCD_CMD_DATA_FIRE_FREE = 0x14, /* RGB only */
+	/* don't care about display; keep the data display but disable data,
+	 * then switch to command mode */
+ GLAMO_LCD_CMD_DATA_FIRE_FREE_D = 0x15, /* RGB only */
+};
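+
+/* A command word combines one glamo_lcd_cmd_type value (top two bits, see
+ * GLAMO_LCD_CMD_TYPE_MASK above) with a command code from glamo_lcd_cmds in
+ * the low bits.  The value below is only an example of the composition:
+ * "display until VSYNC, then switch to command mode", presumably written to
+ * one of the LCD command registers defined above.
+ */
+#define GLAMO_LCD_CMD_FIRE_VSYNC_EXAMPLE \
+	(GLAMO_LCD_CMD_TYPE_DISP | GLAMO_LCD_CMD_DATA_FIRE_VSYNC)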
+
+enum glamo_core_revisions {
+ GLAMO_CORE_REV_A0 = 0x0000,
+ GLAMO_CORE_REV_A1 = 0x0001,
+ GLAMO_CORE_REV_A2 = 0x0002,
+ GLAMO_CORE_REV_A3 = 0x0003,
+};
+
+#endif /* _GLAMO_REGS_H */