path: root/target/linux/ubicom32/files/arch/ubicom32/kernel
Diffstat (limited to 'target/linux/ubicom32/files/arch/ubicom32/kernel')
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/Makefile | 64
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/asm-offsets.c | 161
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/devtree.c | 173
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/dma.c | 60
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/flat.c | 206
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/head.S | 273
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/init_task.c | 65
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/irq.c | 597
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/ldsr.c | 1185
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/module.c | 463
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/os_node.c | 88
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/process.c | 634
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/processor.c | 348
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/ptrace.c | 275
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/semaphore.c | 159
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/setup.c | 194
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/signal.c | 458
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/smp.c | 806
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/stacktrace.c | 244
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/sys_ubicom32.c | 237
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/syscalltable.S | 376
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/thread.c | 228
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/time.c | 212
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/timer_broadcast.c | 102
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/timer_device.c | 301
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/timer_tick.c | 109
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/topology.c | 47
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/traps.c | 514
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/uaccess.c | 109
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_context_switch.S | 359
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_ksyms.c | 98
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_syscall.S | 694
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/unaligned_trap.c | 698
-rw-r--r--  target/linux/ubicom32/files/arch/ubicom32/kernel/vmlinux.lds.S | 370
34 files changed, 10907 insertions, 0 deletions
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/Makefile b/target/linux/ubicom32/files/arch/ubicom32/kernel/Makefile
new file mode 100644
index 000000000..6294fa2ea
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/Makefile
@@ -0,0 +1,64 @@
+#
+# arch/ubicom32/kernel/Makefile
+# Main Makefile for the Ubicom32 arch directory.
+#
+# (C) Copyright 2009, Ubicom, Inc.
+#
+# This file is part of the Ubicom32 Linux Kernel Port.
+#
+# The Ubicom32 Linux Kernel Port is free software: you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation, either version 2 of the
+# License, or (at your option) any later version.
+#
+# The Ubicom32 Linux Kernel Port is distributed in the hope that it
+# will be useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with the Ubicom32 Linux Kernel Port. If not,
+# see <http://www.gnu.org/licenses/>.
+#
+# Ubicom32 implementation derived from (with many thanks):
+# arch/m68knommu
+# arch/blackfin
+# arch/parisc
+#
+
+extra-y := head.o vmlinux.lds
+
+obj-y += \
+ devtree.o \
+ dma.o \
+ flat.o \
+ init_task.o \
+ irq.o \
+ ldsr.o \
+ os_node.o \
+ process.o \
+ processor.o \
+ ptrace.o \
+ setup.o \
+ signal.o \
+ stacktrace.o \
+ sys_ubicom32.o \
+ syscalltable.o \
+ thread.o \
+ time.o \
+ traps.o \
+ ubicom32_context_switch.o \
+ ubicom32_ksyms.o \
+ ubicom32_syscall.o \
+ unaligned_trap.o
+
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_COMEMPCI) += comempci.o
+obj-$(CONFIG_SMP) += smp.o topology.o
+obj-$(CONFIG_ACCESS_OK_CHECKS_ENABLED) += uaccess.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS) += timer_device.o
+obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += timer_broadcast.o
+
+ifndef CONFIG_GENERIC_CLOCKEVENTS
+obj-y += timer_tick.o
+endif
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/asm-offsets.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/asm-offsets.c
new file mode 100644
index 000000000..639a536a1
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/asm-offsets.c
@@ -0,0 +1,161 @@
+/*
+ * arch/ubicom32/kernel/asm-offsets.c
+ * Ubicom32 architecture definitions needed by assembly language modules.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/thread_info.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+int main(void)
+{
+ /* offsets into the task struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into the kernel_stat struct */
+// DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+ /* offsets into the thread struct */
+ DEFINE(THREAD_D10, offsetof(struct thread_struct, d10));
+ DEFINE(THREAD_D11, offsetof(struct thread_struct, d11));
+ DEFINE(THREAD_D12, offsetof(struct thread_struct, d12));
+ DEFINE(THREAD_D13, offsetof(struct thread_struct, d13));
+ DEFINE(THREAD_A1, offsetof(struct thread_struct, a1));
+ DEFINE(THREAD_A2, offsetof(struct thread_struct, a2));
+ DEFINE(THREAD_A5, offsetof(struct thread_struct, a5));
+ DEFINE(THREAD_A6, offsetof(struct thread_struct, a6));
+ DEFINE(THREAD_SP, offsetof(struct thread_struct, sp));
+
+ /* offsets into the pt_regs */
+ DEFINE(PT_D0, offsetof(struct pt_regs, dn[0]));
+ DEFINE(PT_D1, offsetof(struct pt_regs, dn[1]));
+ DEFINE(PT_D2, offsetof(struct pt_regs, dn[2]));
+ DEFINE(PT_D3, offsetof(struct pt_regs, dn[3]));
+ DEFINE(PT_D4, offsetof(struct pt_regs, dn[4]));
+ DEFINE(PT_D5, offsetof(struct pt_regs, dn[5]));
+ DEFINE(PT_D6, offsetof(struct pt_regs, dn[6]));
+ DEFINE(PT_D7, offsetof(struct pt_regs, dn[7]));
+ DEFINE(PT_D8, offsetof(struct pt_regs, dn[8]));
+ DEFINE(PT_D9, offsetof(struct pt_regs, dn[9]));
+ DEFINE(PT_D10, offsetof(struct pt_regs, dn[10]));
+ DEFINE(PT_D11, offsetof(struct pt_regs, dn[11]));
+ DEFINE(PT_D12, offsetof(struct pt_regs, dn[12]));
+ DEFINE(PT_D13, offsetof(struct pt_regs, dn[13]));
+ DEFINE(PT_D14, offsetof(struct pt_regs, dn[14]));
+ DEFINE(PT_D15, offsetof(struct pt_regs, dn[15]));
+ DEFINE(PT_A0, offsetof(struct pt_regs, an[0]));
+ DEFINE(PT_A1, offsetof(struct pt_regs, an[1]));
+ DEFINE(PT_A2, offsetof(struct pt_regs, an[2]));
+ DEFINE(PT_A3, offsetof(struct pt_regs, an[3]));
+ DEFINE(PT_A4, offsetof(struct pt_regs, an[4]));
+ DEFINE(PT_A5, offsetof(struct pt_regs, an[5]));
+ DEFINE(PT_A6, offsetof(struct pt_regs, an[6]));
+ DEFINE(PT_A7, offsetof(struct pt_regs, an[7]));
+ DEFINE(PT_SP, offsetof(struct pt_regs, an[7]));
+
+ DEFINE(PT_ACC0HI, offsetof(struct pt_regs, acc0[0]));
+ DEFINE(PT_ACC0LO, offsetof(struct pt_regs, acc0[1]));
+ DEFINE(PT_MAC_RC16, offsetof(struct pt_regs, mac_rc16));
+
+ DEFINE(PT_ACC1HI, offsetof(struct pt_regs, acc1[0]));
+ DEFINE(PT_ACC1LO, offsetof(struct pt_regs, acc1[1]));
+
+ DEFINE(PT_SOURCE3, offsetof(struct pt_regs, source3));
+ DEFINE(PT_INST_CNT, offsetof(struct pt_regs, inst_cnt));
+ DEFINE(PT_CSR, offsetof(struct pt_regs, csr));
+ DEFINE(PT_DUMMY_UNUSED, offsetof(struct pt_regs, dummy_unused));
+
+ DEFINE(PT_INT_MASK0, offsetof(struct pt_regs, int_mask0));
+ DEFINE(PT_INT_MASK1, offsetof(struct pt_regs, int_mask1));
+
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+
+ DEFINE(PT_TRAP_CAUSE, offsetof(struct pt_regs, trap_cause));
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+
+ DEFINE(PT_FRAME_TYPE, offsetof(struct pt_regs, frame_type));
+
+ DEFINE(PT_ORIGINAL_D0, offsetof(struct pt_regs, original_dn_0));
+ DEFINE(PT_PREVIOUS_PC, offsetof(struct pt_regs, previous_pc));
+
+ /* signal defines */
+ DEFINE(SIGSEGV, SIGSEGV);
+ //DEFINE(SEGV_MAPERR, SEGV_MAPERR);
+ DEFINE(SIGTRAP, SIGTRAP);
+ //DEFINE(TRAP_TRACE, TRAP_TRACE);
+
+ DEFINE(PT_PTRACED, PT_PTRACED);
+ DEFINE(PT_DTRACE, PT_DTRACE);
+
+ DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
+
+ /* Offsets in thread_info structure */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_INTR_NESTING, offsetof(struct thread_info, interrupt_nesting));
+ DEFINE(ASM_TIF_NEED_RESCHED, TIF_NEED_RESCHED);
+ DEFINE(ASM_TIF_SYSCALL_TRACE, TIF_SYSCALL_TRACE);
+ DEFINE(ASM_TIF_SIGPENDING, TIF_SIGPENDING);
+
+ return 0;
+}
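
The DEFINE() macro above smuggles each constant through the compiler:
building asm-offsets.c with -S leaves "->" marker lines in the assembly
output, which the Kbuild sed rule (assumed here; the rule itself is not
part of this diff) turns into a generated asm-offsets.h. A minimal
sketch with an illustrative offset value:

	/* DEFINE(TASK_MM, offsetof(struct task_struct, mm)) emits into the .s file: */
	/*	->TASK_MM 232 offsetof(struct task_struct, mm)                       */
	/* and the (assumed) sed rule rewrites that as a header entry:               */
	/*	#define TASK_MM 232	// offsetof(struct task_struct, mm)          */
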
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/devtree.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/devtree.c
new file mode 100644
index 000000000..1f824d2f1
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/devtree.c
@@ -0,0 +1,173 @@
+/*
+ * arch/ubicom32/kernel/devtree.c
+ * Ubicom32 architecture device tree implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <asm/devtree.h>
+
+/*
+ * The device tree.
+ */
+struct devtree_node *devtree;
+
+/*
+ * devtree_print()
+ * Print the device tree.
+ */
+void devtree_print(void)
+{
+ struct devtree_node *p = devtree;
+ printk(KERN_INFO "Device Tree:\n");
+ while (p) {
+ if (p->magic != DEVTREE_NODE_MAGIC) {
+ printk(KERN_EMERG
+ "device tree has improper node: %p\n", p);
+ return;
+ }
+ printk(KERN_INFO "\t%p: sendirq=%03d, recvirq=%03d, "
+		       "name=%s\n", p, p->sendirq, p->recvirq, p->name);
+ p = p->next;
+ }
+}
+EXPORT_SYMBOL(devtree_print);
+
+/*
+ * devtree_irq()
+ *	Return the IRQ(s) associated with a devtree node.
+ */
+int devtree_irq(struct devtree_node *dn,
+ unsigned char *sendirq,
+ unsigned char *recvirq)
+{
+ if (dn->magic != DEVTREE_NODE_MAGIC) {
+ printk(KERN_EMERG "improper node: %p\n", dn);
+ if (sendirq) {
+ *sendirq = DEVTREE_IRQ_NONE;
+ }
+ if (recvirq) {
+ *recvirq = DEVTREE_IRQ_NONE;
+ }
+ return -EFAULT;
+ }
+
+ /*
+ * Copy the devtree irq(s) to the output parameters.
+ */
+ if (sendirq) {
+ *sendirq = dn->sendirq;
+ }
+ if (recvirq) {
+ *recvirq = dn->recvirq;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(devtree_irq);
+
+/*
+ * devtree_find_next()
+ * Provide an iterator for walking the device tree.
+ */
+struct devtree_node *devtree_find_next(struct devtree_node **cur)
+{
+ struct devtree_node *p = *cur;
+ if (!p) {
+ *cur = devtree;
+ return devtree;
+ }
+ p = p->next;
+ *cur = p;
+ return p;
+}
+
+/*
+ * devtree_find_by_irq()
+ * Return the node associated with a given irq.
+ */
+struct devtree_node *devtree_find_by_irq(uint8_t sendirq, uint8_t recvirq)
+{
+ struct devtree_node *p = devtree;
+
+ if (sendirq == recvirq) {
+		printk(KERN_EMERG "invalid request: sendirq (%d) must differ "
+		       "from recvirq (%d)\n", sendirq, recvirq);
+ return NULL;
+ }
+
+ while (p) {
+ if (p->magic != DEVTREE_NODE_MAGIC) {
+ printk(KERN_EMERG
+ "device tree has improper node: %p\n", p);
+ return NULL;
+ }
+
+ /*
+ * See if we can find a match on the IRQ(s) specified.
+ */
+ if ((sendirq == p->sendirq) && (recvirq == p->recvirq)) {
+ return p;
+ }
+
+ if ((sendirq == DEVTREE_IRQ_DONTCARE) &&
+ (p->recvirq == recvirq)) {
+ return p;
+ }
+
+ if ((recvirq == DEVTREE_IRQ_DONTCARE) &&
+ (p->sendirq == sendirq)) {
+ return p;
+ }
+
+ p = p->next;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(devtree_find_by_irq);
+
+/*
+ * devtree_find_node()
+ * Find a node in the device tree by name.
+ */
+struct devtree_node *devtree_find_node(const char *str)
+{
+ struct devtree_node *p = devtree;
+ while (p) {
+ if (p->magic != DEVTREE_NODE_MAGIC) {
+ printk(KERN_EMERG
+ "device tree has improper node: %p\n", p);
+ return NULL;
+ }
+ if (strcmp(p->name, str) == 0) {
+ return p;
+ }
+ p = p->next;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(devtree_find_node);
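
A minimal usage sketch of this API (hypothetical driver code; the node
name "uart0" and the function example_probe() are assumptions, not part
of this diff):

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <asm/devtree.h>

	static int example_probe(void)
	{
		struct devtree_node *dn, *iter = NULL;
		unsigned char sendirq, recvirq;

		/* Look a node up by name, then fetch its IRQ pair. */
		dn = devtree_find_node("uart0");
		if (!dn)
			return -ENODEV;
		if (devtree_irq(dn, &sendirq, &recvirq) < 0)
			return -EFAULT;

		/* Or walk every node the boot code handed over. */
		while ((dn = devtree_find_next(&iter)) != NULL)
			printk(KERN_DEBUG "node %s: recvirq=%d\n",
			       dn->name, dn->recvirq);
		return 0;
	}
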
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/dma.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/dma.c
new file mode 100644
index 000000000..f61810532
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/dma.c
@@ -0,0 +1,60 @@
+/*
+ * arch/ubicom32/kernel/dma.c
+ * Ubicom32 architecture dynamic DMA mapping support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ *
+ * We never have any address translations to worry about, so this
+ * is just alloc/free.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+{
+ void *ret;
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+ if (dev == NULL || (*dev->dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_phys(ret);
+ }
+ return ret;
+}
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
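
A usage sketch (hypothetical code, not part of this diff): a driver
allocating a small coherent buffer. Because this port does no address
translation, the returned handle is simply virt_to_phys() of the
allocation:

	static int example_dma_setup(struct device *dev)
	{
		dma_addr_t handle;
		void *buf;

		buf = dma_alloc_coherent(dev, 4096, &handle, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* ... hand 'handle' to the hardware, use 'buf' from the CPU ... */

		dma_free_coherent(dev, 4096, buf, handle);
		return 0;
	}
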
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/flat.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/flat.c
new file mode 100644
index 000000000..e8eb4595f
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/flat.c
@@ -0,0 +1,206 @@
+/*
+ * arch/ubicom32/kernel/flat.c
+ * Ubicom32 architecture flat executable format support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/flat.h>
+
+unsigned long ubicom32_flat_get_addr_from_rp(unsigned long *rp,
+ u32_t relval,
+ u32_t flags,
+ unsigned long *persistent)
+{
+ u32_t relval_reloc_type = relval >> 27;
+ u32_t insn = *rp;
+
+ if (*persistent) {
+ /*
+ * relval holds the relocation that has to be adjusted.
+ */
+ if (relval == 0) {
+ *persistent = 0;
+ }
+
+ return relval;
+ }
+
+ if (relval_reloc_type == R_UBICOM32_32) {
+ /*
+ * insn holds the relocation
+ */
+ return insn;
+ }
+
+ /*
+ * We don't know this one.
+ */
+ return 0;
+}
+
+void ubicom32_flat_put_addr_at_rp(unsigned long *rp,
+ u32_t val,
+ u32_t relval,
+ unsigned long *persistent)
+{
+ u32_t reloc_type = (relval >> 27) & 0x1f;
+ u32_t insn = *rp;
+
+	/*
+	 * If persistent is set then it holds the relocation, so take the
+	 * relocation type from there instead of from relval.
+	 */
+	if (*persistent) {
+		reloc_type = (*persistent >> 27) & 0x1f;
+	}
+
+ switch (reloc_type) {
+ case R_UBICOM32_32:
+ /*
+ * Store the 32 bits as is.
+ */
+ *rp = val;
+ break;
+ case R_UBICOM32_HI24:
+ {
+ /*
+ * 24 bit relocation that is part of the MOVEAI
+ * instruction. The 24 bits come from bits 7 - 30 of the
+ * relocation. The 24 bits eventually get split into 2
+ * fields in the instruction encoding.
+ *
+ * - Bits 7 - 27 of the relocation are encoded into bits
+ * 0 - 20 of the instruction.
+ *
+			 * - Bits 28 - 30 of the relocation are encoded into
+			 *   bits 24 - 26 of the instruction.
+ */
+ u32_t mask = 0x1fffff | (0x7 << 24);
+ u32_t valid24bits = (val >> 7) & 0xffffff;
+ u32_t bot_21 = valid24bits & 0x1fffff;
+ u32_t upper_3_bits = ((valid24bits & 0xe00000) << 3);
+ insn &= ~mask;
+
+ insn |= bot_21;
+ insn |= upper_3_bits;
+ *rp = insn;
+ }
+ break;
+ case R_UBICOM32_LO7_S:
+ case R_UBICOM32_LO7_2_S:
+ case R_UBICOM32_LO7_4_S:
+ {
+ /*
+ * Bits 0 - 6 of the relocation are encoded into the
+ * 7bit unsigned immediate fields of the SOURCE-1 field
+ * of the instruction. The immediate value is left
+ * shifted by (0, 1, 2) based on the operand size.
+ */
+ u32_t mask = 0x1f | (0x3 << 8);
+ u32_t bottom, top;
+ val &= 0x7f;
+ if (reloc_type == R_UBICOM32_LO7_2_S) {
+ val >>= 1;
+ } else if (reloc_type == R_UBICOM32_LO7_4_S) {
+ val >>= 2;
+ }
+
+ bottom = val & 0x1f;
+ top = val >> 5;
+ insn &= ~mask;
+ insn |= bottom;
+ insn |= (top << 8);
+ BUG_ON(*rp != insn);
+ *rp = insn;
+ break;
+ }
+ case R_UBICOM32_LO7_D:
+ case R_UBICOM32_LO7_2_D:
+ case R_UBICOM32_LO7_4_D:
+ {
+ /*
+ * Bits 0 - 6 of the relocation are encoded into the
+ * 7bit unsigned immediate fields of the DESTINATION
+ * field of the instruction. The immediate value is
+ * left shifted by (0, 1, 2) based on the operand size.
+ */
+ u32_t mask = (0x1f | (0x3 << 8)) << 16;
+ u32_t bottom, top;
+ val &= 0x7f;
+ if (reloc_type == R_UBICOM32_LO7_2_D) {
+ val >>= 1;
+ } else if (reloc_type == R_UBICOM32_LO7_4_D) {
+ val >>= 2;
+ }
+ bottom = (val & 0x1f) << 16;
+ top = (val >> 5) << 16;
+ insn &= ~mask;
+ insn |= bottom;
+ insn |= (top << 8);
+ BUG_ON(*rp != insn);
+ *rp = insn;
+ break;
+ }
+ case R_UBICOM32_LO7_CALLI:
+ case R_UBICOM32_LO16_CALLI:
+ {
+ /*
+ * Extract the offset for a CALLI instruction. The
+ * offsets can be either 7 bits or 18 bits. Since all
+			 * instructions in the ubicom32 architecture are at
+			 * word-aligned addresses, the truncated offset is right
+ * shifted by 2 before being encoded in the instruction.
+ */
+ if (reloc_type == R_UBICOM32_LO7_CALLI) {
+ val &= 0x7f;
+ } else {
+ val &= 0x3ffff;
+ }
+
+ val >>= 2;
+
+ insn &= ~0x071f071f;
+ insn |= (val & 0x1f) << 0;
+ val >>= 5;
+ insn |= (val & 0x07) << 8;
+ val >>= 3;
+ insn |= (val & 0x1f) << 16;
+ val >>= 5;
+ insn |= (val & 0x07) << 24;
+ if (reloc_type == R_UBICOM32_LO7_CALLI) {
+ BUG_ON(*rp != insn);
+ }
+ *rp = insn;
+ }
+ break;
+ }
+
+ if (*persistent) {
+ *persistent = 0;
+ }
+}
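
A worked example of the R_UBICOM32_HI24 split handled above, as a
standalone host program (the value 0x40400000 is just an illustrative
address):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val = 0x40400000;
		uint32_t valid24bits = (val >> 7) & 0xffffff;		/* 0x808000 */
		uint32_t bot_21 = valid24bits & 0x1fffff;		/* instruction bits 0 - 20 */
		uint32_t upper_3 = (valid24bits & 0xe00000) << 3;	/* instruction bits 24 - 26 */

		printf("low field 0x%06x, high field 0x%08x\n", bot_21, upper_3);
		return 0;
	}
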
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/head.S b/target/linux/ubicom32/files/arch/ubicom32/kernel/head.S
new file mode 100644
index 000000000..0c60504af
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/head.S
@@ -0,0 +1,273 @@
+/*
+ * arch/ubicom32/kernel/head.S
+ * Ubicom32 architecture kernel startup code.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/page_offset.h>
+#define __ASM__
+#include <asm/ip5000.h>
+
+
+#define SRC_AN A3
+#define DST_AN A4
+
+#define PARAM_DN D0
+#define TMP_DN D15
+#define TMP2_DN D14
+
+/*
+ * The following code is placed at the start of the Linux section of memory.
+ * This is the primary entry point for Linux.
+ *
+ * However, we also want the syscall entry/exit code to be at a fixed address.
+ * So we take the primary entry point and reserve 16 bytes. That address is
+ * where the system_call entry point exists. This 16 bytes basically allows
+ * us to jump around the system_call entry point code to the actual startup
+ * code.
+ *
+ * Linux Memory Map (see vmlinux.lds.S):
+ * 0x40400000 - Primary Entry Point for Linux (jump around code below).
+ * 0x40400010 - Old syscall Entry Point.
+ */
+
+ .sect .skip_syscall, "ax", @progbits
+ .global __skip_syscall_section
+__skip_syscall_section:
+ moveai A3, #%hi(_start)
+ lea.1 A3, %lo(_start)(A3)
+ ret A3
+/*
+ * __os_node_offset contains the offset from KERNELBASE to the os_node, it is
+ * not intended to be used by anything except the boot code.
+ */
+__os_node_offset:
+.long (_os_node - KERNELSTART)
+
+.text
+.global _start
+
+/*
+ * start()
+ * This is the start of the Linux kernel.
+ */
+_start:
+ move.4 SCRATCHPAD1, #0
+
+
+/*
+ * Setup the range registers... the loader has setup a few, but we will go ahead
+ * and correct them for our own limits. Note that once set these are never
+ * changed again. The ranges are as follows
+ *
+ * D_RANGE0 - io block (set up by the loader)
+ *
+ * I_RANGE0 and D_RANGE1 - kernel/ultra loader address space, bottom of OCM to
+ *	top of RAM, typically 0x3ffc0000 - 0x44000000
+ * I_RANGE1 - kernel / userspace transition area (aka syscalls, context switches)
+ * typically 0x3FFC0030 - ~0x3FFC0200
+ * I_RANGE2 / D_RANGE2 - slab area
+ * typically 0x40A00000 - ~0x44000000
+ * I_RANGE3
+ * old system call interface if enabled.
+ *
+ * D_RANGE3, D_RANGE4 - unused.
+ */
+ moveai SRC_AN, #%hi(PAGE_OFFSET_RAW)
+ lea.4 SRC_AN, %lo(PAGE_OFFSET_RAW)(SRC_AN)
+ move.4 D_RANGE1_LO, SRC_AN
+ move.4 I_RANGE0_LO, SRC_AN
+
+; don't try to calculate I_RANGE_HI, see below
+; moveai SRC_AN, #%hi(___init_end-4)
+; lea.4 SRC_AN, %lo(___init_end-4)(SRC_AN)
+; move.4 I_RANGE0_HI, SRC_AN
+
+ moveai SRC_AN, #%hi(SDRAMSTART + CONFIG_MIN_RAMSIZE-4)
+ lea.4 SRC_AN, %lo(SDRAMSTART + CONFIG_MIN_RAMSIZE-4)(SRC_AN)
+ move.4 D_RANGE1_HI, SRC_AN
+
+; for now allow the whole ram to be executable as well so we don't run into problems
+; once we load user mode code.
+ move.4 I_RANGE0_HI, SRC_AN
+
+#ifdef CONFIG_PROTECT_KERNEL
+; when kernel protection is enabled, we only open up syscall and non kernel text
+; for userspace apps, for now only irange registers 1 and 2 are used for userspace.
+
+ ;; syscall range
+ moveai SRC_AN, #%hi(__syscall_text_run_begin)
+ lea.4 SRC_AN, %lo(__syscall_text_run_begin)(SRC_AN)
+ move.4 I_RANGE1_LO, SRC_AN
+ moveai SRC_AN, #%hi(__syscall_text_run_end)
+ lea.4 SRC_AN, %lo(__syscall_text_run_end)(SRC_AN)
+ move.4 I_RANGE1_HI, SRC_AN
+
+ ;; slab instructions
+ moveai SRC_AN, #%hi(_edata)
+ lea.4 SRC_AN, %lo(_edata)(SRC_AN)
+ move.4 I_RANGE2_LO, SRC_AN
+ ;; End of DDR is already in range0 hi so just copy it.
+ move.4 I_RANGE2_HI, I_RANGE0_HI
+
+#ifdef CONFIG_OLD_40400010_SYSTEM_CALL
+ ;; create a small hole for old syscall location
+ moveai SRC_AN, #%hi(0x40400000)
+ lea.4 I_RANGE3_LO, 0x10(SRC_AN)
+ lea.4 I_RANGE3_HI, 0x14(SRC_AN)
+#endif
+ ;; slab data (same as slab instructions but starting a little earlier).
+ moveai SRC_AN, #%hi(_data_protection_end)
+ lea.4 SRC_AN, %lo(_data_protection_end)(SRC_AN)
+ move.4 D_RANGE2_LO, SRC_AN
+ move.4 D_RANGE2_HI, I_RANGE0_HI
+
+;; enable ranges
+ ;; skip I_RANGE0_EN
+ move.4 I_RANGE1_EN, #-1
+ move.4 I_RANGE2_EN, #-1
+#ifdef CONFIG_OLD_40400010_SYSTEM_CALL
+ move.4 I_RANGE3_EN, #-1
+#else
+ move.4 I_RANGE3_EN, #0
+#endif
+ ;; skip D_RANGE0_EN or D_RANGE1_EN
+ move.4 D_RANGE2_EN, #-1
+ move.4 D_RANGE3_EN, #0
+ move.4 D_RANGE4_EN, #0
+#endif
+
+;
+; If __ocm_free_begin is smaller than __ocm_free_end then
+; set up the OCM text and data RAM banks properly
+;
+ moveai DST_AN, #%hi(__ocm_free_begin)
+ lea.4 TMP_DN, %lo(__ocm_free_begin)(DST_AN)
+ moveai DST_AN, #%hi(__ocm_free_end)
+ lea.4 TMP2_DN, %lo(__ocm_free_end)(DST_AN)
+ sub.4 #0, TMP2_DN, TMP_DN
+ jmple.f 2f
+ moveai DST_AN, #%hi(__data_begin)
+ lea.4 TMP_DN, %lo(__data_begin)(DST_AN)
+ moveai DST_AN, #%hi(OCMSTART)
+ lea.4 TMP2_DN, %lo(OCMSTART)(DST_AN)
+ sub.4 TMP_DN, TMP_DN, TMP2_DN
+ lsr.4 TMP_DN, TMP_DN, #15
+ lsl.4 TMP_DN, #1, TMP_DN
+ moveai DST_AN, #%hi(OCMC_BASE)
+ add.4 OCMC_BANK_MASK(DST_AN), #-1, TMP_DN
+ pipe_flush 0
+2:
+;
+; Load .ocm_text
+;
+ moveai DST_AN, #%hi(__ocm_text_run_end)
+ lea.4 TMP_DN, %lo(__ocm_text_run_end)(DST_AN)
+ moveai DST_AN, #%hi(__ocm_text_run_begin)
+ lea.4 DST_AN, %lo(__ocm_text_run_begin)(DST_AN)
+ moveai SRC_AN, #%hi(__ocm_text_load_begin)
+ lea.4 SRC_AN, %lo(__ocm_text_load_begin)(SRC_AN)
+ jmpt.t 2f
+
+1: move.4 (DST_AN)4++, (SRC_AN)4++
+
+2: sub.4 #0, DST_AN, TMP_DN
+ jmpne.t 1b
+;
+; Load .syscall_text
+;
+ moveai DST_AN, #%hi(__syscall_text_run_end)
+ lea.4 TMP_DN, %lo(__syscall_text_run_end)(DST_AN)
+ moveai DST_AN, #%hi(__syscall_text_run_begin)
+ lea.4 DST_AN, %lo(__syscall_text_run_begin)(DST_AN)
+ moveai SRC_AN, #%hi(__syscall_text_load_begin)
+ lea.4 SRC_AN, %lo(__syscall_text_load_begin)(SRC_AN)
+ jmpt.t 2f
+
+1: move.4 (DST_AN)4++, (SRC_AN)4++
+
+2: sub.4 #0, DST_AN, TMP_DN
+ jmpne.t 1b
+
+;
+; Load .ocm_data
+;
+ moveai DST_AN, #%hi(__ocm_data_run_end)
+ lea.4 TMP_DN, %lo(__ocm_data_run_end)(DST_AN)
+ moveai DST_AN, #%hi(__ocm_data_run_begin)
+ lea.4 DST_AN, %lo(__ocm_data_run_begin)(DST_AN)
+ moveai SRC_AN, #%hi(__ocm_data_load_begin)
+ lea.4 SRC_AN, %lo(__ocm_data_load_begin)(SRC_AN)
+ jmpt.t 2f
+
+1: move.4 (DST_AN)4++, (SRC_AN)4++
+
+2: sub.4 #0, DST_AN, TMP_DN
+ jmpne.t 1b
+
+; Clear .bss
+;
+ moveai SRC_AN, #%hi(_ebss)
+ lea.4 TMP_DN, %lo(_ebss)(SRC_AN)
+ moveai DST_AN, #%hi(_sbss)
+ lea.4 DST_AN, %lo(_sbss)(DST_AN)
+ jmpt.t 2f
+
+1: move.4 (DST_AN)4++, #0
+
+2: sub.4 #0, DST_AN, TMP_DN
+ jmpne.t 1b
+
+; save our parameter to devtree (after clearing .bss)
+ moveai DST_AN, #%hi(devtree)
+ lea.4 DST_AN, %lo(devtree)(DST_AN)
+ move.4 (DST_AN), PARAM_DN
+
+ moveai sp, #%hi(init_thread_union)
+ lea.4 sp, %lo(init_thread_union)(sp)
+ movei TMP_DN, #ASM_THREAD_SIZE
+ add.4 sp, sp, TMP_DN
+ move.4 -4(sp)++, #0 ; nesting level = 0
+ move.4 -4(sp)++, #1 ; KERNEL_THREAD
+
+;; ip3k-elf-gdb backend now sets scratchpad3 to 1 when either continue
+;; or single step commands are issued. scratchpad3 is set to 0 when the
+;; debugger detaches from the board.
+ move.4 TMP_DN, scratchpad3
+ lsl.4 TMP_DN, TMP_DN, #0x0
+ jmpeq.f _jump_to_start_kernel
+_ok_to_set_break_points_in_linux:
+;; THREAD_STALL
+ move.4 mt_dbg_active_clr,#-1
+;; stalling the threads isn't instantaneous.. need to flush the pipe.
+ pipe_flush 0
+ pipe_flush 0
+
+_jump_to_start_kernel:
+ moveai SRC_AN, #%hi(start_kernel)
+ lea.4 SRC_AN, %lo(start_kernel)(SRC_AN)
+ ret SRC_AN
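
The three copy loops and the .bss clear above amount to the following C
sketch (purely illustrative: the real work must run in assembly before
any C environment exists, and the array typing of the linker symbols is
an assumption):

	extern unsigned long __ocm_text_run_begin[], __ocm_text_run_end[];
	extern unsigned long __ocm_text_load_begin[];
	extern unsigned long _sbss[], _ebss[];

	static void head_copy_and_clear(void)
	{
		unsigned long *dst = __ocm_text_run_begin;
		unsigned long *src = __ocm_text_load_begin;

		while (dst != __ocm_text_run_end)	/* load .ocm_text */
			*dst++ = *src++;

		for (dst = _sbss; dst != _ebss; )	/* clear .bss */
			*dst++ = 0;
	}
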
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/init_task.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/init_task.c
new file mode 100644
index 000000000..58baf5270
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/init_task.c
@@ -0,0 +1,65 @@
+/*
+ * arch/ubicom32/kernel/init_task.c
+ * Ubicom32 architecture task initialization implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <linux/version.h>
+
+///static struct fs_struct init_fs = INIT_FS;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+struct mm_struct init_mm = INIT_MM(init_mm);
+EXPORT_SYMBOL(init_mm);
+#endif
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry.
+ */
+union thread_union init_thread_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
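
The .data.init_task section attribute above only does its job if the
linker script aligns that section to THREAD_SIZE; a sketch of the
matching fragment (an assumption based on how other ports do it; the
actual vmlinux.lds.S is not shown in this section):

	/* In vmlinux.lds.S (assumed):
	 *	. = ALIGN(THREAD_SIZE);
	 *	*(.data.init_task)
	 */
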
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/irq.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/irq.c
new file mode 100644
index 000000000..c041f23e2
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/irq.c
@@ -0,0 +1,597 @@
+/*
+ * arch/ubicom32/kernel/irq.c
+ * Ubicom32 architecture IRQ support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/ldsr.h>
+#include <asm/ip5000.h>
+#include <asm/machdep.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread.h>
+#include <asm/devtree.h>
+
+unsigned int irq_soft_avail;
+static struct irqaction ubicom32_reserve_action[NR_IRQS];
+
+#if !defined(CONFIG_DEBUG_IRQMEASURE)
+#define IRQ_DECLARE_MEASUREMENT
+#define IRQ_MEASUREMENT_START()
+#define IRQ_MEASUREMENT_END(irq)
+#else
+#define IRQ_DECLARE_MEASUREMENT \
+ int __diff; \
+ unsigned int __tstart;
+
+#define IRQ_MEASUREMENT_START() \
+ __tstart = UBICOM32_IO_TIMER->sysval;
+
+#define IRQ_MEASUREMENT_END(irq) \
+ __diff = (int)UBICOM32_IO_TIMER->sysval - (int)__tstart; \
+ irq_measurement_update((irq), __diff);
+
+/*
+ * We keep track of the time spent in both irq_enter()
+ * and irq_exit().
+ */
+#define IRQ_WEIGHT 32
+
+struct irq_measurement {
+ volatile unsigned int min;
+ volatile unsigned int avg;
+ volatile unsigned int max;
+};
+
+static DEFINE_SPINLOCK(irq_measurement_lock);
+
+/*
+ * Add 1 in for softirq (irq_exit());
+ */
+static struct irq_measurement irq_measurements[NR_IRQS + 1];
+
+/*
+ * irq_measurement_update()
+ * Update an entry in the measurement array for this irq.
+ */
+static void irq_measurement_update(int irq, int sample)
+{
+ struct irq_measurement *im = &irq_measurements[irq];
+ spin_lock(&irq_measurement_lock);
+ if ((im->min == 0) || (im->min > sample)) {
+ im->min = sample;
+ }
+ if (im->max < sample) {
+ im->max = sample;
+ }
+ im->avg = ((im->avg * (IRQ_WEIGHT - 1)) + sample) / IRQ_WEIGHT;
+ spin_unlock(&irq_measurement_lock);
+}
+#endif
+
+/*
+ * irq_kernel_stack_check()
+ * See if the kernel stack is within STACK_WARN of the end.
+ */
+static void irq_kernel_stack_check(int irq, struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ unsigned long sp;
+
+ /*
+ * Make sure that we are not close to the top of the stack and thus
+ * can not really service this interrupt.
+ */
+ asm volatile (
+ "and.4 %0, SP, %1 \n\t"
+ : "=d" (sp)
+ : "d" (THREAD_SIZE - 1)
+ : "cc"
+ );
+
+ if (sp < (sizeof(struct thread_info) + STACK_WARN)) {
+		printk(KERN_WARNING
+		       "cpu[%d]: possible stack overflow detected, sp remaining: %p, "
+		       "irq: %d, regs: %p\n",
+ thread_get_self(), (void *)sp, irq, regs);
+ dump_stack();
+ }
+
+ if (sp < (sizeof(struct thread_info) + 16)) {
+ THREAD_STALL;
+ }
+#endif
+}
+
+/*
+ * irq_get_lsb()
+ * Get the LSB set in value
+ */
+static int irq_get_lsb(unsigned int value)
+{
+ static unsigned char irq_bits[8] = {
+ 3, 0, 1, 0, 2, 0, 1, 0
+ };
+	u32_t nextbit = 0;
+
+	/*
+	 * Narrow the search down 16, 8 and then 4 bits at a time; each
+	 * test below fires only when the remaining low bits are all zero,
+	 * so most calls take few (often none) of these branches.
+	 */
+ if (!(value & 0xffff)) {
+ nextbit += 0x10;
+ value >>= 16;
+ }
+
+ if (!(value & 0xff)) {
+ nextbit += 0x08;
+ value >>= 8;
+ }
+
+ if (!(value & 0xf)) {
+ nextbit += 0x04;
+ value >>= 4;
+ }
+
+ nextbit += irq_bits[value & 0x7];
+ if (nextbit > 63) {
+ panic("nextbit out of range: %d\n", nextbit);
+ }
+ return nextbit;
+}
+
+/*
+ * ubicom32_reserve_handler()
+ * Bogus handler associated with pre-reserved IRQ(s).
+ */
+static irqreturn_t ubicom32_reserve_handler(int irq, void *dev_id)
+{
+ BUG();
+ return IRQ_HANDLED;
+}
+
+/*
+ * __irq_disable_vector()
+ * Disable the interrupt by clearing the appropriate bit in the
+ * LDSR Mask Register.
+ */
+static void __irq_disable_vector(unsigned int irq)
+{
+ ldsr_disable_vector(irq);
+}
+
+/*
+ * __irq_ack_vector()
+ *	Acknowledge the specific interrupt by clearing the associated bit in
+ *	hardware
+ */
+static void __irq_ack_vector(unsigned int irq)
+{
+ if (irq < 32) {
+ asm volatile ("move.4 INT_CLR0, %0" : : "d" (1 << irq));
+ } else {
+ asm volatile ("move.4 INT_CLR1, %0" : : "d" (1 << (irq - 32)));
+ }
+}
+
+/*
+ * __irq_enable_vector()
+ *	Clear and then enable the interrupt by setting the appropriate bit in
+ * the LDSR Mask Register.
+ */
+static void __irq_enable_vector(unsigned int irq)
+{
+ /*
+ * Acknowledge, really clear the vector.
+ */
+ __irq_ack_vector(irq);
+ ldsr_enable_vector(irq);
+}
+
+/*
+ * __irq_mask_vector()
+ */
+static void __irq_mask_vector(unsigned int irq)
+{
+ ldsr_mask_vector(irq);
+}
+
+/*
+ * __irq_unmask_vector()
+ */
+static void __irq_unmask_vector(unsigned int irq)
+{
+ ldsr_unmask_vector(irq);
+}
+
+/*
+ * __irq_end_vector()
+ * Called once an interrupt is completed (reset the LDSR mask).
+ */
+static void __irq_end_vector(unsigned int irq)
+{
+ ldsr_unmask_vector(irq);
+}
+
+#if defined(CONFIG_SMP)
+/*
+ * __irq_set_affinity()
+ * Set the cpu affinity for this interrupt.
+ * affinity container allocated at boot
+ */
+static void __irq_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ smp_set_affinity(irq, dest);
+ cpumask_copy(irq_desc[irq].affinity, dest);
+}
+#endif
+
+/*
+ * On-Chip Generic Interrupt function handling.
+ */
+static struct irq_chip ubicom32_irq_chip = {
+ .name = "Ubicom32",
+ .startup = NULL,
+ .shutdown = NULL,
+ .enable = __irq_enable_vector,
+ .disable = __irq_disable_vector,
+ .ack = __irq_ack_vector,
+ .mask = __irq_mask_vector,
+ .unmask = __irq_unmask_vector,
+ .end = __irq_end_vector,
+#if defined(CONFIG_SMP)
+ .set_affinity = __irq_set_affinity,
+#endif
+};
+
+/*
+ * do_IRQ()
+ * Primary interface for handling IRQ() requests.
+ */
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+{
+ struct pt_regs *oldregs;
+ struct thread_info *ti = current_thread_info();
+
+ IRQ_DECLARE_MEASUREMENT;
+
+ /*
+ * Mark that we are inside of an interrupt and
+ * that interrupts are disabled.
+ */
+ oldregs = set_irq_regs(regs);
+ ti->interrupt_nesting++;
+ trace_hardirqs_off();
+ irq_kernel_stack_check(irq, regs);
+
+ /*
+ * Start the interrupt sequence
+ */
+ irq_enter();
+
+ /*
+ * Execute the IRQ handler and any pending SoftIRQ requests.
+ */
+ BUG_ON(!irqs_disabled());
+ IRQ_MEASUREMENT_START();
+ __do_IRQ(irq);
+ IRQ_MEASUREMENT_END(irq);
+ BUG_ON(!irqs_disabled());
+
+ /*
+ * TODO: Since IRQ's are disabled when calling irq_exit()
+ * modify Kconfig to set __ARCH_IRQ_EXIT_IRQS_DISABLED flag.
+	 * This will slightly improve performance by letting softirq
+	 * handling avoid a needless disable/re-enable of interrupts.
+ */
+ IRQ_MEASUREMENT_START();
+ irq_exit();
+ IRQ_MEASUREMENT_END(NR_IRQS);
+ BUG_ON(!irqs_disabled());
+
+ /*
+ * Outside of an interrupt (or nested exit).
+ */
+ set_irq_regs(oldregs);
+ trace_hardirqs_on();
+ ti->interrupt_nesting--;
+}
+
+/*
+ * irq_soft_alloc()
+ * Allocate a soft IRQ.
+ */
+int irq_soft_alloc(unsigned int *soft)
+{
+ if (irq_soft_avail == 0) {
+ printk(KERN_NOTICE "no soft irqs to allocate\n");
+ return -EFAULT;
+ }
+
+ *soft = irq_get_lsb(irq_soft_avail);
+ irq_soft_avail &= ~(1 << *soft);
+ return 0;
+}
+
+/*
+ * ack_bad_irq()
+ *	Called to handle a bad irq request.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_ERR "IRQ: unexpected irq=%d\n", irq);
+ __irq_end_vector(irq);
+}
+
+/*
+ * show_interrupts()
+ * Return a string that displays the state of each of the interrupts.
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+ struct irqaction *ap;
+ int irq = *((loff_t *) v);
+ int j;
+
+ if (irq >= NR_IRQS) {
+ return 0;
+ }
+
+ if (irq == 0) {
+ seq_puts(p, " ");
+ for_each_online_cpu(j) {
+ seq_printf(p, "CPU%d ", j);
+ }
+ seq_putc(p, '\n');
+ }
+
+ ap = irq_desc[irq].action;
+ if (ap) {
+ seq_printf(p, "%3d: ", irq);
+ for_each_online_cpu(j) {
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
+ }
+ seq_printf(p, "%14s ", irq_desc[irq].chip->name);
+ seq_printf(p, "%s", ap->name);
+ for (ap = ap->next; ap; ap = ap->next) {
+ seq_printf(p, ", %s", ap->name);
+ }
+ seq_putc(p, '\n');
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_IRQMEASURE)
+static unsigned int irq_cycles_to_micro(unsigned int cycles, unsigned int frequency)
+{
+ unsigned int micro = (cycles / (frequency / 1000000));
+ return micro;
+}
+
+/*
+ * irq_measurement_show()
+ * Print out the min, avg, max values for each IRQ
+ *
+ * By request, the max value is reset after each dump.
+ */
+static int irq_measurement_show(struct seq_file *p, void *v)
+{
+ struct irqaction *ap;
+ unsigned int freq = processor_frequency();
+ int irq = *((loff_t *) v);
+
+
+ if (irq == 0) {
+ seq_puts(p, "\tmin\tavg\tmax\t(micro-seconds)\n");
+ }
+
+ if (irq > NR_IRQS) {
+ return 0;
+ }
+
+ if (irq == NR_IRQS) {
+ unsigned int min, avg, max;
+ spin_lock(&irq_measurement_lock);
+ min = irq_cycles_to_micro(irq_measurements[irq].min, freq);
+ avg = irq_cycles_to_micro(irq_measurements[irq].avg, freq);
+ max = irq_cycles_to_micro(irq_measurements[irq].max, freq);
+ irq_measurements[irq].max = 0;
+ spin_unlock(&irq_measurement_lock);
+ seq_printf(p, " \t%u\t%u\t%u\tsoftirq\n", min, avg, max);
+ return 0;
+ }
+
+ ap = irq_desc[irq].action;
+ if (ap) {
+ unsigned int min, avg, max;
+ spin_lock(&irq_measurement_lock);
+ min = irq_cycles_to_micro(irq_measurements[irq].min, freq);
+ avg = irq_cycles_to_micro(irq_measurements[irq].avg, freq);
+ max = irq_cycles_to_micro(irq_measurements[irq].max, freq);
+ irq_measurements[irq].max = 0;
+ spin_unlock(&irq_measurement_lock);
+ seq_printf(p, "%2u:\t%u\t%u\t%u\t%s\n", irq, min, avg, max, ap->name);
+ }
+ return 0;
+}
+
+static void *irq_measurement_start(struct seq_file *f, loff_t *pos)
+{
+ return (*pos <= NR_IRQS) ? pos : NULL;
+}
+
+static void *irq_measurement_next(struct seq_file *f, void *v, loff_t *pos)
+{
+ (*pos)++;
+ if (*pos > NR_IRQS)
+ return NULL;
+ return pos;
+}
+
+static void irq_measurement_stop(struct seq_file *f, void *v)
+{
+ /* Nothing to do */
+}
+
+static const struct seq_operations irq_measurement_seq_ops = {
+ .start = irq_measurement_start,
+ .next = irq_measurement_next,
+ .stop = irq_measurement_stop,
+ .show = irq_measurement_show,
+};
+
+static int irq_measurement_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &irq_measurement_seq_ops);
+}
+
+static const struct file_operations irq_measurement_fops = {
+ .open = irq_measurement_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init irq_measurement_init(void)
+{
+ proc_create("irq_measurements", 0, NULL, &irq_measurement_fops);
+ return 0;
+}
+module_init(irq_measurement_init);
+#endif
+
+/*
+ * init_IRQ(void)
+ * Initialize the on-chip IRQ subsystem.
+ */
+void __init init_IRQ(void)
+{
+ int irq;
+ struct devtree_node *p = NULL;
+ struct devtree_node *iter = NULL;
+ unsigned int mask = 0;
+ unsigned int reserved = 0;
+
+ /*
+	 * Pull out the list of software interrupts that are available to
+ * Linux and provide an allocation function for them. The first
+ * 24 interrupts of INT0 are software interrupts.
+ */
+ irq_soft_avail = 0;
+ if (processor_interrupts(&irq_soft_avail, NULL) < 0) {
+ printk(KERN_WARNING "No Soft IRQ(s) available\n");
+ }
+ irq_soft_avail &= ((1 << 24) - 1);
+
+ /*
+ * Initialize all of the on-chip interrupt handling
+ * to use a common set of interrupt functions.
+ */
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ irq_desc[irq].status = IRQ_DISABLED;
+ irq_desc[irq].action = NULL;
+ irq_desc[irq].depth = 1;
+ set_irq_chip(irq, &ubicom32_irq_chip);
+ }
+
+ /*
+ * The sendirq of a devnode is not registered within Linux but instead
+ * is used by the software I/O thread. These interrupts are reserved.
+ * The recvirq is used by Linux and registered by a device driver, these
+ * are not reserved.
+ *
+ * recvirq(s) that are in the software interrupt range are not supposed
+ * to be marked as reserved. We track this while we scan the device
+ * nodes.
+ */
+ p = devtree_find_next(&iter);
+ while (p) {
+ unsigned char sendirq, recvirq;
+ devtree_irq(p, &sendirq, &recvirq);
+
+ /*
+ * If the sendirq is valid, mark that irq as taken by the
+ * devtree node.
+ */
+ if (sendirq < NR_IRQS) {
+ ubicom32_reserve_action[sendirq].handler =
+ ubicom32_reserve_handler;
+ ubicom32_reserve_action[sendirq].name = p->name;
+ irq_desc[sendirq].action =
+ &ubicom32_reserve_action[sendirq];
+ mask |= (1 << sendirq);
+ }
+
+ /*
+	 * Track the relevant receive IRQ(s)
+ */
+ if (recvirq < 24) {
+ mask |= (1 << recvirq);
+ }
+
+ /*
+ * Move to the next node.
+ */
+ p = devtree_find_next(&iter);
+ }
+
+ /*
+ * Remove these bits from the irq_soft_avail list and then use the
+ * result as the list of pre-reserved IRQ(s).
+ */
+ reserved = ~irq_soft_avail & ~mask;
+ for (irq = 0; irq < 24; irq++) {
+ if ((reserved & (1 << irq))) {
+ ubicom32_reserve_action[irq].handler =
+ ubicom32_reserve_handler;
+ ubicom32_reserve_action[irq].name = "reserved";
+ irq_desc[irq].action = &ubicom32_reserve_action[irq];
+ }
+ }
+
+ /*
+ * Initialize the LDSR which is the Ubicom32 programmable
+ * interrupt controller.
+ */
+ ldsr_init();
+
+ /*
+ * The Ubicom trap code needs a 2nd init after IRQ(s) are setup.
+ */
+ trap_init_interrupt();
+}
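
A usage sketch of the soft-IRQ allocator above (hypothetical driver
code; example_handler() and the "example" name are assumptions):

	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		/* Process the software interrupt raised by a peer thread. */
		return IRQ_HANDLED;
	}

	static int __init example_init(void)
	{
		unsigned int soft;

		if (irq_soft_alloc(&soft) < 0)
			return -EBUSY;

		return request_irq(soft, example_handler, 0, "example", NULL);
	}
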
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/ldsr.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/ldsr.c
new file mode 100644
index 000000000..a608d74cf
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/ldsr.c
@@ -0,0 +1,1185 @@
+/*
+ * arch/ubicom32/kernel/ldsr.c
+ * Ubicom32 architecture Linux Device Services Driver Interface
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ *
+ * NOTES:
+ *
+ * The LDSR is a programmable interrupt controller that is written in software.
+ * It emulates the behavior of a PIC by fielding the interrupts, choosing a
+ * victim thread to take the interrupt and forcing that thread to take a context
+ * switch to the appropriate interrupt handler.
+ *
+ * Because traps are treated as just a special class of interrupts, the LDSR
+ * also handles the processing of traps.
+ *
+ * Because we compile Linux both UP and SMP, we need the LDSR to use
+ * architectural locking that is not "compiled out" when compiling UP. For now,
+ * we use the single atomic bit lock.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <asm/ip5000.h>
+#include <asm/atomic.h>
+#include <asm/machdep.h>
+#include <asm/asm-offsets.h>
+#include <asm/traps.h>
+#include <asm/thread.h>
+#include <asm/range-protect.h>
+
+/*
+ * One cannot print from the LDSR, so the best we can do is
+ * check a condition and stall all of the threads.
+ */
+
+// #define DEBUG_LDSR 1
+#if defined(DEBUG_LDSR)
+#define DEBUG_ASSERT(cond) \
+ if (!(cond)) { \
+ THREAD_STALL; \
+ }
+#else
+#define DEBUG_ASSERT(cond)
+#endif
+
+/*
+ * Make global so that we can use it in the RFI code in assembly.
+ */
+unsigned int ldsr_soft_irq_mask;
+EXPORT_SYMBOL(ldsr_soft_irq_mask);
+
+static unsigned int ldsr_suspend_mask;
+static unsigned int ldsr_soft_irq;
+static unsigned int ldsr_stack_space[1024];
+
+static struct ldsr_register_bank {
+ volatile unsigned int enabled0;
+ volatile unsigned int enabled1;
+ volatile unsigned int mask0;
+ volatile unsigned int mask1;
+ unsigned int total;
+ unsigned int retry;
+ unsigned int backout;
+} ldsr_interrupt;
+
+/*
+ * Which thread/cpu are we?
+ */
+static int ldsr_tid = -1;
+
+#if defined(CONFIG_IRQSTACKS)
+/*
+ * per-CPU IRQ stacks (thread information and stack)
+ *
+ * NOTE: Do not use DEFINE_PER_CPU() as it makes it harder
+ * to find the location of ctx from assembly language.
+ */
+union irq_ctx {
+ struct thread_info tinfo;
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+};
+static union irq_ctx *percpu_irq_ctxs[NR_CPUS];
+
+/*
+ * Storage for the interrupt stack.
+ */
+#if !defined(CONFIG_IRQSTACKS_USEOCM)
+static char percpu_irq_stacks[(NR_CPUS * THREAD_SIZE) + (THREAD_SIZE - 1)];
+#else
+/*
+ * For OCM, the linker will ensure that space is allocated for the stack
+ * see (vmlinux.lds.S)
+ */
+static char percpu_irq_stacks[];
+#endif
+
+#endif
+
+/*
+ * Save trap IRQ because we need to un-suspend if it gets set.
+ */
+static unsigned int ldsr_trap_irq_mask;
+static unsigned int ldsr_trap_irq;
+
+/*
+ * ret_from_interrupt_to_kernel
+ * Just restore the context and do nothing else.
+ */
+asmlinkage void ret_from_interrupt_to_kernel(void)__attribute__((naked));
+
+/*
+ * ret_from_interrupt_to_user
+ * Call scheduler if needed. Just restore the context.
+ */
+asmlinkage void ret_from_interrupt_to_user(void)__attribute__((naked));
+
+#ifdef DEBUG_LDSR
+u32_t old_sp, old_pc, old_a0, old_a5, old_a3;
+struct pt_regs copy_regs, *copy_save_area;
+#endif
+
+int __user_mode(unsigned long sp)
+{
+
+ u32_t saved_stack_base = sp & ~(ASM_THREAD_SIZE - 1);
+#if defined(CONFIG_IRQSTACKS_USEOCM)
+ if ((union irq_ctx *)saved_stack_base == percpu_irq_ctxs[smp_processor_id()]) {
+ /*
+ * On the interrupt stack.
+ */
+ return 0;
+ }
+#endif
+
+ if (!(u32_t)current) {
+ return 0;
+ }
+ return saved_stack_base != ((u32_t)current->stack);
+}
+
+/*
+ * ldsr_lock_release()
+ * Release the LDSR lock.
+ */
+static void ldsr_lock_release(void)
+{
+ UBICOM32_UNLOCK(LDSR_LOCK_BIT);
+}
+
+/*
+ * ldsr_lock_acquire()
+ * Acquire the LDSR lock, spin if not available.
+ */
+static void ldsr_lock_acquire(void)
+{
+ UBICOM32_LOCK(LDSR_LOCK_BIT);
+}
+
+/*
+ * ldsr_thread_irq_disable()
+ * Disable interrupts for the specified thread.
+ */
+static void ldsr_thread_irq_disable(unsigned int tid)
+{
+ unsigned int mask = (1 << tid);
+
+ asm volatile (
+ " or.4 scratchpad1, scratchpad1, %0 \n\t"
+ :
+ : "d"(mask)
+ : "cc"
+ );
+}
+
+/*
+ * ldsr_thread_get_interrupts()
+ * Get the interrupt state for all threads.
+ */
+static unsigned long ldsr_thread_get_interrupts(void)
+{
+ unsigned long ret = 0;
+ asm volatile (
+ " move.4 %0, scratchpad1 \n\t"
+ : "=r" (ret)
+ :
+ );
+ return ret;
+}
+
+/*
+ * ldsr_emulate_and_run()
+ * Emulate the instruction and then set the thread to run.
+ */
+static void ldsr_emulate_and_run(unsigned int tid)
+{
+ unsigned int thread_mask = (1 << tid);
+ u32_t write_csr = (tid << 15) | (1 << 14);
+
+ /*
+ * Emulate the unaligned access.
+ */
+ unaligned_emulate(tid);
+
+ /*
+ * Get the thread back in a running state.
+ */
+ asm volatile (
+ " setcsr %0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 trap_cause, #0 \n\t" /* Clear the trap cause
+ * register */
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 mt_dbg_active_set, %1 \n\t" /* Activate thread even if
+ * in dbg/fault state */
+ " move.4 mt_active_set, %1 \n\t" /* Restart target
+ * thread. */
+ :
+ : "r" (write_csr), "d" (thread_mask)
+ : "cc"
+ );
+ thread_enable_mask(thread_mask);
+}
+
+/*
+ * ldsr_preemptive_context_save()
+ *	Save thread context from another hardware thread. The other thread must
+ * be stalled.
+ */
+static inline void ldsr_preemptive_context_save(u32_t thread,
+ struct pt_regs *regs)
+{
+ /*
+ * Save the current state of the specified thread
+ */
+ asm volatile (
+ " move.4 a3, %0 \n\t"
+
+ /* set src1 from the target thread */
+ " move.4 csr, %1 \n\t"
+ " setcsr_flush 0 \n\t"
+ " setcsr_flush 0 \n\t"
+
+ /* copy state from the other thread */
+ " move.4 "D(PT_D0)"(a3), d0 \n\t"
+ " move.4 "D(PT_D1)"(a3), d1 \n\t"
+ " move.4 "D(PT_D2)"(a3), d2 \n\t"
+ " move.4 "D(PT_D3)"(a3), d3 \n\t"
+ " move.4 "D(PT_D4)"(a3), d4 \n\t"
+ " move.4 "D(PT_D5)"(a3), d5 \n\t"
+ " move.4 "D(PT_D6)"(a3), d6 \n\t"
+ " move.4 "D(PT_D7)"(a3), d7 \n\t"
+ " move.4 "D(PT_D8)"(a3), d8 \n\t"
+ " move.4 "D(PT_D9)"(a3), d9 \n\t"
+ " move.4 "D(PT_D10)"(a3), d10 \n\t"
+ " move.4 "D(PT_D11)"(a3), d11 \n\t"
+ " move.4 "D(PT_D12)"(a3), d12 \n\t"
+ " move.4 "D(PT_D13)"(a3), d13 \n\t"
+ " move.4 "D(PT_D14)"(a3), d14 \n\t"
+ " move.4 "D(PT_D15)"(a3), d15 \n\t"
+ " move.4 "D(PT_A0)"(a3), a0 \n\t"
+ " move.4 "D(PT_A1)"(a3), a1 \n\t"
+ " move.4 "D(PT_A2)"(a3), a2 \n\t"
+ " move.4 "D(PT_A3)"(a3), a3 \n\t"
+ " move.4 "D(PT_A4)"(a3), a4 \n\t"
+ " move.4 "D(PT_A5)"(a3), a5 \n\t"
+ " move.4 "D(PT_A6)"(a3), a6 \n\t"
+ " move.4 "D(PT_SP)"(a3), a7 \n\t"
+ " move.4 "D(PT_ACC0HI)"(a3), acc0_hi \n\t"
+ " move.4 "D(PT_ACC0LO)"(a3), acc0_lo \n\t"
+ " move.4 "D(PT_MAC_RC16)"(a3), mac_rc16 \n\t"
+ " move.4 "D(PT_ACC1HI)"(a3), acc1_hi \n\t"
+ " move.4 "D(PT_ACC1LO)"(a3), acc1_lo \n\t"
+ " move.4 "D(PT_SOURCE3)"(a3), source3 \n\t"
+ " move.4 "D(PT_INST_CNT)"(a3), inst_cnt \n\t"
+ " move.4 "D(PT_CSR)"(a3), csr \n\t"
+ " move.4 "D(PT_DUMMY_UNUSED)"(a3), #0 \n\t"
+ " move.4 "D(PT_INT_MASK0)"(a3), int_mask0 \n\t"
+ " move.4 "D(PT_INT_MASK1)"(a3), int_mask1 \n\t"
+ " move.4 "D(PT_TRAP_CAUSE)"(a3), trap_cause \n\t"
+ " move.4 "D(PT_PC)"(a3), pc \n\t"
+ " move.4 "D(PT_PREVIOUS_PC)"(a3), previous_pc \n\t"
+ /* disable csr thread select */
+ " movei csr, #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ :
+ : "r" (regs->dn), "d" ((thread << 9) | (1 << 8))
+ : "a3"
+ );
+}
+
+/*
+ * ldsr_rotate_threads()
+ * Simple round robin algorithm for choosing the next cpu
+ */
+static int ldsr_rotate_threads(unsigned long cpus)
+{
+ static unsigned char ldsr_bits[8] = {
+ 3, 0, 1, 0, 2, 0, 1, 0
+ };
+
+ static int nextbit;
+ int thisbit;
+
+ /*
+ * Move the interrupts down so that we consider interrupts from where
+ * we left off, then take the interrupts we would lose and move them
+ * to the top half of the interrupts value.
+ */
+ cpus = (cpus >> nextbit) | (cpus << ((sizeof(cpus) * 8) - nextbit));
+
+ /*
+ * About half the time none of these steps is taken at all, and in
+ * roughly half of the remaining cases only one of them executes.
+ */
+ if (!(cpus & 0xffff)) {
+ nextbit += 16;
+ cpus >>= 16;
+ }
+
+ if (!(cpus & 0xff)) {
+ nextbit += 8;
+ cpus >>= 8;
+ }
+
+ if (!(cpus & 0xf)) {
+ nextbit += 4;
+ cpus >>= 4;
+ }
+
+ nextbit += ldsr_bits[cpus & 0x7];
+ thisbit = (nextbit & ((sizeof(cpus) * 8) - 1));
+ nextbit = (thisbit + 1) & ((sizeof(cpus) * 8) - 1);
+ DEBUG_ASSERT(thisbit < THREAD_ARCHITECTURAL_MAX);
+ return thisbit;
+}
+
+/*
+ * ldsr_rotate_interrupts()
+ * Get rotating next set bit value.
+ */
+static int ldsr_rotate_interrupts(unsigned long long interrupts)
+{
+ static unsigned char ldsr_bits[8] = {
+ 3, 0, 1, 0, 2, 0, 1, 0
+ };
+
+ static int nextbit;
+ int thisbit;
+
+ /*
+ * Move the interrupts down so that we consider interrupts from where
+ * we left off, then take the interrupts we would lose and move them
+ * to the top half of the interrupts value.
+ */
+ interrupts = (interrupts >> nextbit) |
+ (interrupts << ((sizeof(interrupts) * 8) - nextbit));
+
+ /*
+ * About half the time none of these steps is taken at all, and in
+ * roughly half of the remaining cases only one of them executes.
+ */
+ if (!(interrupts & 0xffffffff)) {
+ nextbit += 32;
+ interrupts >>= 32;
+ }
+
+ if (!(interrupts & 0xffff)) {
+ nextbit += 16;
+ interrupts >>= 16;
+ }
+
+ if (!(interrupts & 0xff)) {
+ nextbit += 8;
+ interrupts >>= 8;
+ }
+
+ if (!(interrupts & 0xf)) {
+ nextbit += 4;
+ interrupts >>= 4;
+ }
+
+ nextbit += ldsr_bits[interrupts & 0x7];
+ thisbit = (nextbit & ((sizeof(interrupts) * 8) - 1));
+ nextbit = (thisbit + 1) & ((sizeof(interrupts) * 8) - 1);
+
+ DEBUG_ASSERT(thisbit < (sizeof(interrupts) * 8));
+ return thisbit;
+}
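+
+#if 0
+/*
+ * Purely illustrative user-space model of the rotating scan implemented
+ * by ldsr_rotate_threads()/ldsr_rotate_interrupts() above; an assumption
+ * for documentation only, not part of the port. It replaces the
+ * ldsr_bits[] table trick with __builtin_ctzll() and guards the rotate
+ * against the undefined 64-bit shift that occurs when next_bit is 0.
+ */
+#include <stdio.h>
+
+static int next_bit;
+
+static int rotate_scan(unsigned long long pending)
+{
+ unsigned long long r = next_bit ?
+ ((pending >> next_bit) | (pending << (64 - next_bit))) : pending;
+ int this_bit = (next_bit + __builtin_ctzll(r)) & 63;
+
+ next_bit = (this_bit + 1) & 63;
+ return this_bit;
+}
+
+int main(void)
+{
+ /* With bits 3 and 40 pending, successive scans alternate 3, 40, 3. */
+ unsigned long long pending = (1ULL << 3) | (1ULL << 40);
+ int a = rotate_scan(pending);
+ int b = rotate_scan(pending);
+ int c = rotate_scan(pending);
+ printf("%d %d %d\n", a, b, c);
+ return 0;
+}
+#endif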
+
+/*
+ * ldsr_backout_of_irq()
+ *
+ * One way or the other this interrupt is not being
+ * processed; make sure that it is reset. We are
+ * not going to call irq_end_vector(), so unmask the
+ * interrupt ourselves.
+ */
+static void ldsr_backout_of_irq(int vector, unsigned long tid_mask)
+{
+#if defined(CONFIG_SMP)
+ if (unlikely(vector == smp_ipi_irq)) {
+ smp_reset_ipi(tid_mask);
+ }
+#endif
+ ldsr_unmask_vector(vector);
+ ldsr_interrupt.backout++;
+}
+
+#if defined(CONFIG_IRQSTACKS)
+/*
+ * ldsr_choose_savearea_and_returnvec()
+ * Test our current state (user, kernel, interrupt) and set things up.
+ *
+ * This version of the function uses 3 stacks and nests interrupts
+ * on the interrupt stack.
+ */
+static struct pt_regs *ldsr_choose_savearea_and_returnvec(thread_t tid, u32_t linux_sp, u32_t *pvec)
+{
+ struct pt_regs *save_area;
+ u32_t masked_linux_sp = linux_sp & ~(THREAD_SIZE - 1);
+ struct thread_info *ti = (struct thread_info *)sw_ksp[tid];
+
+#if defined(CONFIG_SMP)
+ union irq_ctx *icp = percpu_irq_ctxs[tid];
+#else
+ union irq_ctx *icp = percpu_irq_ctxs[0];
+#endif
+
+ if (masked_linux_sp == (u32_t)icp) {
+ /*
+ * Fault/Interrupt occurred while on the interrupt stack.
+ */
+ save_area = (struct pt_regs *)((char *)linux_sp - sizeof(struct pt_regs) - 8);
+ *pvec = (u32_t)(&ret_from_interrupt_to_kernel);
+ } else {
+ /*
+ * Fault/Interrupt occurred while on user/kernel stack. This is a new
+ * first use of the interrupt stack.
+ */
+ save_area = (struct pt_regs *) ((char *)icp + sizeof(icp->stack) - sizeof(struct pt_regs) - 8);
+ if (masked_linux_sp == (u32_t)ti) {
+ *pvec = (u32_t)(&ret_from_interrupt_to_kernel);
+ } else {
+ *pvec = (u32_t)(&ret_from_interrupt_to_user);
+ }
+
+ /*
+ * Because the softirq code will execute on the "interrupt" stack, we
+ * need to maintain the knowledge of what "task" was executing on the
+ * cpu. This is done by copying the thread_info->task of the cpu
+ * we are about to context switch into the interrupt context's
+ * thread_info structure.
+ */
+ icp->tinfo.task = ti->task;
+ icp->tinfo.preempt_count =
+ (icp->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (ti->preempt_count & SOFTIRQ_MASK);
+ icp->tinfo.interrupt_nesting = 0;
+ }
+ save_area->nesting_level = icp->tinfo.interrupt_nesting;
+ return save_area;
+}
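+
+/*
+ * Worked example of the masking above (illustrative only; THREAD_SIZE is
+ * configuration dependent): with THREAD_SIZE = 0x2000, a stack pointer of
+ * 0x40123abc masks to 0x40122000 under ~(THREAD_SIZE - 1). Comparing that
+ * base against the irq-context pointer (or the thread_info pointer in
+ * sw_ksp[]) is what classifies the interrupted context as interrupt,
+ * kernel, or user.
+ */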
+
+#else
+/*
+ * ldsr_choose_savearea_and_returnvec()
+ * Test our current state (user, kernel, interrupt) and set things up.
+ *
+ * This version of the function uses just the user and kernel stacks and
+ * nests interrupts on the existing kernel stack.
+ */
+static struct pt_regs *ldsr_choose_savearea_and_returnvec(thread_t tid, u32_t linux_sp, u32_t *pvec)
+{
+ struct pt_regs *save_area;
+ u32_t masked_linux_sp = linux_sp & ~(THREAD_SIZE - 1);
+ struct thread_info *ti = (struct thread_info *)sw_ksp[tid];
+
+ if (masked_linux_sp == (u32_t)ti) {
+ /*
+ * Fault/Interrupt occurred while on the kernel stack.
+ */
+ save_area = (struct pt_regs *)((char *)linux_sp - sizeof(struct pt_regs) - 8);
+ *pvec = (u32_t) (&ret_from_interrupt_to_kernel);
+ } else {
+ /*
+ * Fault/Interrupt occurred while on user stack.
+ */
+ ti->interrupt_nesting = 0;
+ save_area = (struct pt_regs *)((u32_t)ti + THREAD_SIZE - sizeof(struct pt_regs) - 8);
+ *pvec = (u32_t) (&ret_from_interrupt_to_user);
+ }
+ save_area->nesting_level = ti->interrupt_nesting;
+ return save_area;
+}
+#endif
+
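+/*
+ * The CSR encodings used below are inferred from their use in this file
+ * (an assumption, not a hardware reference): read_csr sets bit 8 to
+ * enable source-thread select with the thread number in bits 9 and up,
+ * and write_csr sets bit 14 to enable destination-thread select with the
+ * thread number in bits 15 and up. For tid 3 this gives read_csr = 0x700
+ * and write_csr = 0x1c000.
+ */
+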
+/*
+ * ldsr_ctxsw_thread()
+ * Context switch a mainline thread to execute do_IRQ() for the specified
+ * vector.
+ */
+static void ldsr_ctxsw_thread(int vector, thread_t tid)
+{
+ u32_t linux_sp;
+ u32_t return_vector;
+ struct pt_regs *save_area, *regs;
+ u32_t thread_mask = (1 << tid);
+ u32_t read_csr = ((tid << 9) | (1 << 8));
+ u32_t write_csr = (tid << 15) | (1 << 14);
+ u32_t interrupt_vector = (u32_t)(&do_IRQ);
+
+ unsigned int frame_type = UBICOM32_FRAME_TYPE_INTERRUPT;
+
+
+ DEBUG_ASSERT(!thread_is_enabled(tid));
+
+ /*
+ * The thread may have trapped rather than received an
+ * interrupt; if it has, we service the trap instead of
+ * delivering a normal interrupt.
+ */
+ if (unlikely(thread_is_trapped(tid))) {
+ /*
+ * Read the trap cause, the sp and clear the MT_TRAP bits.
+ */
+ unsigned int cause;
+ asm volatile (
+ " setcsr %3 \n\t"
+ " setcsr_flush 0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 %0, TRAP_CAUSE \n\t"
+ " move.4 %1, SP \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 MT_BREAK_CLR, %2\n\t"
+ " move.4 MT_TRAP_CLR, %2 \n\t"
+ : "=&r" (cause), "=&r" (linux_sp)
+ : "r" (thread_mask), "m" (read_csr)
+ );
+
+ ldsr_backout_of_irq(vector, (1 << tid));
+
+#if !defined(CONFIG_UNALIGNED_ACCESS_DISABLED)
+ /*
+ * See if the unaligned trap handler can deal with this.
+ * If so, emulate the instruction and then just restart
+ * the thread.
+ */
+ if (unaligned_only(cause)) {
+#if defined(CONFIG_UNALIGNED_ACCESS_USERSPACE_ONLY)
+ /*
+ * Check if this is a kernel stack; if so, we will not
+ * handle the trap.
+ */
+ u32_t masked_linux_sp = linux_sp & ~(THREAD_SIZE - 1);
+ if ((masked_linux_sp != (u32_t)sw_ksp[tid]) &&
+ unaligned_only(cause)) {
+ ldsr_emulate_and_run(tid);
+ return;
+ }
+#else
+ ldsr_emulate_and_run(tid);
+ return;
+#endif
+
+ }
+#endif
+
+ interrupt_vector = (u32_t)(&trap_handler);
+ frame_type = UBICOM32_FRAME_TYPE_TRAP;
+ } else {
+ /*
+ * Read the target thread's SP
+ */
+ asm volatile (
+ " setcsr %1 \n\t"
+ " setcsr_flush 0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 %0, SP \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ : "=m" (linux_sp)
+ : "m" (read_csr)
+ );
+ }
+
+ /*
+ * We are delivering an interrupt, count it.
+ */
+ ldsr_interrupt.total++;
+
+ /*
+ * At this point, we will definitely force this thread to
+ * a new context, show its interrupts as disabled.
+ */
+ ldsr_thread_irq_disable(tid);
+
+ /*
+ * Test our current state (user, kernel, interrupt). Save the
+ * appropriate data and set up for the return.
+ */
+ save_area = ldsr_choose_savearea_and_returnvec(tid, linux_sp, &return_vector);
+
+ /*
+ * The pt_regs (save_area) records the type of thread that we are dealing
+ * with (KERNEL/NORMAL), which is copied into each pt_regs area. We get
+ * this from the current task's kernel pt_regs area, which always exists
+ * at the top of the kernel stack.
+ */
+ regs = (struct pt_regs *)((u32_t)sw_ksp[tid] + THREAD_SIZE - sizeof(struct pt_regs) - 8);
+ save_area->thread_type = regs->thread_type;
+
+ /*
+ * Preserve the context of the Linux thread.
+ */
+ ldsr_preemptive_context_save(tid, save_area);
+
+ /*
+ * Load the frame_type into the save_area.
+ */
+ save_area->frame_type = frame_type;
+
+#ifdef CONFIG_STOP_ON_TRAP
+ /*
+ * Before backtrace and stack dumps are working well, it sometimes
+ * helps to enter the debugger when a trap occurs, before we switch the
+ * thread over to handle the fault. This optional code causes all threads to
+ * stop on every trap frame. One assumes that GDB connected via the
+ * mailbox interface will be used to recover from this state.
+ */
+ if (frame_type == UBICOM32_FRAME_TYPE_TRAP) {
+ THREAD_STALL;
+ }
+#endif
+
+#ifdef DEBUG_LDSR
+ copy_regs = *save_area;
+ copy_save_area = save_area;
+
+ old_a0 = save_area->an[0];
+ old_a3 = save_area->an[3];
+ old_sp = save_area->an[7];
+ old_a5 = save_area->an[5];
+ old_pc = save_area->pc;
+#endif
+
+ /*
+ * Now we have to switch the kernel thread to run do_IRQ function.
+ * Set pc to do_IRQ
+ * Set d0 to vector
+ * Set d1 to save_area.
+ * Set a5 to the proper return vector.
+ */
+ asm volatile (
+ " setcsr %0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 d0, %5 \n\t" /* d0 = vector # */
+ " move.4 d1, %1 \n\t" /* d1 = save_area */
+ " move.4 sp, %1 \n\t" /* sp = save_area */
+ " move.4 a5, %2 \n\t" /* a5 = return_vector */
+ " move.4 pc, %3 \n\t" /* pc = do_IRQ routine. */
+ " move.4 trap_cause, #0 \n\t" /* Clear the trap cause
+ * register */
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " enable_kernel_ranges %4 \n\t"
+ " move.4 mt_dbg_active_set, %4 \n\t" /* Activate thread even if
+ * in dbg/fault state */
+ " move.4 mt_active_set, %4 \n\t" /* Restart target
+ * thread. */
+ :
+ : "r" (write_csr), "r" (save_area),
+ "r" (return_vector), "r" (interrupt_vector),
+ "d" (thread_mask), "r" (vector)
+ : "cc"
+ );
+ thread_enable_mask(thread_mask);
+}
+
+/*
+ * ldsr_deliver_interrupt()
+ * Deliver the interrupt to one of the threads or all of the threads.
+ */
+static void ldsr_deliver_interrupt(int vector,
+ unsigned long deliver_to,
+ int all)
+{
+ unsigned long disabled_threads;
+ unsigned long possible_threads;
+ unsigned long trapped_threads;
+ unsigned long global_locks;
+
+ /*
+ * Disable all of the threads that we might want to send
+ * this interrupt to.
+ */
+retry:
+ DEBUG_ASSERT(deliver_to);
+ thread_disable_mask(deliver_to);
+
+ /*
+ * If any threads are in the trap state, we have to service the
+ * trap for those threads first.
+ */
+ asm volatile (
+ "move.4 %0, MT_TRAP \n\t"
+ : "=r" (trapped_threads)
+ :
+ );
+
+ trapped_threads &= deliver_to;
+ if (unlikely(trapped_threads)) {
+ /*
+ * all traps will be handled, so clear the trap bit before restarting any threads
+ */
+ ubicom32_clear_interrupt(ldsr_trap_irq);
+
+ /*
+ * Let the remaining untrapped threads continue.
+ */
+ deliver_to &= ~trapped_threads;
+ if (deliver_to) {
+ thread_enable_mask(deliver_to);
+ }
+
+ /*
+ * For the trapped threads, force them to handle
+ * a trap.
+ */
+ while (trapped_threads) {
+ unsigned long which = ffz(~trapped_threads);
+ trapped_threads &= ~(1 << which);
+ ldsr_ctxsw_thread(vector, which);
+ }
+ return;
+ }
+
+ /*
+ * Can we deliver an interrupt to any of the threads?
+ */
+ disabled_threads = ldsr_thread_get_interrupts();
+ possible_threads = deliver_to & ~disabled_threads;
+ if (unlikely(!possible_threads)) {
+#if defined(CONFIG_SMP)
+ /*
+ * In the SMP case, we cannot wait because one cpu might be
+ * sending an IPI to another cpu which is currently blocked.
+ * The only way to ensure IPI delivery is to back out and
+ * keep trying; for SMP we never sleep while interrupts
+ * remain undelivered.
+ */
+ thread_enable_mask(deliver_to);
+ ldsr_backout_of_irq(vector, deliver_to);
+ return;
+#else
+ /*
+ * In the UP case, we have nothing to do so we should wait.
+ *
+ * Since the INT_MASK0 and INT_MASK1 are "re-loaded" before we
+ * suspend in the outer loop, we do not need to save them here.
+ *
+ * We test that we were awakened for our specific interrupts
+ * because the ldsr mask/unmask operations will force the ldsr
+ * awake even if the interrupt on the mainline thread is not
+ * completed.
+ */
+ unsigned int scratch = 0;
+ thread_enable_mask(deliver_to);
+ asm volatile (
+ " move.4 INT_MASK0, %1 \n\t"
+ " move.4 INT_MASK1, #0 \n\t"
+
+ "1: suspend \n\t"
+ " move.4 %0, INT_STAT0 \n\t"
+ " and.4 %0, %0, %1 \n\t"
+ " jmpeq.f 1b \n\t"
+
+ " move.4 INT_CLR0, %2 \n\t"
+ : "+r" (scratch)
+ : "d" (ldsr_suspend_mask), "r" (ldsr_soft_irq_mask)
+ : "cc"
+ );
+
+ /*
+ * This delay is sized to coincide with the time it takes a
+ * thread to complete the exit (see return_from_interrupt).
+ */
+ ldsr_interrupt.retry++;
+ __delay(10);
+ goto retry;
+#endif
+ }
+
+ /*
+ * If any of the global locks are held, we cannot deliver any
+ * interrupts; we spin for __delay(10) and then try again. If our
+ * spinning becomes a bottleneck, we will need to suspend, but for
+ * now let's just spin.
+ */
+ asm volatile (
+ "move.4 %0, scratchpad1 \n\t"
+ : "=r" (global_locks)
+ :
+ );
+ if (unlikely(global_locks & 0xffff0000)) {
+ thread_enable_mask(deliver_to);
+
+ /*
+ * This delay is sized to coincide with the average time it
+ * takes a thread to release a global lock.
+ */
+ ldsr_interrupt.retry++;
+ __delay(10);
+ goto retry;
+ }
+
+ /*
+ * Deliver to one cpu.
+ */
+ if (!all) {
+ /*
+ * Find our victim and then enable everyone else.
+ */
+ unsigned long victim = ldsr_rotate_threads(possible_threads);
+ DEBUG_ASSERT((deliver_to & (1 << victim)));
+ DEBUG_ASSERT((possible_threads & (1 << victim)));
+
+ deliver_to &= ~(1 << victim);
+ if (deliver_to) {
+ thread_enable_mask(deliver_to);
+ }
+ ldsr_ctxsw_thread(vector, victim);
+ return;
+ }
+
+ /*
+ * If we can't deliver to some threads, wake them
+ * back up and reset things so the interrupt is delivered to them later.
+ */
+ deliver_to &= ~possible_threads;
+ if (unlikely(deliver_to)) {
+ thread_enable_mask(deliver_to);
+ ldsr_backout_of_irq(vector, deliver_to);
+ }
+
+ /*
+ * Deliver to all possible threads.
+ */
+ while (possible_threads) {
+ unsigned long victim = ffz(~possible_threads);
+ possible_threads &= ~(1 << victim);
+ ldsr_ctxsw_thread(vector, victim);
+ }
+}
+
+/*
+ * ldsr_thread()
+ * This thread acts as the interrupt controller for Linux.
+ */
+static void ldsr_thread(void *arg)
+{
+ int stat0;
+ int stat1;
+ int interrupt0;
+ int interrupt1;
+ long long interrupts;
+ unsigned long cpus;
+
+#if !defined(CONFIG_SMP)
+ /*
+ * In a non-smp configuration, we cannot use the cpu arrays because
+ * there is not a 1-1 correspondence between cpus and our threads.
+ * Thus we must get a local idea of the mainline threads and use the
+ * one and only set bit as the victim. We do this once before the ldsr
+ * loop.
+ *
+ * In the SMP case, we will use the cpu(s) map to determine which cpu(s)
+ * are valid to send interrupts to.
+ */
+ int victim = 0;
+ unsigned int mainline = thread_get_mainline();
+ if (mainline == 0) {
+ panic("no mainline Linux threads to interrupt");
+ return;
+ }
+ victim = ffz(~mainline);
+ cpus = (1 << victim);
+#endif
+
+ while (1) {
+ /*
+ * If one changes this code not to reload the INT_MASK(s), you
+ * need to know that code in the lock waiting above does not
+ * reset the MASK registers back; so that code will need to be
+ * changed.
+ */
+ ldsr_lock_acquire();
+ asm volatile (
+ " move.4 INT_MASK0, %0 \n\t"
+ " move.4 INT_MASK1, %1 \n\t"
+ :
+ : "U4" (ldsr_interrupt.mask0), "U4" (ldsr_interrupt.mask1)
+ );
+ ldsr_lock_release();
+ thread_suspend();
+
+ /*
+ * Read the interrupt status registers
+ */
+ asm volatile (
+ "move.4 %0, INT_STAT0 \n\t"
+ "move.4 %1, INT_STAT1 \n\t"
+ : "=r" (stat0), "=r" (stat1)
+ :
+ );
+
+ /*
+ * We only care about interrupts that we have been told to care
+ * about. The interrupt must be enabled, unmasked, and have
+ * occurred in the hardware.
+ */
+ ldsr_lock_acquire();
+ interrupt0 = ldsr_interrupt.enabled0 &
+ ldsr_interrupt.mask0 & stat0;
+ interrupt1 = ldsr_interrupt.enabled1 &
+ ldsr_interrupt.mask1 & stat1;
+ ldsr_lock_release();
+
+ /*
+ * For each interrupt in the "snapshot" we will mask the
+ * interrupt and handle it (typically by calling do_IRQ()).
+ *
+ * The interrupt is unmasked by the desc->chip->end() function in
+ * the per-chip generic interrupt handling code
+ * (arch/ubicom32/kernel/irq.c).
+ */
+ interrupts = ((unsigned long long)interrupt1 << 32) |
+ interrupt0;
+ while (interrupts) {
+ int all = 0;
+ int vector = ldsr_rotate_interrupts(interrupts);
+ interrupts &= ~((unsigned long long)1 << vector);
+
+ /*
+ * Now mask off this vector so that the LDSR ignores
+ * it until it is acknowledged.
+ */
+ ldsr_mask_vector(vector);
+#if !defined(CONFIG_SMP)
+ ldsr_deliver_interrupt(vector, cpus, all);
+#else
+ cpus = smp_get_affinity(vector, &all);
+ if (!cpus) {
+ /*
+ * No CPU to deliver to so just leave
+ * the interrupt unmasked and increase
+ * the backout count. We will eventually
+ * return and deliver it again.
+ */
+ ldsr_unmask_vector(vector);
+ ldsr_interrupt.backout++;
+ continue;
+ }
+ ldsr_deliver_interrupt(vector, cpus, all);
+#endif
+ }
+ }
+
+ /* NOTREACHED */
+}
+
+/*
+ * ldsr_mask_vector()
+ * Temporarily mask the interrupt vector, turn off the bit in the mask
+ * register.
+ */
+void ldsr_mask_vector(unsigned int vector)
+{
+ unsigned int mask;
+ if (vector < 32) {
+ mask = ~(1 << vector);
+ ldsr_lock_acquire();
+ ldsr_interrupt.mask0 &= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+ return;
+ }
+
+ mask = ~(1 << (vector - 32));
+ ldsr_lock_acquire();
+ ldsr_interrupt.mask1 &= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+}
+
+/*
+ * ldsr_unmask_vector()
+ * Unmask the interrupt vector so that it can be used, turn on the bit in
+ * the mask register.
+ *
+ * Because it is legal for the interrupt path to disable an interrupt,
+ * the unmasking code must ensure that disabled interrupts are not
+ * unmasked.
+ */
+void ldsr_unmask_vector(unsigned int vector)
+{
+ unsigned int mask;
+ if (vector < 32) {
+ mask = (1 << vector);
+ ldsr_lock_acquire();
+ ldsr_interrupt.mask0 |= (mask & ldsr_interrupt.enabled0);
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+ return;
+ }
+
+ mask = (1 << (vector - 32));
+ ldsr_lock_acquire();
+ ldsr_interrupt.mask1 |= (mask & ldsr_interrupt.enabled1);
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+}
+
+/*
+ * ldsr_enable_vector()
+ * The LDSR implements an interrupt controller and has a local (to the
+ * LDSR) copy of its interrupt mask.
+ */
+void ldsr_enable_vector(unsigned int vector)
+{
+ unsigned int mask;
+ if (vector < 32) {
+ mask = (1 << vector);
+ ldsr_lock_acquire();
+ ldsr_interrupt.enabled0 |= mask;
+ ldsr_interrupt.mask0 |= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+ return;
+ }
+
+ mask = (1 << (vector - 32));
+ ldsr_lock_acquire();
+ ldsr_interrupt.enabled1 |= mask;
+ ldsr_interrupt.mask1 |= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+}
+
+/*
+ * ldsr_disable_vector()
+ * The LDSR implements an interrupt controller and has a local (to the
+ * LDSR) copy of its interrupt mask.
+ */
+void ldsr_disable_vector(unsigned int vector)
+{
+ unsigned int mask;
+
+ if (vector < 32) {
+ mask = ~(1 << vector);
+ ldsr_lock_acquire();
+ ldsr_interrupt.enabled0 &= mask;
+ ldsr_interrupt.mask0 &= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+ return;
+ }
+
+ mask = ~(1 << (vector - 32));
+ ldsr_lock_acquire();
+ ldsr_interrupt.enabled1 &= mask;
+ ldsr_interrupt.mask1 &= mask;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+}
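+
+#if 0
+/*
+ * Illustrative refactoring sketch (an assumption, not part of the port):
+ * the four vector operations above share one shape -- pick mask0 or mask1
+ * by vector number, update it under the LDSR lock, and kick the LDSR
+ * thread via thread_resume(). A generic helper would look roughly like
+ * this; the enabled0/enabled1 updates, and the AND against the enabled
+ * word that ldsr_unmask_vector() performs, would layer on top of it.
+ */
+static void ldsr_vector_update(unsigned int vector, int set)
+{
+ volatile unsigned int *word = (vector < 32) ?
+ &ldsr_interrupt.mask0 : &ldsr_interrupt.mask1;
+ unsigned int bit = 1 << (vector & 31);
+
+ ldsr_lock_acquire();
+ if (set)
+ *word |= bit;
+ else
+ *word &= ~bit;
+ ldsr_lock_release();
+ thread_resume(ldsr_tid);
+}
+#endif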
+
+/*
+ * ldsr_get_threadid()
+ * Return the threadid of the LDSR thread.
+ */
+thread_t ldsr_get_threadid(void)
+{
+ return ldsr_tid;
+}
+
+/*
+ * ldsr_set_trap_irq()
+ * Save away the trap Soft IRQ
+ *
+ * See the per-thread lock suspend code above for an explanation.
+ */
+void ldsr_set_trap_irq(unsigned int irq)
+{
+ ldsr_trap_irq = irq;
+ ldsr_trap_irq_mask = (1 << irq);
+ ldsr_suspend_mask |= ldsr_trap_irq_mask;
+}
+
+/*
+ * ldsr_init()
+ * Initialize the LDSR (Interrupt Controller)
+ */
+void ldsr_init(void)
+{
+#if defined(CONFIG_IRQSTACKS)
+ int i;
+ union irq_ctx *icp;
+#endif
+
+ void *stack_high = (void *)ldsr_stack_space;
+ stack_high += sizeof(ldsr_stack_space);
+ stack_high -= 8;
+
+
+ /*
+ * Obtain a soft IRQ to use
+ */
+ if (irq_soft_alloc(&ldsr_soft_irq) < 0) {
+ panic("no software IRQ is available\n");
+ return;
+ }
+ ldsr_soft_irq_mask |= (1 << ldsr_soft_irq);
+ ldsr_suspend_mask |= ldsr_soft_irq_mask;
+
+ /*
+ * Now allocate and start the LDSR thread.
+ */
+ ldsr_tid = thread_alloc();
+ if (ldsr_tid < 0) {
+ panic("no thread available to run LDSR");
+ return;
+ }
+
+#if defined(CONFIG_IRQSTACKS)
+ /*
+ * Initialize the per-cpu irq thread_info structure that
+ * is at the top of each per-cpu irq stack.
+ */
+ icp = (union irq_ctx *)
+ (((unsigned long)percpu_irq_stacks + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1));
+ for (i = 0; i < NR_CPUS; i++) {
+ struct thread_info *ti = &(icp->tinfo);
+ ti->task = NULL;
+ ti->exec_domain = NULL;
+ ti->cpu = i;
+ ti->preempt_count = 0;
+ ti->interrupt_nesting = 0;
+ percpu_irq_ctxs[i] = icp++;
+ }
+#endif
+ thread_start(ldsr_tid, ldsr_thread, NULL,
+ stack_high, THREAD_TYPE_NORMAL);
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/module.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/module.c
new file mode 100644
index 000000000..3d29dc2b8
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/module.c
@@ -0,0 +1,463 @@
+/*
+ * arch/ubicom32/kernel/module.c
+ * Ubicom32 architecture loadable module support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/moduleloader.h>
+#include <linux/bug.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/ocm-alloc.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+static void _module_free_ocm(struct module *mod)
+{
+ printk(KERN_INFO "module arch cleanup %s: OCM instruction memory free "
+ "of %d @%p\n", mod->name, mod->arch.ocm_inst_size,
+ mod->arch.ocm_inst);
+
+ if (mod->arch.ocm_inst) {
+ ocm_inst_free(mod->arch.ocm_inst);
+ mod->arch.ocm_inst = 0;
+ mod->arch.ocm_inst_size = 0;
+ }
+}
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+
+ /*
+ * This is expected to be final module free, use this to prune the
+ * ocm
+ */
+ if (module_region && module_region == mod->module_core)
+ _module_free_ocm(mod);
+
+}
+
+/*
+ * module_frob_arch_sections()
+ * Called from kernel/module.c allowing arch specific handling of
+ * sections/headers.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ Elf_Shdr *s, *sechdrs_end;
+ void *ocm_inst = NULL;
+ int ocm_inst_size = 0;
+
+ /*
+ * Ubicom32 v3 and v4 are almost binary compatible but not completely.
+ * To be safe, check that the module was compiled with the correct
+ * -march, which is recorded in the ELF header flags.
+ */
+#ifdef CONFIG_UBICOM32_V4
+ if ((hdr->e_flags & 0xFFFF) != EF_UBICOM32_V4) {
+ printk(KERN_WARNING "Module %s was not compiled for "
+ "ubicom32v4, elf_flags:%x,\n",
+ mod->name, hdr->e_flags);
+ return -ENOEXEC;
+ }
+#elif defined CONFIG_UBICOM32_V3
+ if ((hdr->e_flags & 0xFFFF) != EF_UBICOM32_V3) {
+ printk(KERN_WARNING "Module %s was not compiled for "
+ "ubicom32v3, elf_flags:%x\n",
+ mod->name, hdr->e_flags);
+ return -ENOEXEC;
+ }
+#else
+#error Unknown/Unsupported ubicom32 architecture.
+#endif
+
+ /*
+ * XXX: sechdrs are vmalloced in kernel/module.c
+ * and would be vfreed just after module is loaded,
+ * so we hack to keep the only information we need
+ * in mod->arch to correctly free the OCM memory later.
+ * NOTE: this breaks the semantic of mod->arch structure.
+ */
+ sechdrs_end = sechdrs + hdr->e_shnum;
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ if (strncmp(".ocm_text", secstrings + s->sh_name, 9) == 0)
+ ocm_inst_size += s->sh_size;
+ }
+
+ if (!ocm_inst_size)
+ return 0;
+
+ ocm_inst = ocm_inst_alloc(ocm_inst_size, 0 /* internal */);
+ if (ocm_inst == NULL) {
+#ifdef CONFIG_OCM_MODULES_FALLBACK_TO_DDR
+ printk(KERN_WARNING
+ "module %s: OCM instruction memory allocation of %d "
+ "failed, falling back to DDR\n", mod->name, ocm_inst_size);
+ return 0;
+#else
+ printk(KERN_ERR
+ "module %s: OCM instruction memory allocation of %d "
+ "failed.\n", mod->name, ocm_inst_size);
+ return -ENOMEM;
+#endif
+ }
+
+ mod->arch.ocm_inst = ocm_inst;
+ mod->arch.ocm_inst_size = ocm_inst_size;
+
+ printk(KERN_INFO
+ "module %s: OCM instruction memory allocation of %d @%p\n",
+ mod->name, mod->arch.ocm_inst_size, mod->arch.ocm_inst);
+
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ if (strncmp(".ocm_text", secstrings + s->sh_name, 9) == 0) {
+ memcpy(ocm_inst, (void *)s->sh_addr, s->sh_size);
+ s->sh_flags &= ~SHF_ALLOC;
+ s->sh_addr = (unsigned long)ocm_inst;
+ ocm_inst += s->sh_size;
+ }
+ }
+
+ return 0;
+}
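+
+/*
+ * Illustrative walk-through of the loops above (made-up sizes): a module
+ * with two .ocm_text sections of 0x100 and 0x80 bytes gets one 0x180-byte
+ * OCM allocation. Each section is copied into it in turn, its SHF_ALLOC
+ * flag is cleared so the generic loader stops laying it out in DDR, and
+ * its sh_addr is redirected to its new OCM home so that relocations
+ * resolve against the OCM copy.
+ */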
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ DEBUGP("Invalid Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ return -EINVAL;
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ uint32_t insn;
+
+ DEBUGP("Applying relocate_add section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ uint32_t v;
+ const int elf32_rtype = ELF32_R_TYPE(rel[i].r_info);
+
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+
+ v = rel[i].r_addend + sym->st_value;
+
+
+ switch (elf32_rtype) {
+ case R_UBICOM32_32:
+ {
+ /*
+ * Store the 32 bit relocation as is.
+ */
+ *location = v;
+ break;
+ }
+ case R_UBICOM32_HI24:
+ {
+ /*
+ * 24 bit relocation that is part of the MOVEAI
+ * instruction. The 24 bits come from bits 7 - 30 of the
+ * relocation. These bits eventually get split into 2
+ * fields in the instruction encoding.
+ *
+ * - Bits 7 - 27 of the relocation are encoded into bits
+ * 0 - 20 of the instruction.
+ *
+ * - Bits 28 - 30 of the relocation are encoded into
+ * bit 24 - 26 of the instruction.
+ */
+ uint32_t valid24 = (v >> 7) & 0xffffff;
+ insn = *location;
+
+ insn &= ~(0x1fffff | (0x7 << 24));
+ insn |= (valid24 & 0x1fffff);
+ insn |= ((valid24 & 0xe00000) << 3);
+ *location = insn;
+ }
+ break;
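+ /*
+ * Worked example (illustrative): v = 0x12345678 gives
+ * valid24 = (v >> 7) & 0xffffff = 0x2468ac; bits 0 - 20 of
+ * the instruction become 0x0468ac and bits 24 - 26 become
+ * 001 (from bits 21 - 23 of valid24).
+ */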
+ case R_UBICOM32_LO7_S:
+ case R_UBICOM32_LO7_2_S:
+ case R_UBICOM32_LO7_4_S:
+ {
+ /*
+ * Bits 0 - 6 of the relocation are encoded into the
+ * 7bit unsigned immediate fields of the SOURCE-1 field
+ * of the instruction. The immediate value is left
+ * shifted by (0, 1, 2) based on the operand size.
+ */
+ uint32_t valid7 = v & 0x7f;
+ insn = *location;
+
+ if (elf32_rtype == R_UBICOM32_LO7_2_S) {
+ valid7 >>= 1;
+ } else if (elf32_rtype == R_UBICOM32_LO7_4_S) {
+ valid7 >>= 2;
+ }
+
+ insn &= ~(0x1f | (0x3 << 8));
+ insn |= (valid7 & 0x1f);
+ insn |= ((valid7 & 0x60) << 3);
+ *location = insn;
+ }
+ break;
+ case R_UBICOM32_LO7_D:
+ case R_UBICOM32_LO7_2_D:
+ case R_UBICOM32_LO7_4_D:
+ {
+ /*
+ * Bits 0 - 6 of the relocation are encoded into the
+ * 7bit unsigned immediate fields of the DESTINATION
+ * field of the instruction. The immediate value is
+ * left shifted by (0, 1, 2) based on the operand size.
+ */
+ uint32_t valid7 = v & 0x7f;
+ insn = *location;
+
+ if (elf32_rtype == R_UBICOM32_LO7_2_D) {
+ valid7 >>= 1;
+ } else if (elf32_rtype == R_UBICOM32_LO7_4_D) {
+ valid7 >>= 2;
+ }
+
+ insn &= ~((0x1f | (0x3 << 8)) << 16);
+ insn |= ((valid7 & 0x1f) << 16);
+ insn |= ((valid7 & 0x60) << 19);
+ *location = insn;
+ }
+ break;
+ case R_UBICOM32_LO7_CALLI:
+ case R_UBICOM32_LO16_CALLI:
+ {
+ /*
+ * Extract the offset for a CALLI instruction. The
+ * offsets can be either 7 bits or 18 bits. Since all
+ * instructions in the ubicom32 architecture are at word-
+ * aligned addresses, the truncated offset is right
+ * shifted by 2 before being encoded in the instruction.
+ */
+ uint32_t val;
+ if (elf32_rtype == R_UBICOM32_LO7_CALLI) {
+ val = v & 0x7f;
+ } else {
+ val = v & 0x3ffff;
+ }
+
+ val >>= 2;
+
+ insn = *location;
+
+ insn &= ~0x071f071f;
+ insn |= (val & 0x1f) << 0;
+ val >>= 5;
+ insn |= (val & 0x07) << 8;
+ val >>= 3;
+ insn |= (val & 0x1f) << 16;
+ val >>= 5;
+ insn |= (val & 0x07) << 24;
+ *location = insn;
+ }
+ break;
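+ /*
+ * Worked example (illustrative): for R_UBICOM32_LO16_CALLI
+ * with v = 0x3453c, val = (v & 0x3ffff) >> 2 = 0xd14f, which
+ * is spread as bits 0 - 4 = 0xf, bits 8 - 10 = 0x2,
+ * bits 16 - 20 = 0x11 and bits 24 - 26 = 0x6 of the
+ * instruction.
+ */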
+ case R_UBICOM32_24_PCREL:
+ {
+ /*
+ * Extract 26 bit signed PC relative offset for CALL
+ * instructions. Since instruction addresses are word
+ * aligned, the offset is right shifted by 2 before
+ * being encoded into the instruction.
+ */
+ int32_t val = v - (int32_t)location;
+
+ /*
+ * Check that the top 7 bits all equal the sign bit of
+ * the 26-bit offset, i.e. all 0s or all 1s. If they do
+ * not, the absolute difference is greater than 25 bits.
+ */
+ if (((uint32_t)val & 0xFE000000) != 0xFE000000 &&
+ ((uint32_t)val & 0xFE000000) != 0x0) {
+ /*
+ * The relocation is beyond our addressable
+ * range with a 26 bit call.
+ */
+ printk(KERN_ERR "module %s: PC Relative "
+ "relocation out of range: "
+ "%u (%x->%x, %x)\n",
+ me->name, elf32_rtype,
+ v, (uint32_t) location, val);
+ return -ENOEXEC;
+ }
+
+ val = (val & 0x3ffffff) >> 2;
+ insn = *location;
+ insn = insn & 0xf8e00000;
+
+ insn |= (val >> 21) << 24;
+ insn |= (val & 0x1fffff);
+ *location = insn;
+ }
+ break;
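+ /*
+ * Worked example of the range check (illustrative): val =
+ * 0x03000000 (a forward branch of ~48MB) matches neither
+ * mask test and is rejected, while val = 0xffff0000 (a
+ * short backward branch) has its top seven bits all set
+ * and passes.
+ */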
+ case R_UBICOM32_LO16:
+ case R_UBICOM32_HI16:
+ {
+ /*
+ * 16 bit immediate value that is encoded into bit 0 -
+ * 15 of the instruction.
+ */
+ uint32_t val;
+
+ if (elf32_rtype == R_UBICOM32_LO16) {
+ val = v & 0xffff;
+ } else {
+ val = (v >> 16) & 0xffff;
+ }
+
+ insn = *location;
+ insn &= 0xffff0000;
+
+ insn |= val;
+ *location = insn;
+ }
+ break;
+ case R_UBICOM32_21_PCREL:
+ {
+ /*
+ * Extract 23 bit signed PC relative offset for JMP<cc>
+ * instructions. Since instruction addresses are word
+ * aligned, the offset is right shifted by 2 before
+ * being encoded into the instruction.
+ */
+ int32_t val = v - (int32_t)location;
+
+ val = (val & 0x7fffff) >> 2;
+ insn = *location;
+ insn = insn & 0xffe00000;
+
+ insn |= (val >> 21) << 24;
+ insn |= val;
+ *location = insn;
+ }
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, elf32_rtype);
+ BUG();
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+ unsigned int i, strindex = 0, symindex = 0;
+ char *secstrings;
+ int err;
+
+ err = module_bug_finalize(hdr, sechdrs, mod);
+ if (err)
+ return err;
+
+ if (!mod->arch.ocm_inst) {
+ /*
+ * No OCM code, so nothing more to do.
+ */
+ return 0;
+ }
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ }
+ }
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ if ((sechdrs[i].sh_type == SHT_RELA) &&
+ (strncmp(".rela.ocm_text",
+ secstrings + sechdrs[i].sh_name, 5 + 9) == 0)) {
+ err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
+ symindex, i, mod);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ module_bug_cleanup(mod);
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/os_node.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/os_node.c
new file mode 100644
index 000000000..9e014d5cb
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/os_node.c
@@ -0,0 +1,88 @@
+/*
+ * arch/ubicom32/kernel/os_node.c
+ * Static OS information consumed by the boot code for compatibility checks.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include "linux/types.h"
+#include "linux/linkage.h"
+#include "linux/uts.h"
+#include "linux/utsrelease.h"
+#include "linux/version.h"
+#include <asm/ocm_size.h>
+#include <asm/devtree.h>
+#include <asm/ip5000.h>
+
+extern asmlinkage void *_start;
+
+/*
+ * This file provides static information to the boot code, allowing it to
+ * decide whether the OS is compatible and thus to avoid accidentally
+ * booting a kernel that has no hope of running.
+ */
+struct os_node {
+ struct devtree_node node;
+ unsigned long version; /* Layout version of this node */
+ unsigned long entry_point;
+ const char os_name[32]; /* For diagnostic purposes only */
+ const char os_version_str[32];
+ unsigned long os_version_num;
+ unsigned long expected_ocm_code_start;/* OS Code */
+ unsigned long expected_ocm_data_end; /* OS Data */
+ unsigned long expected_ram_start;
+ unsigned long expected_ram_end;
+ unsigned long arch_version;
+ unsigned long expected_os_syscall_begin;
+ unsigned long expected_os_syscall_end;
+};
+
+
+extern void __os_syscall_begin;
+extern void __os_syscall_end;
+/*
+ * The os_node is only referenced by head.S and should never be modified at
+ * run-time.
+ */
+asmlinkage const struct os_node _os_node = {
+ .node = {
+ .next = NULL,
+ .name = { "OS" },
+ .magic = 0x10203040,
+ },
+ .version = 0x10002,
+ .entry_point = (unsigned long)&_start,
+#if APP_OCM_CODE_SIZE || APP_OCM_DATA_SIZE
+ .expected_ocm_code_start = OCMSTART + APP_OCM_CODE_SIZE,
+ .expected_ocm_data_end = OCMEND - APP_OCM_DATA_SIZE,
+#else
+ .expected_ocm_code_start = OCMEND,
+ .expected_ocm_data_end = OCMEND,
+#endif
+ .os_name = { UTS_SYSNAME },
+ .os_version_str = { UTS_RELEASE },
+ .os_version_num = LINUX_VERSION_CODE,
+ .expected_ram_start = KERNELSTART,
+ .expected_ram_end = SDRAMSTART + CONFIG_MIN_RAMSIZE,
+ .arch_version = UBICOM32_ARCH_VERSION,
+ .expected_os_syscall_begin = (unsigned long)&__os_syscall_begin,
+ .expected_os_syscall_end = (unsigned long)&__os_syscall_end,
+};
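+
+#if 0
+/*
+ * Illustrative sketch only (an assumption, not the actual boot code): a
+ * loader could consume _os_node roughly like this before jumping to
+ * entry_point. The magic value matches the node initializer above.
+ */
+static int os_node_is_bootable(const struct os_node *os,
+ unsigned long ram_start,
+ unsigned long ram_end,
+ unsigned long arch_version)
+{
+ return os->node.magic == 0x10203040 &&
+ os->arch_version == arch_version &&
+ os->expected_ram_start >= ram_start &&
+ os->expected_ram_end <= ram_end;
+}
+#endif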
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/process.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/process.c
new file mode 100644
index 000000000..23872fed0
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/process.c
@@ -0,0 +1,634 @@
+/*
+ * arch/ubicom32/kernel/process.c
+ * Ubicom32 architecture-dependent process handling.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ * uClinux changes
+ * Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com>
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/fs.h>
+#include <linux/pm.h>
+
+#include <linux/uaccess.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+#include <asm/ip5000.h>
+#include <asm/range-protect.h>
+
+#define DUMP_RANGE_REGISTER(REG, IDX) asm volatile ( \
+ " move.4 %0, "REG"_RANGE"IDX"_EN \n\t" \
+ " move.4 %1, "REG"_RANGE"IDX"_LO \n\t" \
+ " move.4 %2, "REG"_RANGE"IDX"_HI \n\t" \
+ : "=d"(en), "=d"(lo), "=d"(hi) \
+ ); \
+ printk(KERN_NOTICE REG"Range"IDX": en:%08x, range: %08x-%08x\n", \
+ (unsigned int)en, \
+ (unsigned int)lo, \
+ (unsigned int)hi)
+
+asmlinkage void ret_from_fork(void);
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+/* machine-dependent / hardware-specific power functions */
+void (*mach_reset)(void);
+void (*mach_halt)(void);
+void (*mach_power_off)(void);
+
+/*
+ * cpu_idle()
+ * The idle thread.
+ *
+ * Our idle loop suspends and is woken up by a timer interrupt.
+ */
+void cpu_idle(void)
+{
+ while (1) {
+ local_irq_disable();
+ while (!need_resched()) {
+ local_irq_enable();
+ thread_suspend();
+ local_irq_disable();
+ }
+ local_irq_enable();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+/*
+ * dump_fpu()
+ *
+ * Fill in the fpu structure for a core dump. (just a stub as we don't have
+ * an fpu)
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
+{
+ return 1;
+}
+
+/*
+ * machine_restart()
+ * Resets the system.
+ */
+void machine_restart(char *__unused)
+{
+ /*
+ * Disable all threads except myself. We can do this
+ * directly without needing to call smp_send_stop
+ * because we have a unique architecture where
+ * one thread can disable one or more other threads.
+ */
+ thread_disable_others();
+
+ /*
+ * Call the hardware-specific machine reset function.
+ */
+ if (mach_reset) {
+ mach_reset();
+ }
+
+ printk(KERN_EMERG "System Restarting\n");
+
+ /*
+ * Set the watchdog to trigger after a 1 ms delay; the OSC is fixed
+ * at 12 MHz, so 12000000 / 1000 = 12000 timer ticks.
+ */
+ UBICOM32_IO_TIMER->tkey = TIMER_TKEYVAL;
+ UBICOM32_IO_TIMER->wdcom = UBICOM32_IO_TIMER->mptval +
+ (12000000 / 1000);
+ UBICOM32_IO_TIMER->wdcfg = 0;
+ UBICOM32_IO_TIMER->tkey = 0;
+
+ /*
+ * Wait for watchdog
+ */
+ asm volatile (
+ " move.4 MT_EN, #0 \n\t"
+ " pipe_flush 0 \n\t"
+ );
+
+ local_irq_disable();
+ for (;;) {
+ thread_suspend();
+ }
+}
+
+/*
+ * machine_halt()
+ * Halt the machine.
+ *
+ * Similar to machine_power_off, but don't shut off power. Add code
+ * here to freeze the system for e.g. post-mortem debug purposes when
+ * possible. This halt has nothing to do with the idle halt.
+ */
+void machine_halt(void)
+{
+ /*
+ * Disable all threads except myself. We can do this
+ * directly without needing to call smp_send_stop
+ * because we have a unique architecture where
+ * one thread can disable one or more other threads.
+ */
+ thread_disable_others();
+
+ /*
+ * Call the hardware-specific machine halt function.
+ */
+ if (mach_halt) {
+ mach_halt();
+ }
+
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ for (;;) {
+ thread_suspend();
+ }
+}
+
+/*
+ * machine_power_off()
+ * Turn the power off, if a power off handler is defined, otherwise, spin
+ * endlessly.
+ */
+void machine_power_off(void)
+{
+ /*
+ * Disable all threads except myself. We can do this
+ * directly without needing to call smp_send_stop
+ * because we have a unique architecture where
+ * one thread can disable one or more other threads.
+ */
+ thread_disable_others();
+
+ /*
+ * Call the hardware-specific machine power off function.
+ */
+ if (mach_power_off) {
+ mach_power_off();
+ }
+
+ printk(KERN_EMERG "System Halted, OK to turn off power\n");
+ local_irq_disable();
+ for (;;) {
+ thread_suspend();
+ }
+}
+
+/*
+ * address_is_valid()
+ * check if an address is valid -- (for read access)
+ */
+static bool address_is_valid(const void *address)
+{
+ int addr = (int)address;
+ unsigned long socm, eocm, sdram, edram;
+
+ if (addr & 3)
+ return false;
+
+ processor_ocm(&socm, &eocm);
+ processor_dram(&sdram, &edram);
+ if (addr >= socm && addr < eocm)
+ return true;
+
+ if (addr >= sdram && addr < edram)
+ return true;
+
+ return false;
+}
+
+/*
+ * vma_path_name_is_valid()
+ * check if path_name of a vma is a valid string
+ */
+static bool vma_path_name_is_valid(const char *str)
+{
+#define MAX_NAME_LEN 256
+ int i = 0;
+ if (!address_is_valid(str))
+ return false;
+
+ for (; i < MAX_NAME_LEN; i++, str++) {
+ if (*str == '\0')
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * show_vmas()
+ * show vma info of a process
+ */
+void show_vmas(struct task_struct *task)
+{
+#ifdef CONFIG_DEBUG_VERBOSE
+#define UBICOM32_MAX_VMA_COUNT 1024
+
+ struct vm_area_struct *vma;
+ struct file *file;
+ char *name = "";
+ int flags, loop = 0;
+
+ printk(KERN_NOTICE "Start of vma list\n");
+
+ if (!address_is_valid(task) || !address_is_valid(task->mm))
+ goto error;
+
+ vma = task->mm->mmap;
+ while (vma) {
+ if (!address_is_valid(vma))
+ goto error;
+
+ flags = vma->vm_flags;
+ file = vma->vm_file;
+
+ if (file) {
+ /* seems better to use dentry op here, but sanity check is easier this way */
+ if (!address_is_valid(file) || !address_is_valid(file->f_path.dentry) || !vma_path_name_is_valid(file->f_path.dentry->d_name.name))
+ goto error;
+
+ name = (char *)file->f_path.dentry->d_name.name;
+ }
+
+ /* Similar to /proc/pid/maps format */
+ printk(KERN_NOTICE "%08lx-%08lx %c%c%c%c %08lx %s\n",
+ vma->vm_start,
+ vma->vm_end,
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+ flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+ vma->vm_pgoff << PAGE_SHIFT,
+ name);
+
+ vma = vma->vm_next;
+
+ if (loop++ > UBICOM32_MAX_VMA_COUNT)
+ goto error;
+ }
+
+ printk(KERN_NOTICE "End of vma list\n");
+ return;
+
+error:
+ printk(KERN_NOTICE "\nCorrupted vma list, abort!\n");
+#endif
+}
+
+/*
+ * show_regs()
+ * Print out all of the registers.
+ */
+void show_regs(struct pt_regs *regs)
+{
+ unsigned int i;
+ unsigned int en, lo, hi;
+
+ printk(KERN_NOTICE "regs: %p, tid: %d\n",
+ (void *)regs,
+ thread_get_self());
+
+ printk(KERN_NOTICE "pc: %08x, previous_pc: %08x\n\n",
+ (unsigned int)regs->pc,
+ (unsigned int)regs->previous_pc);
+
+ printk(KERN_NOTICE "Data registers\n");
+ for (i = 0; i < 16; i++) {
+ printk("D%02d: %08x, ", i, (unsigned int)regs->dn[i]);
+ if ((i % 4) == 3) {
+ printk("\n");
+ }
+ }
+ printk("\n");
+
+ printk(KERN_NOTICE "Address registers\n");
+ for (i = 0; i < 8; i++) {
+ printk("A%02d: %08x, ", i, (unsigned int)regs->an[i]);
+ if ((i % 4) == 3) {
+ printk("\n");
+ }
+ }
+ printk("\n");
+
+ printk(KERN_NOTICE "acc0: %08x-%08x, acc1: %08x-%08x\n",
+ (unsigned int)regs->acc0[1],
+ (unsigned int)regs->acc0[0],
+ (unsigned int)regs->acc1[1],
+ (unsigned int)regs->acc1[0]);
+
+ printk(KERN_NOTICE "mac_rc16: %08x, source3: %08x\n",
+ (unsigned int)regs->mac_rc16,
+ (unsigned int)regs->source3);
+
+ printk(KERN_NOTICE "inst_cnt: %08x, csr: %08x\n",
+ (unsigned int)regs->inst_cnt,
+ (unsigned int)regs->csr);
+
+ printk(KERN_NOTICE "int_mask0: %08x, int_mask1: %08x\n",
+ (unsigned int)regs->int_mask0,
+ (unsigned int)regs->int_mask1);
+
+ /*
+ * Dump range registers
+ */
+ DUMP_RANGE_REGISTER("I", "0");
+ DUMP_RANGE_REGISTER("I", "1");
+ DUMP_RANGE_REGISTER("I", "2");
+ DUMP_RANGE_REGISTER("I", "3");
+ DUMP_RANGE_REGISTER("D", "0");
+ DUMP_RANGE_REGISTER("D", "1");
+ DUMP_RANGE_REGISTER("D", "2");
+ DUMP_RANGE_REGISTER("D", "3");
+ DUMP_RANGE_REGISTER("D", "4");
+
+ printk(KERN_NOTICE "frame_type: %d, nesting_level: %d, thread_type %d\n\n",
+ (int)regs->frame_type,
+ (int)regs->nesting_level,
+ (int)regs->thread_type);
+}
+
+/*
+ * kernel_thread_helper()
+ * On execution, d0 will be 0 and d1 will be the argument to be passed
+ * to the kernel function. d2 contains the kernel function that needs
+ * to get called, and d3 contains the address of do_exit, which needs
+ * to be moved into a5. On return from fork the child thread's d0 will
+ * be 0; we call this dummy function, which in turn loads the argument
+ * and calls the kernel function.
+ */
+asmlinkage void kernel_thread_helper(void);
+
+/*
+ * kernel_thread()
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.dn[1] = (unsigned long)arg;
+ regs.dn[2] = (unsigned long)fn;
+ regs.dn[3] = (unsigned long)do_exit;
+ regs.an[5] = (unsigned long)kernel_thread_helper;
+ regs.pc = (unsigned long)kernel_thread_helper;
+ regs.nesting_level = 0;
+ regs.thread_type = KERNEL_THREAD;
+
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
+ 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * flush_thread()
+ * XXX todo
+ */
+void flush_thread(void)
+{
+ /* XXX todo */
+}
+
+/*
+ * sys_fork()
+ * Not implemented on no-mmu.
+ */
+asmlinkage int sys_fork(struct pt_regs *regs)
+{
+ /* fork almost works, enough to trick you into looking elsewhere :-( */
+ return -EINVAL;
+}
+
+/*
+ * sys_vfork()
+ * By the time we get here, the non-volatile registers have also been saved
+ * on the stack. We do some ugly pointer stuff here (see also copy_thread,
+ * which does context copy).
+ */
+asmlinkage int sys_vfork(struct pt_regs *regs)
+{
+ unsigned long old_sp = regs->an[7];
+ unsigned long old_a5 = regs->an[5];
+ unsigned long old_return_address;
+ long do_fork_return;
+
+ /*
+ * Read the old return address from the stack.
+ */
+ if (copy_from_user(&old_return_address,
+ (void *)old_sp, sizeof(unsigned long))) {
+ force_sig(SIGSEGV, current);
+ return 0;
+ }
+
+ /*
+ * Pop the vfork call frame by setting a5 and pc to the old_return
+ * address and incrementing the stack pointer by 4.
+ */
+ regs->an[5] = old_return_address;
+ regs->pc = old_return_address;
+ regs->an[7] += 4;
+
+ do_fork_return = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+ regs->an[7], regs, 0, NULL, NULL);
+
+ /*
+ * Now we have to test if the return code is an error. If it is an
+ * error, we restore the frame and error processing will execute in
+ * user space. Otherwise the child and the parent will return to the
+ * correct places.
+ */
+ if ((unsigned long)(do_fork_return) >= (unsigned long)(-125)) {
+ /*
+ * Error case. We need to restore the frame.
+ */
+ regs->an[5] = old_a5;
+ regs->pc = old_a5;
+ regs->an[7] = old_sp;
+ }
+
+ return do_fork_return;
+}
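+
+/*
+ * A note on the unsigned comparison above (worked example, illustrative):
+ * error returns in the range -125..-1 map to 0xffffff83..0xffffffff on a
+ * 32-bit machine, so e.g. do_fork() returning -ENOMEM (-12, i.e.
+ * 0xfffffff4) compares >= (unsigned long)(-125) and the vfork frame is
+ * restored; any valid pid compares below the cutoff.
+ */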
+
+/*
+ * sys_clone()
+ * creates a child thread.
+ */
+asmlinkage int sys_clone(unsigned long clone_flags,
+ unsigned long newsp,
+ struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->an[7];
+ return do_fork(clone_flags, newsp, regs, 0,
+ NULL, NULL);
+}
+
+/*
+ * copy_thread()
+ * low level thread copy, only used by do_fork in kernel/fork.c
+ */
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long topstk,
+ struct task_struct *p, struct pt_regs *regs)
+
+{
+ struct pt_regs *childregs;
+
+ childregs = (struct pt_regs *)
+ (task_stack_page(p) + THREAD_SIZE - 8) - 1;
+
+ *childregs = *regs;
+
+ /*
+ * Set return value for child to be 0.
+ */
+ childregs->dn[0] = 0;
+
+ if (usp)
+ childregs->an[7] = usp;
+ else
+ childregs->an[7] = (unsigned long)task_stack_page(p) +
+ THREAD_SIZE - 8;
+
+ /*
+ * Set up the switch_to frame to return to "ret_from_fork"
+ */
+ p->thread.a5 = (unsigned long)ret_from_fork;
+ p->thread.sp = (unsigned long)childregs;
+
+ return 0;
+}
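+
+/*
+ * Stack layout note for copy_thread() (worked example with an assumed
+ * THREAD_SIZE of 0x2000): for a stack page at 0x40120000, childregs is
+ * placed at 0x40121ff8 - sizeof(struct pt_regs), i.e. just below the
+ * 8-byte pad at the top of the stack, and a child created without its
+ * own usp starts with sp = 0x40121ff8.
+ */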
+
+/*
+ * sys_execve()
+ * executes a new program.
+ */
+asmlinkage int sys_execve(char *name, char **argv,
+ char **envp, struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ lock_kernel();
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+ asm (" .global sys_execve_complete\n"
+ " sys_execve_complete:");
+out:
+ unlock_kernel();
+ return error;
+}
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ return tsk->thread.a5;
+}
+
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long pc;
+
+ /*
+ * If we don't have a process, or it is the current
+ * one, or it is RUNNING, it makes no sense to ask for a
+ * wchan.
+ */
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ /*
+ * TODO: If the process is in the middle of schedule, we
+ * are supposed to do something different but for now we
+ * will return the same thing in both situations.
+ */
+ pc = thread_saved_pc(p);
+ if (in_sched_functions(pc))
+ return pc;
+ return pc;
+}
+
+
+/*
+ * Infrequently used interface to dump task registers to core files.
+ */
+int dump_task_regs(struct task_struct *task, elf_gregset_t *elfregs)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+ *(struct pt_regs *)elfregs = *regs;
+
+ return 1;
+}
+
+/*
+ * __switch_to is the function that implements the context save and
+ * switch within the kernel. Since this is a function call very few
+ * registers have to be saved to pull this off. d0 holds prev and we
+ * want to preserve it. prev_switch is a pointer to task->thread
+ * structure. This is where we will save the register state. next_switch
+ * is pointer to the next task's thread structure that holds the
+ * registers.
+ */
+asmlinkage void *__switch_to(struct task_struct *prev,
+ struct thread_struct *prev_switch,
+ struct thread_struct *next_switch)
+ __attribute__((naked));
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/processor.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/processor.c
new file mode 100644
index 000000000..55d1bdf62
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/processor.c
@@ -0,0 +1,348 @@
+/*
+ * arch/ubicom32/kernel/processor.c
+ * Ubicom32 architecture processor info implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/cpu.h>
+#include <asm/devtree.h>
+#include <asm/processor.h>
+#include <asm/cpu.h>
+#include <asm/ocm_size.h>
+
+struct procnode {
+ struct devtree_node dn;
+ unsigned int threads;
+ unsigned int timers;
+ unsigned int frequency;
+ unsigned int ddr_frequency;
+ unsigned int interrupt0;
+ unsigned int interrupt1;
+ void *socm;
+ void *eocm;
+ void *sdram;
+ void *edram;
+ unsigned int arch_version;
+ void *os_syscall_begin;
+ void *os_syscall_end;
+};
+
+struct procnode *pn;
+
+/*
+ * show_processorinfo()
+ * Print the actual processor information.
+ */
+static void show_processorinfo(struct seq_file *m)
+{
+ char *cpu, *mmu, *fpu;
+ unsigned int clockfreq;
+ unsigned int chipid;
+
+ cpu = CPU;
+ mmu = "none";
+ fpu = "none";
+
+ asm volatile (
+ "move.4 %0, CHIP_ID \n\t"
+ : "=r" (chipid)
+ );
+
+ /*
+ * General Processor Information.
+ */
+ seq_printf(m, "Vendor:\t\t%s\n", "Ubicom");
+ seq_printf(m, "CPU:\t\t%s\n", cpu);
+ seq_printf(m, "MMU:\t\t%s\n", mmu);
+ seq_printf(m, "FPU:\t\t%s\n", fpu);
+ seq_printf(m, "Arch:\t\t%hx\n", chipid >> 16);
+ seq_printf(m, "Rev:\t\t%hx\n", (chipid & 0xffff));
+
+ /*
+ * Now compute the clock frequency in MHz.
+ */
+ clockfreq = processor_frequency();
+ seq_printf(m, "Clock Freq:\t%u.0 MHz\n",
+ clockfreq / 1000000);
+ seq_printf(m, "DDR Freq:\t%u.0 MHz\n",
+ pn ? pn->ddr_frequency / 1000000 : 0);
+ seq_printf(m, "BogoMips:\t%lu.%02lu\n",
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100);
+ seq_printf(m, "Calibration:\t%lu loops\n", (loops_per_jiffy * HZ));
+}
+
+/*
+ * show_cpuinfo()
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long n = (unsigned long)v - 1;
+
+#if defined(CONFIG_SMP)
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, n);
+#endif
+
+ /*
+ * Print the general processor information on the first
+ * call.
+ */
+ if (n == 0) {
+ show_processorinfo(m);
+ }
+
+#if defined(CONFIG_SMP)
+ /*
+ * For each hwthread, print if this hwthread is running Linux
+ * or is an I/O thread.
+ */
+ if (cpu_isset(n, cpu_online_map)) {
+ seq_printf(m, "cpu[%02lu]:\tthread id - %lu\n", n, p->tid);
+ } else {
+ seq_printf(m, "cpu[%02lu]:\toff-line\n", n);
+ }
+#endif
+ return 0;
+
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < NR_CPUS ? (void *)(i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
+/*
+ * processor_timers()
+ * Returns the timers available to Linux.
+ */
+unsigned int processor_timers(void)
+{
+ if (!pn) {
+ return 0;
+ }
+ return pn->timers;
+}
+
+/*
+ * processor_threads()
+ * Returns the threads available to Linux.
+ */
+unsigned int processor_threads(void)
+{
+ if (!pn) {
+ return 0;
+ }
+ return pn->threads;
+}
+
+/*
+ * processor_frequency()
+ * Returns the frequency of the system clock.
+ */
+unsigned int processor_frequency(void)
+{
+ if (!pn) {
+ return 0;
+ }
+ return pn->frequency;
+}
+EXPORT_SYMBOL(processor_frequency);
+
+/*
+ * processor_interrupts()
+ * Return the interrupts that are setup at boot time.
+ */
+int processor_interrupts(unsigned int *int0, unsigned int *int1)
+{
+ if (!pn) {
+ return -EFAULT;
+ }
+
+ if (int0) {
+ *int0 = pn->interrupt0;
+ }
+
+ if (int1) {
+ *int1 = pn->interrupt1;
+ }
+ return 0;
+}
+
+/*
+ * processor_ocm()
+ * Returns the start and end of OCM available to Linux.
+ */
+void processor_ocm(unsigned long *socm, unsigned long *eocm)
+{
+ *socm = (unsigned long)pn->socm;
+ *eocm = (unsigned long)pn->eocm;
+}
+
+/*
+ * processor_dram()
+ * Returns the start and end of dram available to Linux.
+ */
+void processor_dram(unsigned long *sdram, unsigned long *edram)
+{
+ *sdram = (unsigned long)pn->sdram;
+ *edram = (unsigned long)pn->edram;
+}
+
+/*
+ * processor_validate_failed()
+ *	Stall the thread; called when the procnode fails validation.
+ */
+static noinline void processor_validate_failed(void)
+{
+ while (1)
+ THREAD_STALL;
+}
+
+/*
+ * processor_validate()
+ *	Validates the procnode against the limitations of this link/build.
+ */
+static void processor_validate(void)
+{
+ void *dram_start = (void *)(KERNELSTART);
+ void *dram_end = (void *)(SDRAMSTART + CONFIG_MIN_RAMSIZE);
+#if APP_OCM_CODE_SIZE || APP_OCM_DATA_SIZE
+ void *ocm_code_start = (void *)(OCMSTART + APP_OCM_CODE_SIZE);
+ void *ocm_data_end = (void *)(OCMEND - APP_OCM_DATA_SIZE);
+#endif
+ extern void __os_syscall_begin;
+ extern void __os_syscall_end;
+ int proc_node_valid = 1;
+
+ if (!pn) {
+ printk(KERN_ERR "ERROR: processor node not found\n");
+ goto error;
+ }
+
+ if (dram_start < pn->sdram || dram_end > pn->edram) {
+ printk(KERN_ERR "ERROR: processor dram mismatch %p-%p "
+ "available but we are expecting %p-%p\n",
+ pn->sdram, pn->edram, dram_start, dram_end);
+ proc_node_valid = 0;
+ } else {
+ printk(KERN_ERR "processor dram %p-%p, expecting %p-%p\n",
+ pn->sdram, pn->edram, dram_start, dram_end);
+ }
+ if (&__os_syscall_begin < pn->os_syscall_begin ||
+ &__os_syscall_end > pn->os_syscall_end) {
+ printk(KERN_ERR "ERROR: processor syscall area mismatch "
+ "%p-%p available but we are expecting %p-%p\n",
+ pn->os_syscall_begin, pn->os_syscall_end,
+ &__os_syscall_begin, &__os_syscall_end);
+ proc_node_valid = 0;
+ } else {
+ printk(KERN_ERR "processor dram %p-%p, expecting %p-%p\n",
+ pn->sdram, pn->edram, dram_start, dram_end);
+ }
+#if APP_OCM_CODE_SIZE || APP_OCM_DATA_SIZE
+ if (ocm_code_start < pn->socm || ocm_data_end > pn->eocm) {
+ printk(KERN_ERR "ERROR: processor ocm mismatch %p-%p "
+ "available but we are expecting %p-%p\n",
+ pn->socm, pn->eocm, ocm_code_start, ocm_data_end);
+ proc_node_valid = 0;
+ } else {
+ printk(KERN_INFO "processor ocm %p-%p, expecting %p-%p\n",
+ pn->socm, pn->eocm, ocm_code_start, ocm_data_end);
+
+ }
+#endif
+
+ if (UBICOM32_ARCH_VERSION != pn->arch_version) {
+ printk(KERN_ERR "ERROR: processor arch mismatch, kernel"
+ "compiled for %d found %d\n",
+ UBICOM32_ARCH_VERSION, pn->arch_version);
+ proc_node_valid = 0;
+ }
+
+ if (proc_node_valid)
+ return;
+error:
+ processor_validate_failed();
+}
+
+void __init processor_init(void)
+{
+	/*
+	 * Find the processor node in the device tree; processor_validate()
+	 * below stalls the system if it is missing or inconsistent.
+	 */
+ pn = (struct procnode *)devtree_find_node("processor");
+
+ processor_validate();
+
+ /*
+ * If necessary correct the initial range registers to cover the
+ * complete physical space
+ */
+ if (pn->edram > (void *)(SDRAMSTART + CONFIG_MIN_RAMSIZE)) {
+ printk(KERN_INFO "updating range registers for expanded dram\n");
+ asm volatile (
+ " move.4 D_RANGE1_HI, %0 \t\n"
+ " move.4 I_RANGE0_HI, %0 \t\n"
+#ifdef CONFIG_PROTECT_KERNEL
+ " move.4 D_RANGE2_HI, %0 \t\n"
+ " move.4 I_RANGE2_HI, %0 \t\n"
+#endif
+ : : "a"((unsigned long)pn->edram - 4)
+ );
+ }
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/ptrace.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/ptrace.c
new file mode 100644
index 000000000..18bb39e9d
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/ptrace.c
@@ -0,0 +1,275 @@
+/*
+ * arch/ubicom32/kernel/ptrace.c
+ * Ubicom32 architecture ptrace implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+
+/*
+ * ptrace_getregs()
+ *
+ * Get all user integer registers.
+ */
+static inline int ptrace_getregs(struct task_struct *task, void __user *uregs)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+ return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+}
+
+/*
+ * ptrace_get_reg()
+ *
+ * Get contents of register REGNO in task TASK.
+ */
+static unsigned long ptrace_get_reg(struct task_struct *task, int regno)
+{
+ if (regno < sizeof(struct pt_regs)) {
+ struct pt_regs *pt_regs = task_pt_regs(task);
+ return *(unsigned long *)((long) pt_regs + regno);
+ }
+
+ return -EIO;
+}
+
+/*
+ * ptrace_put_reg()
+ * Write contents of register REGNO in task TASK.
+ */
+static int ptrace_put_reg(struct task_struct *task, int regno,
+ unsigned long data)
+{
+ if (regno <= sizeof(struct pt_regs) && regno != PT_FRAME_TYPE) {
+ struct pt_regs *pt_regs = task_pt_regs(task);
+ *(unsigned long *)((long) pt_regs + regno) = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+/*
+ * ptrace_disable_single_step()
+ * Disable Single Step
+ */
+static int ptrace_disable_single_step(struct task_struct *task)
+{
+ /*
+ * Single Step not yet implemented, so must always be disabled
+ */
+ return 0;
+}
+
+/*
+ * ptrace_disable()
+ * Make sure the single step bit is not set.
+ *	Called by kernel/ptrace.c when detaching.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ ptrace_disable_single_step(child);
+}
+
+/*
+ * arch_ptrace()
+ * architecture specific ptrace routine.
+ */
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ int ret;
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if (((unsigned long) addr > PT_INTERP_FDPIC_LOADMAP)
+ || (addr & 3))
+ break;
+
+ tmp = 0; /* Default return condition */
+
+ ret = -EIO;
+ if (addr < sizeof(struct pt_regs)) {
+ tmp = ptrace_get_reg(child, addr);
+ } else if (addr == PT_TEXT_ADDR) {
+ tmp = child->mm->start_code;
+ } else if (addr == PT_TEXT_END_ADDR) {
+ tmp = child->mm->end_code;
+ } else if (addr == PT_DATA_ADDR) {
+ tmp = child->mm->start_data;
+ } else if (addr == PT_EXEC_FDPIC_LOADMAP) {
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ tmp = child->mm->context.exec_fdpic_loadmap;
+#endif
+ } else if (addr == PT_INTERP_FDPIC_LOADMAP) {
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ tmp = child->mm->context.interp_fdpic_loadmap;
+#endif
+ } else {
+ break;
+ }
+
+ ret = put_user(tmp, (unsigned long *)data);
+ break;
+ }
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+
+		/*
+		 * If we just changed some code, we need to flush the
+		 * instruction cache so it sees the new text.
+		 */
+ if (request == PTRACE_POKETEXT && ret == 0) {
+ flush_icache_range(addr, addr + 4);
+ }
+ break;
+
+ case PTRACE_POKEUSR: /* write the word at location addr
+ * in the USER area */
+ ret = -EIO;
+
+ if (((unsigned long) addr > PT_DATA_ADDR) || (addr & 3))
+ break;
+
+ if (addr < sizeof(struct pt_regs)) {
+ ret = ptrace_put_reg(child, addr, data);
+ }
+ break;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from)
+ * syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+
+ ret = -EIO;
+ if (!valid_signal(data))
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+ ptrace_disable_single_step(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ /*
+	 * Make the child exit. The best we can do is send it a SIGKILL;
+	 * perhaps it should be put in the status that it wants to exit.
+ */
+ case PTRACE_KILL: {
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+ ptrace_disable_single_step(child);
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_GETREGS: /* Get all gp regs from the child. */
+ ptrace_getregs(child, (unsigned long *)data);
+ ret = 0;
+ break;
+
+ case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+ int i;
+ unsigned long tmp;
+ int count = sizeof(struct pt_regs) / sizeof(unsigned long);
+ for (i = 0; i < count; i++) {
+ if (get_user(tmp, (unsigned long *) data)) {
+ ret = -EFAULT;
+ break;
+ }
+ ptrace_put_reg(child, sizeof(unsigned long) * i, tmp);
+ data += sizeof(long);
+ }
+ ret = 0;
+ break;
+ }
+
+ default:
+ return ptrace_request(child, request, addr, data);
+ break;
+ }
+ return ret;
+}
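+
+/*
+ * For reference, a debugger reads a register through the PEEKUSR path with
+ * something like the following (illustrative sketch only; PT_PC stands in
+ * for whatever byte offset <asm/ptrace.h> assigns to the register):
+ *
+ *	errno = 0;
+ *	long pc = ptrace(PTRACE_PEEKUSR, pid, PT_PC, 0);
+ *	if (pc == -1 && errno)
+ *		perror("PTRACE_PEEKUSR");
+ *
+ * Note that addr is a byte offset into struct pt_regs, not a register
+ * index, which is why the handlers above reject (addr & 3).
+ */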
+/*
+ * syscall_trace
+ *
+ * called by syscall enter/exit when the TIF_SYSCALL_TRACE bit is set.
+ */
+asmlinkage void syscall_trace(void)
+{
+ struct task_struct *cur = current;
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(cur->ptrace & PT_PTRACED))
+ return;
+ ptrace_notify(SIGTRAP | ((cur->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (cur->exit_code) {
+ send_sig(cur->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/semaphore.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/semaphore.c
new file mode 100644
index 000000000..d996ac2b3
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/semaphore.c
@@ -0,0 +1,159 @@
+/*
+ * arch/ubicom32/kernel/semaphore.c
+ * Ubicom32 architecture semaphore implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+/*
+ * Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <asm/semaphore-helper.h>
+
+#ifndef CONFIG_RMW_INSNS
+spinlock_t semaphore_wake_lock;
+#endif
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
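+
+/*
+ * For readers without <asm/semaphore-helper.h> to hand, waking_non_zero()
+ * is conceptually the following (a hedged sketch, not the actual helper):
+ *
+ *	static inline int waking_non_zero(struct semaphore *sem)
+ *	{
+ *		unsigned long flags;
+ *		int ret = 0;
+ *
+ *		spin_lock_irqsave(&semaphore_wake_lock, flags);
+ *		if (atomic_read(&sem->waking) > 0) {
+ *			atomic_dec(&sem->waking);
+ *			ret = 1;
+ *		}
+ *		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ *		return ret;
+ *	}
+ *
+ * so exactly one sleeper consumes each "waking" credit that __up() posts.
+ */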
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ current->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ current->state = (task_state); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __sched __down(struct semaphore *sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore *sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, current);
+ if (ret) {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore *sem)
+{
+ return waking_non_zero_trylock(sem);
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/setup.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/setup.c
new file mode 100644
index 000000000..7357f4ee3
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/setup.c
@@ -0,0 +1,194 @@
+/*
+ * arch/ubicom32/kernel/setup.c
+ * Ubicom32 architecture-dependent parts of system setup.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1999-2007 Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1998,1999 D. Jeff Dionne <jeff@uClinux.org>
+ * Copyleft ()) 2000 James D. Schettine {james@telos-systems.com}
+ * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
+ * Copyright (C) 1995 Hamish Macdonald
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 2001 Lineo, Inc. <www.lineo.com>
+ * 68VZ328 Fixes/support Evan Stawnyczy <e@lineo.ca>
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+
+#include <asm/devtree.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/ubicom32-common.h>
+#include <asm/processor.h>
+#include <asm/bootargs.h>
+#include <asm/thread.h>
+
+unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
+unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+#ifdef CONFIG_CMDLINE_BOOL
+static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+#endif
+
+extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
+
+/*
+ * setup_arch()
+ * Setup the architecture dependent portions of the system.
+ */
+void __init setup_arch(char **cmdline_p)
+{
+ int bootmap_size;
+ unsigned long ram_start;
+
+ processor_init();
+ bootargs_init();
+
+ /*
+	 * Take memory_start from the end of the linked kernel image and
+	 * memory_end from the processor devtree node.
+ */
+ memory_start = PAGE_ALIGN(((unsigned long)&_end));
+ processor_dram(&ram_start, &memory_end);
+
+ init_mm.start_code = (unsigned long) &_stext;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) 0;
+
+ /*
+ * bootexec copies the original default command line to end of memory.
+ * u-boot can modify it there (i.e. to enable network boot) and the
+ * kernel picks up the modified version.
+ *
+ * mainexec creates a `new default' command_line which is in the
+ * bootargs devnode. It is updated on every firmware update but
+ * not used at the moment.
+ */
+ strlcpy(boot_command_line, (char *)(memory_end - COMMAND_LINE_SIZE), COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ /* append boot loader cmdline to builtin */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif
+#endif
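+
+	/*
+	 * Worked example (hypothetical values): with CONFIG_CMDLINE set to
+	 * "console=ttyS0" and u-boot passing "root=/dev/mtdblock2", the
+	 * appended result is "console=ttyS0 root=/dev/mtdblock2"; with
+	 * CONFIG_CMDLINE_OVERRIDE the bootloader part is discarded entirely.
+	 */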
+
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
+ printk(KERN_INFO "%s Processor, Ubicom, Inc. <www.ubicom.com>\n", CPU);
+
+#if defined(DEBUG)
+ printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
+ "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
+ (int) &_sdata, (int) &_edata,
+ (int) &_sbss, (int) &_ebss);
+ printk(KERN_DEBUG "MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
+ (int) &_ebss, (int) memory_start,
+ (int) memory_start, (int) memory_end);
+#endif
+
+#ifdef DEBUG
+ if (strlen(*cmdline_p))
+ printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
+#endif
+
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+ /*
+ * If we have a device tree, see if we have the nodes we need.
+ */
+ if (devtree) {
+ devtree_print();
+ }
+
+ /*
+ * From the arm initialization comment:
+ *
+ * This doesn't seem to be used by the Linux memory manager any
+ * more, but is used by ll_rw_block. If we can get rid of it, we
+ * also get rid of some of the stuff above as well.
+ *
+ * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+ * the system, not the maximum PFN.
+ */
+ max_pfn = max_low_pfn = (memory_end - PAGE_OFFSET) >> PAGE_SHIFT;
+
+ /*
+ * Give all the memory to the bootmap allocator, tell it to put the
+ * boot mem_map at the start of memory.
+ */
+ bootmap_size = init_bootmem_node(
+ NODE_DATA(0),
+ memory_start >> PAGE_SHIFT, /* map goes here */
+ PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */
+ memory_end >> PAGE_SHIFT);
+ /*
+ * Free the usable memory, we have to make sure we do not free
+ * the bootmem bitmap so we then reserve it after freeing it :-)
+ */
+ free_bootmem(memory_start, memory_end - memory_start);
+ reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
+
+ /*
+ * Get kmalloc into gear.
+ */
+ paging_init();
+
+ /*
+ * Fix up the thread_info structure, indicate this is a mainline Linux
+ * thread and setup the sw_ksp().
+ */
+ sw_ksp[thread_get_self()] = (unsigned int) current_thread_info();
+ thread_set_mainline(thread_get_self());
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/signal.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/signal.c
new file mode 100644
index 000000000..f6ccbe3a7
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/signal.c
@@ -0,0 +1,458 @@
+/*
+ * arch/ubicom32/kernel/signal.c
+ * Ubicom32 architecture signal handling implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Linux/m68k support by Hamish Macdonald
+ * 68060 fixes by Jesper Skov
+ * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
+ * mathemu support by Roman Zippel
+ * ++roman (07/09/96): implemented signal stacks
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ *
+ * mathemu support by Roman Zippel
+ * (Note: fpstate in the signal context is completely ignored for the emulator
+ * and the internal floating point format is put on stack)
+ *
+ * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/tty.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * asm signal return handlers.
+ */
+void ret_from_user_signal(void);
+void ret_from_user_rt_signal(void);
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
+/*
+ * Common signal suspend implementation
+ */
+static int signal_suspend(sigset_t *saveset, struct pt_regs *regs)
+{
+ regs->dn[0] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (!do_signal(saveset, regs)) {
+ continue;
+ }
+ /*
+ * If the current frame type is a signal trampoline we are
+ * actually going to call the signal handler so we return the
+ * desired d0 as the return value.
+ */
+ if (regs->frame_type == UBICOM32_FRAME_TYPE_SIGTRAMP) {
+ return regs->dn[0];
+ }
+ return -EINTR;
+ }
+ /*
+ * Should never get here
+ */
+ BUG();
+ return 0;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+ old_sigset_t mask = regs->dn[0];
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /*
+ * Call common handler
+ */
+ return signal_suspend(&saveset, regs);
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+ sigset_t *unewset = (sigset_t *)regs->dn[0];
+ size_t sigsetsize = (size_t)regs->dn[1];
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /*
+ * Call common handler
+ */
+ return signal_suspend(&saveset, regs);
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+do_sys_sigaltstack(struct pt_regs *regs)
+{
+ const stack_t *uss = (stack_t *) regs->dn[0];
+ stack_t *uoss = (stack_t *)regs->dn[1];
+ return do_sigaltstack(uss, uoss, regs->an[7]);
+}
+
+/*
+ * fdpic_func_descriptor describes sa_handler when the application is FDPIC
+ */
+struct fdpic_func_descriptor {
+ unsigned long text;
+ unsigned long GOT;
+};
+
+/*
+ * rt_sigframe is stored on the user stack immediately before (above)
+ * the signal handlers stack.
+ */
+struct rt_sigframe
+{
+ unsigned long syscall_number; /* This holds __NR_rt_sigreturn. */
+ unsigned long restore_all_regs; /* This field gets set to 1 if the frame
+ * type is TRAP or INTERRUPT. */
+ siginfo_t *info;
+ struct ucontext uc;
+ int sig;
+ void *pretcode;
+};
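+
+/*
+ * After setup_rt_frame() below runs, the user stack therefore looks like
+ * this (higher addresses first):
+ *
+ *	original regs->an[7]	(possibly switched to the sigaltstack)
+ *	pretcode
+ *	sig
+ *	uc			(saved pt_regs and sigmask live in here)
+ *	info
+ *	restore_all_regs
+ *	syscall_number		<- new regs->an[7] points here
+ */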
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+ BUG();
+ return 0;
+}
+
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
+{
+ unsigned long usp = regs->an[7];
+ struct rt_sigframe *frame = (struct rt_sigframe *)(usp);
+ sigset_t set;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ if (copy_from_user(regs, &frame->uc.uc_mcontext, sizeof(struct pt_regs)))
+ goto badframe;
+ return regs->dn[0];
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = regs->an[7];
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (!sas_ss_flags(usp))
+ usp = current->sas_ss_sp + current->sas_ss_size;
+ }
+ return (void *)((usp - frame_size) & ~0x3);
+}
+
+/*
+ * signal_trampoline: Defined in ubicom32_syscall.S
+ */
+asmlinkage void signal_trampoline(void) __attribute__((naked));
+
+static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+
+ frame = (struct rt_sigframe *) get_sigframe(ka, regs, sizeof(*frame));
+
+ /*
+	 * The 'err |=' idiom may have been criticized as bad code style,
+	 * but we want this code to be fast, so for now it stays as is.
+ */
+ err |= __put_user( ( (current_thread_info()->exec_domain)
+ && (current_thread_info()->exec_domain->signal_invmap)
+ && (sig < 32) )
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig, &frame->sig);
+ err |= __put_user(info, &frame->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->an[7]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __put_user(__NR_rt_sigreturn, &frame->syscall_number);
+ if ((regs->frame_type == UBICOM32_FRAME_TYPE_TRAP) ||
+ (regs->frame_type == UBICOM32_FRAME_TYPE_INTERRUPT)) {
+ err |= __put_user(1, &frame->restore_all_regs);
+ } else {
+ err |= __put_user(0, &frame->restore_all_regs);
+ }
+ err |= copy_to_user (&frame->uc.uc_mcontext.sc_regs, regs, sizeof(struct pt_regs));
+ err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /*
+	 * Set up registers for the signal handler. NOTE: Do not modify dn[14];
+	 * it contains the userspace TLS pointer, so it is important that it
+	 * carries over to the signal handler.
+ */
+ regs->an[7] = (unsigned long)frame;
+ regs->pc = (unsigned long) signal_trampoline;
+ regs->an[5] = (unsigned long) signal_trampoline;
+ regs->dn[0] = sig;
+ regs->dn[1] = (unsigned long) frame->info;
+ regs->dn[2] = (unsigned int) &frame->uc;
+
+ /*
+ * If this is FDPIC then the signal handler is actually a function
+ * descriptor.
+ */
+ if (current->personality & FDPIC_FUNCPTRS) {
+ struct fdpic_func_descriptor __user *funcptr =
+ (struct fdpic_func_descriptor *) ka->sa.sa_handler;
+ err |= __get_user(regs->dn[3], &funcptr->text);
+ err |= __get_user(regs->an[0], &funcptr->GOT);
+ if (err)
+ goto give_sigsegv;
+
+ /*
+ * The funcdesc must be in a3 as this is required for the lazy
+ * resolver in ld.so, if the application is not FDPIC a3 is not
+ * used.
+ */
+ regs->an[3] = (unsigned long) funcptr;
+
+ } else {
+ regs->dn[3] = (unsigned long)ka->sa.sa_handler;
+ regs->an[0] = 0;
+ }
+
+ regs->frame_type = UBICOM32_FRAME_TYPE_SIGTRAMP;
+
+ return;
+
+give_sigsegv:
+ /* user space exception */
+ force_sigsegv(sig, current);
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->dn[0]) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->dn[0] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->dn[0] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ do_restart:
+ regs->dn[0] = regs->original_dn_0;
+ regs->pc -= 8;
+ regs->an[5] -= 8;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ /* are we from a system call? */
+ if (regs->frame_type == -1)
+ /* If so, check system call restarting.. */
+ handle_restart(regs, ka, 1);
+
+ /* set up the stack frame */
+ setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 1;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->frame_type == -1) {
+ /* Restart the system call - no handlers present */
+ handle_restart(regs, NULL, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * sys_sigreturn()
+ * Return handler for signal clean-up.
+ *
+ * NOTE: Ubicom32 does not use this syscall. Instead we rely
+ * on do_rt_sigreturn().
+ */
+asmlinkage long sys_sigreturn(void)
+{
+ return -ENOSYS;
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/smp.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/smp.c
new file mode 100644
index 000000000..4aa27eb44
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/smp.c
@@ -0,0 +1,806 @@
+/*
+ * arch/ubicom32/kernel/smp.c
+ * SMP implementation for Ubicom32 processors.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/profile.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/irq.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/current.h>
+#include <asm/tlbflush.h>
+#include <asm/timex.h>
+#include <asm/cpu.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <asm/thread.h>
+#include <asm/sections.h>
+#include <asm/ip5000.h>
+
+/*
+ * Mask the debug printouts for IPI because they are too verbose
+ * for regular debugging.
+ */
+
+/* #define DEBUG_SMP 1 */
+#if !defined(DEBUG_SMP)
+#define smp_debug(lvl, ...)
+#else
+static unsigned int smp_debug_lvl = 50;
+#define smp_debug(lvl, printargs...) \
+ if (lvl >= smp_debug_lvl) { \
+ printk(printargs); \
+ }
+#endif
+
+#if !defined(DEBUG_SMP)
+#define DEBUG_ASSERT(cond)
+#else
+#define DEBUG_ASSERT(cond) \
+ if (!(cond)) { \
+ THREAD_STALL; \
+ }
+#endif
+
+/*
+ * List of IPI Commands (more than one can be set at a time).
+ */
+enum ipi_message_type {
+ IPI_NOP,
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP,
+ IPI_CPU_TIMER,
+};
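+
+/*
+ * Pending IPIs are recorded as (1 << type) bits in the per-cpu ipi_pending
+ * word, so several commands can be coalesced into a single interrupt; for
+ * example, a queued reschedule plus a function call is
+ * (1 << IPI_RESCHEDULE) | (1 << IPI_CALL_FUNC) == 0x06.
+ */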
+
+/*
+ * We maintain a hardware thread oriented view of online threads
+ * and those involved or needing IPI.
+ */
+static volatile unsigned long smp_online_threads = 0;
+static volatile unsigned long smp_needs_ipi = 0;
+static volatile unsigned long smp_inside_ipi = 0;
+static unsigned long smp_irq_affinity[NR_IRQS];
+
+/*
+ * What do we need to track on a per cpu/thread basis?
+ */
+DEFINE_PER_CPU(struct cpuinfo_ubicom32, cpu_data);
+
+/*
+ * Each thread cpuinfo IPI information is guarded by a lock
+ * that is kept local to this file.
+ */
+DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
+
+/*
+ * The IPI(s) are based on a software IRQ through the LDSR.
+ */
+unsigned int smp_ipi_irq;
+
+/*
+ * Define a spinlock so that only one cpu is able to modify the
+ * smp_needs_ipi and to set/clear the IRQ at a time.
+ */
+DEFINE_SPINLOCK(smp_ipi_lock);
+
+/*
+ * smp_halt_processor()
+ * Halt this hardware thread.
+ */
+static void smp_halt_processor(void)
+{
+ int cpuid = thread_get_self();
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ local_irq_disable();
+ printk(KERN_EMERG "cpu[%d] has halted. It is not OK to turn off power \
+ until all cpu's are off.\n", cpuid);
+ for (;;) {
+ thread_suspend();
+ }
+}
+
+/*
+ * ipi_interrupt()
+ * Handle an Interprocessor Interrupt.
+ */
+static irqreturn_t ipi_interrupt(int irq, void *dev_id)
+{
+ int cpuid = smp_processor_id();
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
+ unsigned long ops;
+
+ /*
+ * Count this now; we may make a call that never returns.
+ */
+ p->ipi_count++;
+
+ /*
+ * We are about to process all ops. If another cpu has stated
+ * that we need an IPI, we will have already processed it. By
+ * clearing our smp_needs_ipi, and processing all ops,
+ * we reduce the number of IPI interrupts. However, this introduces
+ * the possibility that smp_needs_ipi will be clear and the soft irq
+ * will have gone off; so we need to make the get_affinity() path
+ * tolerant of spurious interrupts.
+ */
+ spin_lock(&smp_ipi_lock);
+ smp_needs_ipi &= ~(1 << p->tid);
+ spin_unlock(&smp_ipi_lock);
+
+ for (;;) {
+ /*
+ * Read the set of IPI commands we should handle.
+ */
+ spinlock_t *lock = &per_cpu(ipi_lock, cpuid);
+ spin_lock(lock);
+ ops = p->ipi_pending;
+ p->ipi_pending = 0;
+ spin_unlock(lock);
+
+ /*
+ * If we have no IPI commands to execute, break out.
+ */
+ if (!ops) {
+ break;
+ }
+
+ /*
+ * Execute the set of commands in the ops word, one command
+		 * at a time in no particular order, stripping off each command
+ * as we execute it.
+ */
+ while (ops) {
+ unsigned long which = ffz(~ops);
+ ops &= ~(1 << which);
+
+ BUG_ON(!irqs_disabled());
+ switch (which) {
+ case IPI_NOP:
+ smp_debug(100, KERN_INFO "cpu[%d]: "
+ "IPI_NOP\n", cpuid);
+ break;
+
+ case IPI_RESCHEDULE:
+ /*
+ * Reschedule callback. Everything to be
+ * done is done by the interrupt return path.
+ */
+ smp_debug(200, KERN_INFO "cpu[%d]: "
+ "IPI_RESCHEDULE\n", cpuid);
+ break;
+
+ case IPI_CALL_FUNC:
+ smp_debug(100, KERN_INFO "cpu[%d]: "
+ "IPI_CALL_FUNC\n", cpuid);
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ smp_debug(100, KERN_INFO "cpu[%d]: "
+ "IPI_CALL_FUNC_SINGLE\n", cpuid);
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ smp_debug(100, KERN_INFO "cpu[%d]: "
+ "IPI_CPU_STOP\n", cpuid);
+ smp_halt_processor();
+ break;
+
+#if !defined(CONFIG_LOCAL_TIMERS)
+ case IPI_CPU_TIMER:
+ smp_debug(100, KERN_INFO "cpu[%d]: "
+ "IPI_CPU_TIMER\n", cpuid);
+#if defined(CONFIG_GENERIC_CLOCKEVENTS)
+ local_timer_interrupt();
+#else
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
+#endif
+				break;
+#endif
+
+ default:
+ printk(KERN_CRIT "cpu[%d]: "
+ "Unknown IPI: %lu\n", cpuid, which);
+
+ return IRQ_NONE;
+ }
+
+ /*
+ * Let in any pending interrupts
+ */
+ BUG_ON(!irqs_disabled());
+ local_irq_enable();
+ local_irq_disable();
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/*
+ * ipi_send()
+ * Send an Interprocessor Interrupt.
+ */
+static void ipi_send(int cpu, enum ipi_message_type op)
+{
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpu);
+ spinlock_t *lock = &per_cpu(ipi_lock, cpu);
+ unsigned long flags;
+
+ /*
+ * We protect the setting of the ipi_pending field and ensure
+ * that the ipi delivery mechanism and interrupt are atomically
+ * handled.
+ */
+ spin_lock_irqsave(lock, flags);
+ p->ipi_pending |= 1 << op;
+ spin_unlock_irqrestore(lock, flags);
+
+ spin_lock_irqsave(&smp_ipi_lock, flags);
+ smp_needs_ipi |= (1 << p->tid);
+ ubicom32_set_interrupt(smp_ipi_irq);
+ spin_unlock_irqrestore(&smp_ipi_lock, flags);
+ smp_debug(100, KERN_INFO "cpu[%d]: send: %d\n", cpu, op);
+}
+
+/*
+ * ipi_send_mask
+ * Send an IPI to each cpu in mask.
+ */
+static inline void ipi_send_mask(unsigned int op, const struct cpumask mask)
+{
+ int cpu;
+ for_each_cpu_mask(cpu, mask) {
+ ipi_send(cpu, op);
+ }
+}
+
+/*
+ * ipi_send_allbutself()
+ * Send an IPI to all threads but ourselves.
+ */
+static inline void ipi_send_allbutself(unsigned int op)
+{
+ int self = smp_processor_id();
+ struct cpumask result;
+ cpumask_copy(&result, &cpu_online_map);
+ cpu_clear(self, result);
+ ipi_send_mask(op, result);
+}
+
+/*
+ * smp_enable_vector()
+ */
+static void smp_enable_vector(unsigned int irq)
+{
+ ubicom32_clear_interrupt(smp_ipi_irq);
+ ldsr_enable_vector(irq);
+}
+
+/*
+ * smp_disable_vector()
+ * Disable the interrupt by clearing the appropriate bit in the
+ * LDSR Mask Register.
+ */
+static void smp_disable_vector(unsigned int irq)
+{
+ ldsr_disable_vector(irq);
+}
+
+/*
+ * smp_mask_vector()
+ */
+static void smp_mask_vector(unsigned int irq)
+{
+ ldsr_mask_vector(irq);
+}
+
+/*
+ * smp_unmask_vector()
+ */
+static void smp_unmask_vector(unsigned int irq)
+{
+ ldsr_unmask_vector(irq);
+}
+
+/*
+ * smp_end_vector()
+ * Called once an interrupt is completed (reset the LDSR mask).
+ */
+static void smp_end_vector(unsigned int irq)
+{
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, smp_processor_id());
+ spin_lock(&smp_ipi_lock);
+ smp_inside_ipi &= ~(1 << p->tid);
+ if (smp_inside_ipi) {
+ spin_unlock(&smp_ipi_lock);
+ return;
+ }
+ spin_unlock(&smp_ipi_lock);
+ ldsr_unmask_vector(irq);
+ smp_debug(100, KERN_INFO "cpu[%d]: unamesk vector\n", smp_processor_id());
+}
+
+/*
+ * Special handler functions for SMP.
+ */
+static struct irq_chip ubicom32_smp_chip = {
+	.name = "UbicomIPI",
+ .startup = NULL,
+ .shutdown = NULL,
+ .enable = smp_enable_vector,
+ .disable = smp_disable_vector,
+ .ack = NULL,
+ .mask = smp_mask_vector,
+ .unmask = smp_unmask_vector,
+ .end = smp_end_vector,
+};
+
+/*
+ * smp_reset_ipi()
+ * None of these cpu(s) got their IPI, turn it back on.
+ *
+ * Note: This is called by the LDSR which is not a full
+ * Linux cpu. Thus you must use the raw form of locks
+ * because lock debugging will not work on the partial
+ * cpu nature of the LDSR.
+ */
+void smp_reset_ipi(unsigned long mask)
+{
+ __raw_spin_lock(&smp_ipi_lock.raw_lock);
+ smp_needs_ipi |= mask;
+ smp_inside_ipi &= ~mask;
+ ubicom32_set_interrupt(smp_ipi_irq);
+ __raw_spin_unlock(&smp_ipi_lock.raw_lock);
+ smp_debug(100, KERN_INFO "smp: reset IPIs for: 0x%x\n", mask);
+}
+
+/*
+ * smp_get_affinity()
+ * Choose the thread affinity for this interrupt.
+ *
+ * Note: This is called by the LDSR which is not a full
+ * Linux cpu. Thus you must use the raw form of locks
+ * because lock debugging will not work in the LDSR's
+ * partial-cpu context.
+ */
+unsigned long smp_get_affinity(unsigned int irq, int *all)
+{
+ unsigned long mask = 0;
+
+ /*
+ * Most IRQ(s) are delivered in a round robin fashion.
+ */
+ if (irq != smp_ipi_irq) {
+ unsigned long result = smp_irq_affinity[irq] & smp_online_threads;
+ DEBUG_ASSERT(result);
+ *all = 0;
+ return result;
+ }
+
+ /*
+ * This is an IPI request. Return all cpu(s) scheduled for an IPI.
+ * We also track those cpu(s) that are going to be "receiving" IPI this
+ * round. When all CPU(s) have called smp_end_vector(),
+ * we will unmask the IPI interrupt.
+ */
+ __raw_spin_lock(&smp_ipi_lock.raw_lock);
+ ubicom32_clear_interrupt(smp_ipi_irq);
+ if (smp_needs_ipi) {
+ mask = smp_needs_ipi;
+ smp_inside_ipi |= smp_needs_ipi;
+ smp_needs_ipi = 0;
+ }
+ __raw_spin_unlock(&smp_ipi_lock.raw_lock);
+ *all = 1;
+ return mask;
+}
+
+/*
+ * smp_set_affinity()
+ * Set the affinity for this irq but store the value in tid(s).
+ */
+void smp_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ int cpuid;
+ unsigned long *paffinity = &smp_irq_affinity[irq];
+
+ /*
+ * If none specified, all cpus are allowed.
+ */
+ if (cpus_empty(*dest)) {
+ *paffinity = 0xffffffff;
+ return;
+ }
+
+ /*
+ * Make sure to clear the old value before setting up the
+ * list.
+ */
+ *paffinity = 0;
+ for_each_cpu_mask(cpuid, *dest) {
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
+ *paffinity |= (1 << p->tid);
+ }
+}
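+
+/*
+ * For example (hypothetical mapping), if cpus 1 and 2 run on hardware
+ * threads 3 and 4, a dest mask of {1,2} stores (1 << 3) | (1 << 4) == 0x18;
+ * smp_get_affinity() later intersects this with smp_online_threads.
+ */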
+
+/*
+ * smp_send_stop()
+ * Send a stop request to all CPU but this one.
+ */
+void smp_send_stop(void)
+{
+ ipi_send_allbutself(IPI_CPU_STOP);
+}
+
+/*
+ * smp_send_timer_all()
+ * Send all cpu(s) but this one, a request to update times.
+ */
+void smp_send_timer_all(void)
+{
+ ipi_send_allbutself(IPI_CPU_TIMER);
+}
+
+/*
+ * smp_timer_broadcast()
+ * Use an IPI to broadcast a timer message
+ */
+void smp_timer_broadcast(const struct cpumask *mask)
+{
+ ipi_send_mask(IPI_CPU_TIMER, *mask);
+}
+
+/*
+ * smp_send_reschedule()
+ * Send a reschedule request to the specified cpu.
+ */
+void smp_send_reschedule(int cpu)
+{
+ ipi_send(cpu, IPI_RESCHEDULE);
+}
+
+/*
+ * arch_send_call_function_ipi()
+ * Cause each cpu in the mask to call the generic function handler.
+ */
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ int cpu;
+ for_each_cpu_mask(cpu, *mask) {
+ ipi_send(cpu, IPI_CALL_FUNC);
+ }
+}
+
+/*
+ * arch_send_call_function_single_ipi()
+ * Cause the specified cpu to call the generic function handler.
+ */
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send(cpu, IPI_CALL_FUNC_SINGLE);
+}
+
+/*
+ * setup_profiling_timer()
+ * Dummy function created to keep Oprofile happy in the SMP case.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+/*
+ * smp_mainline_start()
+ * Start a slave thread executing a mainline Linux context.
+ */
+static void __init smp_mainline_start(void *arg)
+{
+ int cpuid = smp_processor_id();
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
+
+ BUG_ON(p->tid != thread_get_self());
+
+ /*
+	 * Support the old 2.4 Linux scheme as well: bail out if this cpu
+	 * is somehow already marked online.
+ */
+ if (cpu_test_and_set(cpuid, cpu_online_map)) {
+ printk(KERN_CRIT "cpu[%d]: already initialized!\n", cpuid);
+ smp_halt_processor();
+ return;
+ }
+
+ /*
+ * Initialise the idle task for this CPU
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm) {
+ printk(KERN_CRIT "cpu[%d]: idle task already has memory "
+ "management\n", cpuid);
+ smp_halt_processor();
+ return;
+ }
+
+ /*
+	 * TODO: x86 does this prior to calling notify; try to understand why.
+ */
+ preempt_disable();
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS)
+ /*
+ * Setup a local timer event so that this cpu will get timer interrupts
+ */
+ if (local_timer_setup(cpuid) == -1) {
+ printk(KERN_CRIT "cpu[%d]: timer alloc failed\n", cpuid);
+ smp_halt_processor();
+ return;
+ }
+#endif
+
+ /*
+ * Notify those interested that we are up and alive. This must
+ * be done before interrupts are enabled. It must also be completed
+ * before the bootstrap cpu returns from __cpu_up() (see comment
+ * above cpu_set() of the cpu_online_map).
+ */
+ notify_cpu_starting(cpuid);
+
+ /*
+ * Indicate that this thread is now online and present. Setting
+ * cpu_online_map has the side effect of allowing the bootstrap
+ * cpu to continue along; so anything that MUST be done prior to the
+ * bootstrap cpu returning from __cpu_up() needs to go above here.
+ */
+ cpu_set(cpuid, cpu_online_map);
+ cpu_set(cpuid, cpu_present_map);
+
+ /*
+ * Maintain a thread mapping in addition to the cpu mapping.
+ */
+ smp_online_threads |= (1 << p->tid);
+
+ /*
+ * Enable interrupts for this thread.
+ */
+ local_irq_enable();
+
+ /*
+ * Enter the idle loop and wait for a timer to schedule some work.
+ */
+ printk(KERN_INFO "cpu[%d]: entering cpu_idle()\n", cpuid);
+ cpu_idle();
+
+ /* Not Reached */
+}
+
+/*
+ * smp_cpus_done()
+ * Called once the kernel_init() has brought up all cpu(s).
+ */
+void smp_cpus_done(unsigned int cpu_max)
+{
+ /* Do Nothing */
+}
+
+/*
+ * __cpu_up()
+ *	Called to start up a specific cpu.
+ */
+int __cpuinit __cpu_up(unsigned int cpu)
+{
+ struct task_struct *idle;
+ unsigned int *stack;
+ long timeout;
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpu);
+
+ /*
+ * Create an idle task for this CPU.
+ */
+ idle = fork_idle(cpu);
+ if (IS_ERR(idle)) {
+ panic("cpu[%d]: fork failed\n", cpu);
+ return -ENOSYS;
+ }
+ task_thread_info(idle)->cpu = cpu;
+
+ /*
+ * Setup the sw_ksp[] to point to this new task.
+ */
+ sw_ksp[p->tid] = (unsigned int)idle->stack;
+ stack = (unsigned int *)(sw_ksp[p->tid] + PAGE_SIZE - 8);
+
+ /*
+ * Cause the specified thread to execute our smp_mainline_start
+ * function as a TYPE_NORMAL thread.
+ */
+ printk(KERN_INFO "cpu[%d]: launching mainline Linux thread\n", cpu);
+ if (thread_start(p->tid, smp_mainline_start, (void *)NULL, stack,
+ THREAD_TYPE_NORMAL) == -1) {
+ printk(KERN_WARNING "cpu[%d]: failed thread_start\n", cpu);
+ return -ENOSYS;
+ }
+
+ /*
+ * Wait for the thread to start up. The thread will set
+	 * the online bit when it is running.  Our caller expects the
+ * cpu to be online if we return 0.
+ */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_online(cpu)) {
+ break;
+ }
+
+ udelay(100);
+ barrier();
+ }
+
+ if (!cpu_online(cpu)) {
+ printk(KERN_CRIT "cpu[%d]: failed to live after %ld us\n",
+ cpu, timeout * 100);
+ return -ENOSYS;
+ }
+
+ printk(KERN_INFO "cpu[%d]: came alive after %ld us\n",
+ cpu, timeout * 100);
+ return 0;
+}
+
+/*
+ * Data used by setup_irq for the IPI.
+ */
+static struct irqaction ipi_irq = {
+ .name = "ipi",
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .handler = ipi_interrupt,
+};
+
+/*
+ * smp_prepare_cpus()
+ * Mark threads that are available to Linux as possible cpus(s).
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * We will need a software IRQ to send IPI(s). We will use
+ * a single software IRQ for all IPI(s).
+ */
+ if (irq_soft_alloc(&smp_ipi_irq) < 0) {
+ panic("no software IRQ is available\n");
+ return;
+ }
+
+ /*
+ * For the IPI interrupt, we want to use our own chip definition.
+ * This allows us to define what happens in SMP IPI without affecting
+ * the performance of the other interrupts.
+ *
+ * Next, Register the IPI interrupt function against the soft IRQ.
+ */
+ set_irq_chip(smp_ipi_irq, &ubicom32_smp_chip);
+ setup_irq(smp_ipi_irq, &ipi_irq);
+
+ /*
+ * We use the device tree node to determine how many
+ * free cpus we will have (up to NR_CPUS) and we indicate
+ * that those cpus are present.
+ *
+ * We need to do this very early in the SMP case
+ * because the Linux init code uses the cpu_present_map.
+ */
+ for_each_possible_cpu(i) {
+ thread_t tid;
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, i);
+
+ /*
+ * Skip the bootstrap cpu
+ */
+ if (i == 0) {
+ continue;
+ }
+
+ /*
+ * If we have a free thread left in the mask,
+ * indicate that the cpu is present.
+ */
+ tid = thread_alloc();
+ if (tid == (thread_t)-1) {
+ break;
+ }
+
+ /*
+ * Save the hardware thread id for this cpu.
+ */
+ p->tid = tid;
+ cpu_set(i, cpu_present_map);
+ printk(KERN_INFO "cpu[%d]: added to cpu_present_map - tid: %d\n", i, tid);
+ }
+}
+
+/*
+ * smp_prepare_boot_cpu()
+ * Copy the per_cpu data into the appropriate spot for the bootstrap cpu.
+ *
+ * The code in boot_cpu_init() has already set the boot cpu's
+ * state in the possible, present, and online maps.
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, 0);
+
+ smp_online_threads |= (1 << p->tid);
+ printk(KERN_INFO "cpu[%d]: bootstrap CPU online - tid: %ld\n",
+ current_thread_info()->cpu, p->tid);
+}
+
+/*
+ * smp_setup_processor_id()
+ * Set the current_thread_info() structure cpu value.
+ *
+ * We set the value to the true hardware thread value that we are running on.
+ * NOTE: this function overrides the weak alias function in main.c
+ */
+void __init smp_setup_processor_id(void)
+{
+ struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, 0);
+ int i;
+ for_each_cpu_mask(i, CPU_MASK_ALL)
+ set_cpu_possible(i, true);
+
+ current_thread_info()->cpu = 0;
+ p->tid = thread_get_self();
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/stacktrace.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/stacktrace.c
new file mode 100644
index 000000000..2a10e3f4f
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/stacktrace.c
@@ -0,0 +1,244 @@
+/*
+ * arch/ubicom32/kernel/stacktrace.c
+ * Ubicom32 architecture stack back trace implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/module.h>
+#include <asm/stacktrace.h>
+#include <asm/thread.h>
+#include <asm/ip5000.h>
+
+/*
+ * These symbols are filled in by the linker.
+ */
+extern unsigned long _stext;
+extern unsigned long _etext;
+
+extern unsigned long __ocm_text_run_begin;
+extern unsigned long __data_begin;
+
+/*
+ * stacktrace_iterate()
+ * Walk the stack looking for call and calli instructions on an aligned
+ * boundary.
+ *
+ * Trace must point to the top of the current stack frame.
+ */
+unsigned long stacktrace_iterate(unsigned long **trace,
+ unsigned long stext,
+ unsigned long etext,
+ unsigned long ocm_stext,
+ unsigned long ocm_etext,
+ unsigned long sstack,
+ unsigned long estack)
+{
+ unsigned int thread_trap_en, instruction;
+ unsigned long address;
+ unsigned int limit = 0;
+ unsigned long result = 0;
+ unsigned long *sp = *trace;
+
+ /*
+ * Exclude the current thread from being monitored for traps.
+ */
+ asm volatile(
+ " thread_get_self_mask d15 \n\t"
+ /* save current trap status */
+ " and.4 %0, MT_TRAP_EN, d15 \n\t"
+ " not.4 d15, d15 \n\t"
+ /* disable trap */
+ " and.4 MT_TRAP_EN, MT_TRAP_EN, d15 \n\t"
+ " pipe_flush 0 \n\t"
+ : "=r" (thread_trap_en)
+ :
+ : "d15", "cc"
+ );
+
+ while (limit++ < 256) {
+ /*
+ * See if we have a valid stack.
+ */
+ if (!between((unsigned long)sp, sstack, estack)) {
+#ifdef TRAP_DEBUG_STACK_TRACE
+ printk(KERN_EMERG "stack address is out of range - "
+ "sp: %x, sstack: %x, estack: %x\n",
+ (unsigned int)sp, (unsigned int)sstack,
+ (unsigned int)estack);
+#endif
+ result = 0;
+ *trace = 0;
+ break;
+ }
+
+ /*
+ * Get the value off the stack and back up 4 bytes to what
+ * should be the address of a call or calli.
+ */
+ address = (*sp++) - 4;
+
+ /*
+ * If the address is not within the text segment, skip this
+ * value.
+ */
+ if (!between(address, stext, etext) &&
+ !between(address, ocm_stext, ocm_etext)) {
+#ifdef TRAP_DEBUG_STACK_TRACE
+ printk(KERN_EMERG "not a text address - "
+ "address: %08x, stext: %08x, etext: %08x\n"
+ "ocm_stext: %08x, ocm_etext: %08x\n",
+ (unsigned int)address,
+ (unsigned int)stext,
+ (unsigned int)etext,
+ (unsigned int)ocm_stext,
+ (unsigned int)ocm_etext);
+#endif
+			continue;
+		}
+
+ /*
+	 * If the address is not on an aligned boundary it cannot be a
+ * return address.
+ */
+ if (address & 0x3) {
+ continue;
+ }
+
+ /*
+ * Read the probable instruction.
+ */
+ instruction = *(unsigned int *)address;
+
+ /*
+ * Is this a call instruction?
+ */
+ if ((instruction & 0xF8000000) == (u32_t)(0x1B << 27)) {
+#ifdef TRAP_DEBUG_STACK_TRACE
+ printk(KERN_EMERG "call inst. result: %x, "
+ "test: %x\n", (unsigned int)address,
+ (unsigned int)instruction);
+#endif
+ *trace = sp;
+ result = address;
+ break;
+ }
+
+ /*
+ * Is this a calli instruction?
+ */
+ if ((instruction & 0xF8000000) == (u32_t)(0x1E << 27)) {
+#ifdef TRAP_DEBUG_STACK_TRACE
+ printk(KERN_EMERG "calli inst. result: %x, "
+ "test: %x\n", (unsigned int)address,
+ (unsigned int)instruction);
+#endif
+ *trace = sp;
+ result = address;
+ break;
+ }
+ }
+
+ /*
+ * Restore the current thread to be monitored for traps.
+ */
+ if (thread_trap_en) {
+ asm volatile(
+ " thread_get_self_mask d15 \n\t"
+ " or.4 MT_TRAP_EN, MT_TRAP_EN, d15 \n\t"
+ :
+ :
+ : "d15", "cc"
+ );
+ }
+ return result;
+}
+
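+/*
+ * Worked example (illustrative, not part of the original sources):
+ * the call opcode occupies the top five bits of the instruction
+ * word, so a fetched word of 0xD8000123 satisfies
+ * (0xD8000123 & 0xF8000000) == (0x1B << 27) and is reported as a
+ * call site, while a data word such as 0x00001234 fails both
+ * opcode tests in stacktrace_iterate() and is skipped.
+ */
+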
+#ifdef CONFIG_STACKTRACE
+/*
+ * stacktrace_save_entries()
+ * Save stack back trace information into the provided trace structure.
+ */
+void stacktrace_save_entries(struct task_struct *tsk,
+ struct stack_trace *trace,
+ unsigned long sp)
+{
+ unsigned long code_start = (unsigned long)&_stext;
+ unsigned long code_end = (unsigned long)&_etext;
+ unsigned long ocm_code_start = (unsigned long)&__ocm_text_run_begin;
+ unsigned long ocm_code_end = (unsigned long)&__data_begin;
+ unsigned long stack_end = (unsigned long)(tsk->stack + THREAD_SIZE - 8);
+ unsigned long stack = (unsigned long)sp;
+ unsigned int idx = 0;
+ unsigned long *handle;
+ int skip = trace->skip;
+
+	handle = (unsigned long *)stack;
+	while (idx < trace->max_entries) {
+		unsigned long entry = stacktrace_iterate(&handle,
+				code_start, code_end,
+				ocm_code_start, ocm_code_end,
+				(unsigned long)stack, stack_end);
+		if (entry == 0) {
+			break;
+		}
+
+		/*
+		 * Consume, but do not record, the first "skip" entries.
+		 */
+		if (skip) {
+			skip--;
+			continue;
+		}
+		trace->entries[idx++] = entry;
+	}
+}
+
+/*
+ * save_stack_trace()
+ * Save the specified amount of the kernel stack trace information
+ * for the current task.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long sp = 0;
+ asm volatile (
+ " move.4 %0, SP \n\t"
+ : "=r" (sp)
+ );
+ stacktrace_save_entries(current, trace, sp);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+/*
+ * save_stack_trace_tsk()
+ * Save the specified amount of the kernel stack trace information
+ * for the specified task.
+ *
+ * Note: We assume the specified task is not currently running.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ stacktrace_save_entries(tsk, trace, tsk->thread.sp);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+#endif /* CONFIG_STACKTRACE */
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/sys_ubicom32.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/sys_ubicom32.c
new file mode 100644
index 000000000..b06f3f396
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/sys_ubicom32.c
@@ -0,0 +1,237 @@
+/*
+ * arch/ubicom32/kernel/sys_ubicom32.c
+ * Ubicom32 architecture system call support implementation.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/ubicom32
+ * platform.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+
+#include <asm/setup.h>
+#include <asm/traps.h>
+#include <asm/cacheflush.h>
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file *file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/m68k cloned Linux/i386, which historically could not
+ * handle more than 4 system call parameters, so these system calls
+ * used a memory block for parameter passing.
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ struct mmap_arg_struct a;
+ int error = -EFAULT;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+
+ error = -EINVAL;
+ if (a.offset & ~PAGE_MASK)
+ goto out;
+
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
+
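+/*
+ * Worked example (illustrative): old_mmap() receives a byte offset,
+ * so with 4 KB pages a mapping at file offset 0x3000 is forwarded
+ * as do_mmap2(..., 0x3000 >> PAGE_SHIFT), i.e. pgoff == 3, whereas
+ * sys_mmap2() callers pass the page offset directly.
+ */
+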
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls.
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc(uint call, int first, int second,
+ int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop(first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget(first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void **) ptr))
+ return -EFAULT;
+ return sys_semctl(first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd(first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+ if (copy_from_user(&tmp,
+ (struct ipc_kludge *)ptr,
+ sizeof(tmp)))
+ return -EFAULT;
+ return sys_msgrcv(first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv(first,
+ (struct msgbuf *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl(first, second,
+ (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = do_shmat(first, ptr, second, &raddr);
+ if (ret)
+ return ret;
+ return put_user(raddr, (ulong __user *) third);
+ }
+ }
+ case SHMDT:
+ return sys_shmdt(ptr);
+ case SHMGET:
+ return sys_shmget(first, second, third);
+ case SHMCTL:
+ return sys_shmctl(first, second, ptr);
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
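+/*
+ * Worked example (illustrative): a SEMCTL request issued through
+ * the "new" IPC ABI arrives as call == (1 << 16) | SEMCTL, so the
+ * demultiplexer above extracts version == 1 and call == SEMCTL
+ * before dispatching to sys_semctl().
+ */
+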
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int
+sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
+{
+ flush_cache_all();
+ return 0;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/syscalltable.S b/target/linux/ubicom32/files/arch/ubicom32/kernel/syscalltable.S
new file mode 100644
index 000000000..8921fb83d
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/syscalltable.S
@@ -0,0 +1,376 @@
+/*
+ * arch/ubicom32/kernel/syscalltable.S
+ * Ubicom32 architecture system call table.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+/*
+ *
+ * Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, Kenneth Albanowski <kjahds@kjahds.com>,
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+.text
+ALIGN
+ .global sys_call_table
+sys_call_table:
+ .long sys_ni_syscall /* 0 - old "setup()" system call*/
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long execve_intercept
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_chown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long old_select
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_ni_syscall /* _sys_swapon */
+ .long sys_reboot
+ .long sys_old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm for i386 */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall
+ .long sys_ni_syscall /* iopl for i386 */ /* 110 */
+ .long sys_vhangup
+ .long sys_ni_syscall /* obsolete idle() syscall */
+ .long sys_ni_syscall /* vm86old for i386 */
+ .long sys_wait4
+ .long sys_ni_syscall /* 115 */ /* _sys_swapoff */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long clone_intercept /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_cacheflush /* modify_ldt for i386 */
+ .long sys_adjtimex
+ .long sys_ni_syscall /* 125 */ /* _sys_mprotect */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "creat_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_ni_syscall /* _sys_msync */
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_ni_syscall /* 150 */ /* _sys_mlock */
+ .long sys_ni_syscall /* _sys_munlock */
+ .long sys_ni_syscall /* _sys_mlockall */
+ .long sys_ni_syscall /* _sys_munlockall */
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_ni_syscall /* _sys_mremap */
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_getpagesize /* _sys_getpagesize */
+ .long sys_ni_syscall /* old "query_module" */
+ .long sys_poll
+ .long sys_ni_syscall /* _sys_nfsservctl */
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_lchown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long vfork_intercept /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_chown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_lchown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+ .long sys_getdents64 /* 220 */
+ .long sys_gettid
+ .long sys_tkill
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr /* 225 */
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr /* 230 */
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_futex /* 235 */
+ .long sys_sendfile64
+ .long sys_ni_syscall /* _sys_mincore */
+ .long sys_ni_syscall /* _sys_madvise */
+ .long sys_fcntl64
+ .long sys_readahead /* 240 */
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel /* 245 */
+ .long sys_fadvise64
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 250 */
+ .long sys_epoll_wait
+ .long sys_ni_syscall /* _sys_remap_file_pages */
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 255 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 260 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 265 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy /* 270 */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify /* 275 */
+ .long sys_mq_getsetattr
+ .long sys_waitid
+ .long sys_ni_syscall /* for _sys_vserver */
+ .long sys_add_key
+ .long sys_request_key /* 280 */
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get
+ .long sys_inotify_init
+ .long sys_inotify_add_watch /* 285 */
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat
+ .long sys_mkdirat
+ .long sys_mknodat /* 290 */
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64
+ .long sys_unlinkat
+ .long sys_renameat /* 295 */
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat
+ .long sys_fchmodat
+ .long sys_faccessat /* 300 */
+ .long sys_ni_syscall /* Reserved for pselect6 */
+ .long sys_ni_syscall /* Reserved for ppoll */
+ .long sys_unshare
+ .long sys_set_robust_list
+ .long sys_get_robust_list /* 305 */
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee
+ .long sys_vmsplice
+ .long sys_move_pages /* 310 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_kexec_load
+ .long sys_getcpu
+ .long sys_epoll_pwait /* 315 */
+ .long sys_utimensat
+ .long sys_signalfd
+ .long sys_timerfd_create
+ .long sys_eventfd
+ .long sys_fallocate /* 320 */
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
+ .long sys_ni_syscall /* sys_signalfd4 */
+ .long sys_ni_syscall /* sys_eventfd2 */
+ .long sys_ni_syscall /* sys_epoll_create1 */
+ /* 325 */
+ .long sys_ni_syscall /* sys_dup3 */
+ .long sys_ni_syscall /* sys_pipe2 */
+ .long sys_ni_syscall /* sys_inotify_init1 */
+ .rept NR_syscalls-(.-sys_call_table)/4
+ .long sys_ni_syscall
+ .endr
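+
+/*
+ * Note (illustrative): (. - sys_call_table) / 4 is the number of
+ * entries emitted so far (each .long is 4 bytes), so the .rept
+ * block above pads the table out to exactly NR_syscalls entries,
+ * each pointing at sys_ni_syscall.
+ */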
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/thread.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/thread.c
new file mode 100644
index 000000000..aaa5fbea4
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/thread.c
@@ -0,0 +1,228 @@
+/*
+ * arch/ubicom32/kernel/thread.c
+ * Ubicom32 architecture hardware thread support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/types.h>
+#include <asm/ip5000.h>
+#include <asm/machdep.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread.h>
+
+/*
+ * TODO: At some point change the name here to be thread_ksp
+ */
+unsigned int sw_ksp[THREAD_ARCHITECTURAL_MAX];
+
+static unsigned int thread_mask = -1;
+static unsigned int thread_mainline_mask;
+
+/*
+ * thread_entry()
+ * Returning from the called function will disable the thread.
+ *
+ * This could be a naked call to allow for hwthreads that do not have stacks.
+ * However, with -O0, the code still writes to the stack, and this was
+ * corrupting memory just after the caller's stack.
+ */
+static void thread_entry(void *arg, thread_exec_fn_t exec)
+{
+ /*
+ * Call thread function
+ */
+ exec(arg);
+
+ /*
+ * Complete => Disable self
+ */
+ thread_disable(thread_get_self());
+}
+
+/*
+ * thread_start()
+ * Start the specified function on the specified hardware thread.
+ */
+thread_t thread_start(thread_t thread,
+ thread_exec_fn_t exec,
+ void *arg,
+ unsigned int *sp_high,
+ thread_type_t type)
+{
+ /*
+ * Sanity check
+ */
+ unsigned int enabled, mask, csr;
+ asm volatile (
+ "move.4 %0, MT_EN\n\t"
+ : "=m" (enabled)
+ );
+
+ mask = 1 << thread;
+ if (enabled & mask) {
+ printk(KERN_WARNING "request to enable a previously enabled thread\n");
+ return (thread_t)-1;
+ }
+
+ /*
+ * Update thread state
+ */
+ csr = (thread << 15) | (1 << 14);
+ asm volatile (
+ "setcsr %0 \n\t"
+ "setcsr_flush 0 \n\t"
+
+ "move.4 A0, #0 \n\t"
+ "move.4 A1, #0 \n\t"
+ "move.4 A2, #0 \n\t"
+ "move.4 A3, #0 \n\t"
+ "move.4 A4, #0 \n\t"
+ "move.4 A5, #0 \n\t"
+ "move.4 A6, #0 \n\t"
+ "move.4 SP, %4 \n\t" /* A7 is SP */
+
+ "move.4 D0, %3 \n\t"
+ "move.4 D1, %2 \n\t"
+ "move.4 D2, #0 \n\t"
+ "move.4 D3, #0 \n\t"
+ "move.4 D4, #0 \n\t"
+ "move.4 D5, #0 \n\t"
+ "move.4 D6, #0 \n\t"
+ "move.4 D7, #0 \n\t"
+ "move.4 D8, #0 \n\t"
+ "move.4 D9, #0 \n\t"
+ "move.4 D10, #0 \n\t"
+ "move.4 D11, #0 \n\t"
+ "move.4 D12, #0 \n\t"
+ "move.4 D13, #0 \n\t"
+ "move.4 D14, #0 \n\t"
+ "move.4 D15, #0 \n\t"
+
+ "move.4 INT_MASK0, #0 \n\t"
+ "move.4 INT_MASK1, #0 \n\t"
+ "move.4 PC, %1 \n\t"
+ "setcsr #0 \n\t"
+ "setcsr_flush 0 \n\t"
+ :
+ : "r" (csr), "r" (thread_entry), "r" (exec),
+ "r" (arg), "r" (sp_high)
+ );
+
+ /*
+ * Apply HRT state
+ */
+ if (type & THREAD_TYPE_HRT) {
+ asm volatile (
+ "or.4 MT_HRT, MT_HRT, %0\n\t"
+ :
+ : "d" (mask)
+ : "cc"
+ );
+ } else {
+ asm volatile (
+ "and.4 MT_HRT, MT_HRT, %0\n\t"
+ :
+ : "d" (~mask)
+ : "cc"
+ );
+ }
+
+ /*
+ * Set priority
+ */
+ asm volatile (
+ "or.4 MT_HPRI, MT_HPRI, %0\n\t"
+ :
+ : "d" (mask)
+ : "cc"
+ );
+
+ /*
+ * Enable thread
+ */
+ asm volatile (
+ "move.4 MT_ACTIVE_SET, %0 \n\t"
+ :
+ : "d" (mask)
+ );
+ thread_enable_mask(mask);
+ return thread;
+}
+
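+/*
+ * Usage sketch (illustrative only; my_fn, my_arg and my_stack are
+ * hypothetical, and passing 0 for a non-HRT thread type is an
+ * assumption based on the THREAD_TYPE_HRT test above):
+ *
+ *	static unsigned int my_stack[512];
+ *
+ *	thread_t tid = thread_alloc();
+ *	if (tid != (thread_t)-1) {
+ *		thread_start(tid, my_fn, my_arg, &my_stack[512], 0);
+ *	}
+ *
+ * The stack argument is the high end of the array, as suggested by
+ * the sp_high parameter name.
+ */
+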
+/*
+ * thread_get_mainline()
+ * Return a mask of those threads that are Linux mainline threads.
+ */
+unsigned int thread_get_mainline(void)
+{
+ return thread_mainline_mask;
+}
+
+/*
+ * thread_set_mainline()
+ * Indicate that the specified thread is a Linux mainline thread.
+ */
+void thread_set_mainline(thread_t tid)
+{
+ thread_mainline_mask |= (1 << tid);
+}
+
+/*
+ * thread_alloc()
+ * Allocate an unused hardware thread.
+ */
+thread_t thread_alloc(void)
+{
+ thread_t tid;
+
+ /*
+ * If this is the first time we are here get the list of unused
+ * threads from the processor device tree node.
+ */
+ if (thread_mask == -1) {
+ thread_mask = processor_threads();
+ }
+
+ if (!thread_mask) {
+ return (thread_t)-1;
+ }
+
+ tid = ffs(thread_mask);
+ if (tid != 0) {
+ tid--;
+ thread_mask &= ~(1 << tid);
+ return tid;
+ }
+
+ return (thread_t)-1;
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/time.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/time.c
new file mode 100644
index 000000000..4a99284bd
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/time.c
@@ -0,0 +1,212 @@
+/*
+ * arch/ubicom32/kernel/time.c
+ * Initialize the timer list and start the appropriate timers.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/ip5000.h>
+#include <asm/machdep.h>
+
+/*
+ * A bitmap of the timers on the processor indicates
+ * that the timer is free or in-use.
+ */
+static unsigned int timers;
+
+/*
+ * timer_set()
+ * Init the specified compare register to go off <n> cycles from now.
+ */
+void timer_set(int timervector, unsigned int cycles)
+{
+ int idx = UBICOM32_VECTOR_TO_TIMER_INDEX(timervector);
+ UBICOM32_IO_TIMER->syscom[idx] =
+ UBICOM32_IO_TIMER->sysval + cycles;
+ ldsr_enable_vector(timervector);
+}
+
+/*
+ * timer_reset()
+ * Set/reset the timer to go off again.
+ *
+ * Because sysval is a continuous timer, this function is able
+ * to ensure that we do not have clock skew by using the previous
+ * value in syscom to set the next value for syscom.
+ *
+ * Returns the number of ticks that transpired since the last event.
+ */
+int timer_reset(int timervector, unsigned int cycles)
+{
+ /*
+ * Reset the timer in the LDSR thread to go off appropriately.
+ *
+ * Use the previous value of the timer to calculate the new stop
+ * time. This allows us to account for it taking an
+ * indeterminate amount of time to get here.
+ */
+ const int timer_index = UBICOM32_VECTOR_TO_TIMER_INDEX(timervector);
+ unsigned int prev = UBICOM32_IO_TIMER->syscom[timer_index];
+ unsigned int next = prev + cycles;
+ int scratchpad3;
+ int diff;
+ int ticks = 1;
+
+ /*
+ * If the difference is negative, we have missed at least one
+ * timer tick.
+ *
+ * TODO: Decide if we want to "ignore" time (as done below) or
+ * if we want to process time (unevenly) by calling timer_tick()
+ * lost_ticks times.
+ */
+ while (1) {
+ /*
+ * Set our future time first.
+ */
+ UBICOM32_IO_TIMER->syscom[timer_index] = next;
+
+ /*
+		 * Then check that we really set a time in the future.
+ */
+ diff = (int)next - (int)UBICOM32_IO_TIMER->sysval;
+ if (diff >= 0) {
+ break;
+ }
+
+ /*
+ * Oops, we are too slow. Playing catch up.
+ *
+		 * If the debugger is connected then there is a good
+		 * chance that we lost time because we were in a
+		 * breakpoint, so in this case we do not print out
+ * diagnostics.
+ */
+ asm volatile ("move.4 %0, scratchpad3"
+ : "=r" (scratchpad3));
+ if ((scratchpad3 & 0x1) == 0) {
+ /*
+ * No debugger attached, print to the console
+ */
+ printk(KERN_EMERG "diff: %d, timer has lost %u "
+ "ticks [rounded up]\n",
+ -diff,
+ (unsigned int)((-diff + cycles - 1) / cycles));
+ }
+
+ do {
+ next += cycles;
+ diff = (int)next - (int)UBICOM32_IO_TIMER->sysval;
+ ticks++;
+ } while (diff < 0);
+ }
+ return ticks;
+}
+
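+/*
+ * Worked example (illustrative): with cycles == 2500 and a previous
+ * syscom value of 10000, the next stop time is 12500 no matter how
+ * late timer_reset() itself runs. If sysval has already passed
+ * 12500, the catch-up loop advances the stop time in 2500-cycle
+ * steps, counting one additional tick per step.
+ */
+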
+/*
+ * sched_clock()
+ * Returns current time in nano-second units.
+ *
+ * Notes:
+ * 1) This is an override for the weak alias in
+ * kernel/sched_clock.c.
+ * 2) Do not use xtime_lock as this function is
+ * sometimes called with xtime_lock held.
+ * 3) We use a retry algorithm to ensure that
+ * we get a consistent value.
+ * 4) sched_clock must be overwritten if IRQ tracing
+ * is enabled because the default implementation uses
+ * the xtime_lock sequence while holding xtime_lock.
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long my_jiffies;
+ unsigned long jiffies_top;
+ unsigned long jiffies_bottom;
+
+ do {
+ jiffies_top = jiffies_64 >> 32;
+ jiffies_bottom = jiffies_64 & 0xffffffff;
+ } while (unlikely(jiffies_top != (unsigned long)(jiffies_64 >> 32)));
+
+ my_jiffies = ((unsigned long long)jiffies_top << 32) | (jiffies_bottom);
+ return (my_jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ);
+}
+
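+/*
+ * Worked example (illustrative): with HZ == 100 each jiffy is
+ * NSEC_PER_SEC / HZ == 10,000,000 ns, so 250 jiffies after the
+ * INITIAL_JIFFIES offset sched_clock() returns
+ * 250 * 10,000,000 == 2,500,000,000 ns, i.e. 2.5 seconds.
+ */
+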
+/*
+ * timer_free()
+ * Free a hardware timer.
+ */
+void timer_free(int interrupt)
+{
+ unsigned int bit = interrupt - TIMER_INT(0);
+
+ /*
+	 * BUG if the timer is already marked free (it was never allocated).
+ */
+ BUG_ON(timers & (1 << bit));
+ timers |= (1 << bit);
+}
+
+/*
+ * timer_alloc()
+ * Allocate a hardware timer.
+ */
+int timer_alloc(void)
+{
+	unsigned int bit = find_first_bit((unsigned long *)&timers, 32);
+
+	/*
+	 * find_first_bit() returns the bitmap size when no bit is set.
+	 */
+	if (bit >= 32) {
+		printk(KERN_WARNING "no more free timers\n");
+		return -1;
+	}
+
+ timers &= ~(1 << bit);
+ return bit + TIMER_INT(0);
+}
+
+/*
+ * time_init()
+ * Time init function.
+ */
+void time_init(void)
+{
+ /*
+ * Find the processor node and determine what timers are
+ * available for us.
+ */
+ timers = processor_timers();
+ if (timers == 0) {
+ printk(KERN_WARNING "no timers are available for Linux\n");
+ return;
+ }
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ timer_device_init();
+#else
+ timer_tick_init();
+#endif
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_broadcast.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_broadcast.c
new file mode 100644
index 000000000..8f0cdc4d5
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_broadcast.c
@@ -0,0 +1,102 @@
+/*
+ * arch/ubicom32/kernel/timer_broadcast.c
+ * Implements a dummy clock event for each cpu.
+ *
+ * Copyright (C) 2008 Paul Mundt
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ * arch/arm
+ * arch/sh
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/jiffies.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+
+static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
+
+/*
+ * The broadcast trick only works when the timer will be used in a periodic mode.
+ * If the user has configured either NO_HZ or HIGH_RES_TIMERS they must have
+ * a per cpu timer.
+ */
+#if defined(CONFIG_NO_HZ) || defined(CONFIG_HIGH_RES_TIMERS)
+#error "Tickless and High Resolution Timers require per-CPU local timers: CONFIG_LOCAL_TIMERS"
+#endif
+
+/*
+ * local_timer_interrupt()
+ * Used on SMP for local timer interrupt sent via an IPI.
+ */
+void local_timer_interrupt(void)
+{
+ struct clock_event_device *dev = &__get_cpu_var(local_clockevent);
+
+ dev->event_handler(dev);
+}
+
+/*
+ * dummy_timer_set_next_event()
+ * Cause the timer to go off "cycles" from now.
+ */
+static int dummy_timer_set_next_event(unsigned long cycles, struct clock_event_device *dev)
+{
+ return 0;
+}
+
+/*
+ * dummy_timer_set_mode()
+ * Do Nothing.
+ */
+static void dummy_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+}
+
+/*
+ * local_timer_setup()
+ * Adds a clock event for the specified cpu.
+ */
+int __cpuinit local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *dev = &per_cpu(local_clockevent, cpu);
+
+ dev->name = "timer-dummy";
+ dev->features = CLOCK_EVT_FEAT_DUMMY;
+ dev->rating = 200;
+ dev->mult = 1;
+ dev->set_mode = dummy_timer_set_mode;
+ dev->set_next_event = dummy_timer_set_next_event;
+ dev->broadcast = smp_timer_broadcast;
+ dev->cpumask = cpumask_of_cpu(cpu);
+ dev->irq = -1;
+ printk(KERN_NOTICE "timer[%d]: %s - created\n", dev->irq, dev->name);
+
+ clockevents_register_device(dev);
+ return 0;
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_device.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_device.c
new file mode 100644
index 000000000..1943cbb9e
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_device.c
@@ -0,0 +1,301 @@
+/*
+ * arch/ubicom32/kernel/timer_device.c
+ * Implements a Ubicom32 clock device and event devices.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/types.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/spinlock.h>
+#include <asm/ip5000.h>
+#include <asm/machdep.h>
+
+#if defined(CONFIG_SMP)
+#include <asm/smp.h>
+#endif
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#define MAX_TIMERS (2 + CONFIG_TIMER_EXTRA_ALLOC)
+#else
+#define MAX_TIMERS (NR_CPUS + CONFIG_TIMER_EXTRA_ALLOC)
+#endif
+
+#if (MAX_TIMERS > 10)
+#error "Ubicom32 only has 10 timers"
+#endif
+
+static unsigned int frequency;
+static struct clock_event_device timer_device_devs[MAX_TIMERS];
+static struct irqaction timer_device_irqs[MAX_TIMERS];
+static int timer_device_next_timer = 0;
+
+DEFINE_SPINLOCK(timer_device_lock);
+
+/*
+ * timer_device_set_next_event()
+ * Cause the timer to go off "cycles" from now.
+ */
+static int timer_device_set_next_event(unsigned long cycles, struct clock_event_device *dev)
+{
+ timer_set(dev->irq, cycles);
+ return 0;
+}
+
+/*
+ * timer_device_set_mode()
+ * Handle the mode switch for a clock event device.
+ */
+static void timer_device_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ /*
+ * Make sure the vector is disabled
+ * until the next event is set.
+ */
+ printk(KERN_NOTICE "timer[%d]: shutdown\n", dev->irq);
+ ldsr_disable_vector(dev->irq);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /*
+ * Make sure the vector is disabled
+ * until the next event is set.
+ */
+ printk(KERN_NOTICE "timer[%d]: oneshot\n", dev->irq);
+ ldsr_disable_vector(dev->irq);
+ break;
+
+ case CLOCK_EVT_MODE_PERIODIC:
+ /*
+ * The periodic request is 1 per jiffies
+ */
+ printk(KERN_NOTICE "timer[%d]: periodic: %d cycles\n",
+ dev->irq, frequency / CONFIG_HZ);
+ timer_set(dev->irq, frequency / CONFIG_HZ);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_RESUME:
+ printk(KERN_WARNING "timer[%d]: unimplemented mode: %d\n",
+ dev->irq, mode);
+ break;
+	}
+}
+
+/*
+ * timer_device_event()
+ * Call the device's event handler.
+ *
+ * The pointer is initialized by the generic Linux code
+ * to the function to be called.
+ */
+static irqreturn_t timer_device_event(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = (struct clock_event_device *)dev_id;
+
+ if (dev->mode == CLOCK_EVT_MODE_PERIODIC) {
+ /*
+ * The periodic request is 1 per jiffies
+ */
+ timer_reset(dev->irq, frequency / CONFIG_HZ);
+ } else {
+ /*
+ * The timer will go off again at the rollover
+ * point. We must disable the IRQ to prevent
+ * getting a spurious interrupt.
+ */
+ ldsr_disable_vector(dev->irq);
+ }
+
+ if (!dev->event_handler) {
+ printk(KERN_CRIT "no registered event handler\n");
+ return IRQ_HANDLED;
+ }
+
+ dev->event_handler(dev);
+ return IRQ_HANDLED;
+}
+
+/*
+ * timer_device_clockbase_read()
+ * Provide a primary clocksource around the sysval timer.
+ */
+static cycle_t timer_device_clockbase_read(void)
+{
+ return (cycle_t)UBICOM32_IO_TIMER->sysval;
+}
+
+/*
+ * Primary Clock Source Description
+ *
+ * We use 24 for the shift factor because we want
+ * to ensure there are fewer than 2^24 clocks
+ * in a jiffy of 10 ms.
+ */
+static struct clocksource timer_device_clockbase = {
+ .name = "sysval",
+ .rating = 400,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 24,
+ .mult = 0,
+ .read = timer_device_clockbase_read,
+};
+
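+/*
+ * Worked example (illustrative, assuming a 250 MHz sysval clock):
+ * clocksource_khz2mult(250000, 24) computes
+ * (10^6 << 24) / 250000 == 67108864, so a cycle delta converts as
+ * ns = (cycles * 67108864) >> 24 == cycles * 4, the expected 4 ns
+ * per cycle. A 10 ms jiffy is then 2,500,000 cycles, safely below
+ * the 2^24 bound described above.
+ */
+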
+/*
+ * timer_device_alloc_event()
+ * Allocate a timer device event.
+ */
+static int timer_device_alloc_event(const char *name, int cpuid, const struct cpumask *cpumask)
+{
+ struct clock_event_device *dev;
+ struct irqaction *action;
+
+ /*
+ * Are we out of configured timers?
+ */
+ spin_lock(&timer_device_lock);
+ if (timer_device_next_timer >= MAX_TIMERS) {
+ spin_unlock(&timer_device_lock);
+ printk(KERN_WARNING "out of timer event entries\n");
+ return -1;
+ }
+ dev = &timer_device_devs[timer_device_next_timer];
+ action = &timer_device_irqs[timer_device_next_timer];
+ timer_device_next_timer++;
+ spin_unlock(&timer_device_lock);
+
+ /*
+ * Now allocate a timer to ourselves.
+ */
+ dev->irq = timer_alloc();
+ if (dev->irq == -1) {
+ spin_lock(&timer_device_lock);
+ timer_device_next_timer--;
+ spin_unlock(&timer_device_lock);
+ printk(KERN_WARNING "out of hardware timers\n");
+ return -1;
+ }
+
+ /*
+ * Init the IRQ action structure. Make sure
+ * this in place before you register the clock
+ * event device.
+ */
+ action->name = name;
+ action->flags = IRQF_DISABLED | IRQF_TIMER;
+ action->handler = timer_device_event;
+	/* cpumask_copy(&action->mask, cpumask); */
+ action->dev_id = dev;
+ setup_irq(dev->irq, action);
+ irq_set_affinity(dev->irq, cpumask);
+ ldsr_disable_vector(dev->irq);
+
+ /*
+ * init clock dev structure.
+ *
+ * The min_delta_ns is chosen to ensure that setting next
+ * event will never be requested with too small of value.
+ */
+ dev->name = name;
+ dev->rating = timer_device_clockbase.rating;
+ dev->shift = timer_device_clockbase.shift;
+ dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ dev->set_mode = timer_device_set_mode;
+ dev->set_next_event = timer_device_set_next_event;
+ dev->mult = div_sc(frequency, NSEC_PER_SEC, dev->shift);
+ dev->max_delta_ns = clockevent_delta2ns(0xffffffff, dev);
+ dev->min_delta_ns = clockevent_delta2ns(100, dev);
+	/* dev->cpumask = cpumask; */
+ printk(KERN_NOTICE "timer[%d]: %s - created\n", dev->irq, dev->name);
+
+ /*
+ * Now register the device.
+ */
+ clockevents_register_device(dev);
+ return dev->irq;
+}
+
+#if defined(CONFIG_LOCAL_TIMERS)
+/*
+ * local_timer_setup()
+ * Allocation function for creating a per cpu local timer.
+ */
+int __cpuinit local_timer_setup(unsigned int cpu)
+{
+	return timer_device_alloc_event("timer-cpu", cpu, cpumask_of(cpu));
+}
+#endif
+
+/*
+ * timer_device_init()
+ * Create and init a generic clock driver for Ubicom32.
+ */
+void timer_device_init(void)
+{
+ int i;
+
+ /*
+ * Get the frequency from the processor device tree node or use
+ * the default if not available. We will store this as the frequency
+ * of the timer to avoid future calculations.
+ */
+ frequency = processor_frequency();
+ if (frequency == 0) {
+ frequency = CLOCK_TICK_RATE;
+ }
+
+ /*
+	 * Set up the primary clock source around sysval. Linux does not
+	 * supply a MHz multiplier, so convert down to kHz.
+ */
+ timer_device_clockbase.mult =
+ clocksource_khz2mult(frequency / 1000,
+ timer_device_clockbase.shift);
+ if (clocksource_register(&timer_device_clockbase)) {
+ printk(KERN_ERR "timer: clocksource failed to register\n");
+ return;
+ }
+
+ /*
+ * Always allocate a primary timer.
+ */
+ timer_device_alloc_event("timer-primary", -1, cpu_all_mask);
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+ /*
+ * If BROADCAST is selected we need to add a broadcast timer.
+ */
+ timer_device_alloc_event("timer-broadcast", -1, cpu_all_mask);
+#endif
+
+ /*
+ * Allocate extra timers that are requested.
+ */
+ for (i = 0; i < CONFIG_TIMER_EXTRA_ALLOC; i++) {
+ timer_device_alloc_event("timer-extra", -1, cpu_all_mask);
+ }
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_tick.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_tick.c
new file mode 100644
index 000000000..7a2ad4949
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/timer_tick.c
@@ -0,0 +1,109 @@
+/*
+ * arch/ubicom32/kernel/timer_tick.c
+ * Implements a periodic timer.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/profile.h>
+
+#include <asm/ip5000.h>
+#include <asm/machdep.h>
+#if defined(CONFIG_SMP)
+#include <asm/smp.h>
+#endif
+
+static unsigned int timervector;
+static unsigned int frequency;
+
+/*
+ * timer_tick()
+ * Kernel system timer support. Needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick.
+ */
+static irqreturn_t timer_tick(int irq, void *dummy)
+{
+ int ticks;
+
+ BUG_ON(!irqs_disabled());
+ ticks = timer_reset(timervector, frequency);
+
+ write_seqlock(&xtime_lock);
+ do_timer(ticks);
+ write_sequnlock(&xtime_lock);
+
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
+
+#if defined(CONFIG_SMP)
+ smp_send_timer_all();
+#endif
+ return(IRQ_HANDLED);
+}
+
+/*
+ * Data used by setup_irq for the timer.
+ */
+static struct irqaction timer_irq = {
+ .name = "timer",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = timer_tick,
+};
+
+/*
+ * timer_tick_init()
+ * Implements a periodic timer
+ *
+ * This implementation directly calls timer_tick() and moves
+ * the Linux kernel's notion of time forward. It is used when the
+ * user has not selected GENERIC_CLOCKEVENTS.
+ */
+void timer_tick_init(void)
+{
+ /*
+ * Now allocate a timer to ourselves.
+ */
+ timervector = timer_alloc();
+ if (timervector == -1) {
+ printk(KERN_WARNING "where did the timer go?\n");
+ return;
+ }
+
+ setup_irq(timervector, &timer_irq);
+
+ /*
+ * Get the frequency from the processor device tree node or use
+ * the default if not available. We will store this as the frequency
+ * of the timer to avoid future calculations.
+ */
+ frequency = processor_frequency();
+ if (frequency == 0) {
+ frequency = CLOCK_TICK_RATE;
+ }
+ frequency /= CONFIG_HZ;
+
+ printk(KERN_NOTICE "timer will interrupt every: %d cycles\n", frequency);
+ timer_set(timervector, frequency);
+}
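+
+/*
+ * Worked example (illustrative, assuming a 250 MHz processor clock
+ * and CONFIG_HZ == 100): timer_tick_init() programs the compare
+ * register to fire every 250000000 / 100 == 2500000 cycles, i.e.
+ * every 10 ms.
+ */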
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/topology.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/topology.c
new file mode 100644
index 000000000..0676a1658
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/topology.c
@@ -0,0 +1,47 @@
+/*
+ * arch/ubicom32/kernel/topology.c
+ * Ubicom32 architecture sysfs topology information.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cache.h>
+
+static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+
+static int __init topology_init(void)
+{
+ int num;
+
+ for_each_present_cpu(num) {
+ cpu_devices[num].hotpluggable = 0;
+ register_cpu(&cpu_devices[num], num);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/traps.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/traps.c
new file mode 100644
index 000000000..8cb22e25e
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/traps.c
@@ -0,0 +1,514 @@
+/*
+ * arch/ubicom32/kernel/traps.c
+ * Ubicom32 architecture trap handling support.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/a.out.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <linux/compiler.h>
+#include <linux/stacktrace.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/stacktrace.h>
+#include <asm/devtree.h>
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+#include <asm/ip5000.h>
+#include <asm/thread.h>
+
+#define TRAP_MAX_STACK_DEPTH 20
+
+/*
+ * These symbols are filled in by the linker.
+ */
+extern unsigned long _stext;
+extern unsigned long _etext;
+
+extern unsigned long __ocm_text_run_begin;
+extern unsigned long __data_begin;
+
+extern void show_vmas(struct task_struct *task);
+
+const char *trap_cause_strings[] = {
+ /*0*/ "inst address decode error",
+ /*1*/ "inst sync error",
+ /*2*/ "inst illegal",
+ /*3*/ "src1 address decode error",
+ /*4*/ "dst address decode error",
+ /*5*/ "src1 alignment error",
+ /*6*/ "dst alignment error",
+ /*7*/ "src1 sync error",
+ /*8*/ "dst sync error",
+ /*9*/ "DCAPT error",
+ /*10*/ "inst range error",
+ /*11*/ "src1 range error",
+ /*12*/ "dst range error",
+};
+
+/*
+ * The device tree trap node definition.
+ */
+struct trapnode {
+ struct devtree_node dn;
+ unsigned int intthread;
+};
+
+static struct trapnode *tn;
+
+/*
+ * trap_interrupt_handler()
+ * Software Interrupt to ensure that a trap is serviced.
+ */
+static irqreturn_t trap_interrupt_handler(int irq, void *dummy)
+{
+ /* Do Nothing */
+ return IRQ_HANDLED;
+}
+
+/*
+ * Data used by setup_irq for the trap interrupt.
+ */
+static struct irqaction trap_irq = {
+ .name = "trap",
+ .flags = IRQF_DISABLED,
+ .handler = trap_interrupt_handler,
+};
+
+/*
+ * trap_cause_to_str()
+ * Convert a trap_cause into a series of printk
+ */
+static void trap_cause_to_str(long status)
+{
+ int bit;
+
+ if ((status & ((1 << TRAP_CAUSE_TOTAL) - 1)) == 0) {
+ printk(KERN_NOTICE "decode: UNKNOWN CAUSES\n");
+ return;
+ }
+
+ for (bit = 0; bit < TRAP_CAUSE_TOTAL; bit++) {
+ if (status & (1 << bit)) {
+ printk(KERN_NOTICE "\tdecode: %08x %s\n",
+ 1 << bit, trap_cause_strings[bit]);
+ }
+ }
+}
+
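+/*
+ * Worked example (illustrative): a trap_cause of 0x60 has bits 5
+ * and 6 set and decodes to the "src1 alignment error" and
+ * "dst alignment error" strings above.
+ */
+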
+/*
+ * trap_print_information()
+ * Print the cause of the trap and additional info.
+ */
+static void trap_print_information(const char *str, struct pt_regs *regs)
+{
+ printk(KERN_WARNING "\n");
+
+ if (current) {
+ printk(KERN_WARNING "Process %s (pid: %d)\n",
+ current->comm, current->pid);
+ }
+
+ if (current && current->mm) {
+ printk(KERN_NOTICE "text = 0x%p-0x%p data = 0x%p-0x%p\n"
+ KERN_NOTICE "bss = 0x%p-0x%p user-stack = 0x%p\n"
+ KERN_NOTICE "\n",
+ (void *)current->mm->start_code,
+ (void *)current->mm->end_code,
+ (void *)current->mm->start_data,
+ (void *)current->mm->end_data,
+ (void *)current->mm->end_data,
+ (void *)current->mm->brk,
+ (void *)current->mm->start_stack);
+ }
+
+ printk(KERN_WARNING "%s: Causes: 0x%08x\n", str,
+ (unsigned int)regs->trap_cause);
+ trap_cause_to_str(regs->trap_cause);
+ show_regs(regs);
+ show_stack(NULL, (unsigned long *)regs->an[7]);
+ printk(KERN_NOTICE "--- End Trap --- \n");
+}
+
+/*
+ * dump_stack()
+ * Dump the stack of the current task.
+ */
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/*
+ * show_stack()
+ * Print out information from the current stack.
+ */
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ /*
+ * Allocate just enough entries on the stack.
+ */
+ unsigned int calls[TRAP_MAX_STACK_DEPTH];
+ unsigned long code_start;
+ unsigned long code_end;
+ unsigned long ocm_code_start = (unsigned long)&__ocm_text_run_begin;
+ unsigned long ocm_code_end = (unsigned long)&__data_begin;
+ unsigned long stack_end = (unsigned long)(current->stack + THREAD_SIZE - 8);
+ unsigned long stack = (unsigned long)sp;
+ int kernel_stack = 1;
+
+ processor_dram(&code_start, &code_end);
+
+ /*
+ * Which task are we talking about.
+ */
+ if (!task) {
+ task = current;
+ }
+
+ /*
+ * Find the stack for the task if one was not specified. Otherwise
+ * use the specified stack.
+ */
+ if (!stack) {
+ if (task != current) {
+ stack = task->thread.sp;
+ stack_end = (unsigned long)task->stack + THREAD_SIZE - 8;
+ } else {
+ asm volatile (
+ "move.4 %0, SP \n\t"
+ : "=r" (stack)
+ );
+ }
+ }
+
+ printk(KERN_NOTICE "Starting backtrace: PID %d '%s'\n",
+ task->pid, task->comm);
+
+ /*
+	 * We do two passes: the first pass covers the kernel stack,
+	 * the second the user stack.
+ */
+ while (kernel_stack) {
+ unsigned long *handle;
+ unsigned int i, idx = 0;
+ struct pt_regs *pt = task_pt_regs(task);
+
+ /*
+ * If the task is in user mode, reset the start
+ * and end values for text.
+ */
+ if (__user_mode(stack)) {
+ if (!(task->personality & FDPIC_FUNCPTRS)) {
+ printk(KERN_NOTICE " User Stack:\n");
+ code_start = task->mm->start_code;
+ code_end = task->mm->end_code;
+ } else {
+ printk(KERN_NOTICE " User Stack (fdpic):\n");
+ show_vmas(task);
+ }
+ stack_end = task->mm->start_stack;
+ ocm_code_end = ocm_code_start = 0;
+ kernel_stack = 0;
+ } else {
+ printk(KERN_NOTICE " Kernel Stack:\n");
+ }
+
+ /*
+ * Collect the stack back trace information.
+ */
+ printk(" code[0x%lx-0x%lx]", code_start, code_end);
+ if (ocm_code_start) {
+ printk(" ocm_code[0x%lx-0x%lx]",
+ ocm_code_start, ocm_code_end);
+ }
+ printk("\n stack[0x%lx-0x%lx]\n", stack, stack_end);
+
+ handle = (unsigned long*)stack;
+ while (idx < TRAP_MAX_STACK_DEPTH) {
+ calls[idx] = stacktrace_iterate(&handle,
+ code_start, code_end,
+ ocm_code_start, ocm_code_end,
+ (unsigned long)stack, stack_end);
+ if (calls[idx] == 0) {
+ break;
+ }
+ idx++;
+ }
+
+ /*
+ * Now print out the data.
+ */
+ printk(KERN_NOTICE " CALL && CALLI on stack:");
+ for (i = 0; i < idx; i++) {
+ printk("%s0x%x, ", (i & 0x3) == 0 ? "\n " : "",
+ calls[i]);
+ }
+ printk(idx == TRAP_MAX_STACK_DEPTH ? "...\n" : "\n");
+
+ /*
+ * If we are doing user stack we are done
+ */
+ if (!kernel_stack) {
+ break;
+ }
+
+ /*
+ * Does this kernel stack have a mm (i.e. is it user)
+ */
+ if (!task->mm) {
+ printk("No mm for userspace stack.\n");
+ break;
+ }
+ /*
+ * Get the user-mode stack (if any)
+ */
+ stack = pt->an[7];
+ printk(KERN_NOTICE "Userspace stack at 0x%lx frame type %d\n",
+ stack, (int)pt->frame_type);
+ if (!__user_mode(stack)) {
+ break;
+ }
+ }
+}
+
+/*
+ * die_if_kernel()
+ *	Determine if we are in kernel mode and, if so, print diagnostics and die.
+ */
+void die_if_kernel(char *str, struct pt_regs *regs, long trap_cause)
+{
+ unsigned int s3value;
+
+ if (user_mode(regs)) {
+ return;
+ }
+
+ console_verbose();
+ trap_print_information(str, regs);
+
+ /*
+ * If the debugger is attached via the hardware mailbox protocol,
+ * go into an infinite loop and the debugger will figure things out.
+ */
+ asm volatile (
+ "move.4 %0, scratchpad3"
+ : "=r" (s3value)
+ );
+ if (s3value) {
+ asm volatile("1: jmpt.t 1b");
+ }
+
+ /*
+ * Set the debug taint value.
+ */
+ add_taint(TAINT_DIE);
+ do_exit(SIGSEGV);
+}
+
+/*
+ * trap_handler()
+ * Handle traps.
+ *
+ * Traps are treated as interrupts and registered with the LDSR. When
+ * the LDSR takes the interrupt, it will determine if a trap has occurred
+ * and service the trap prior to servicing the interrupt.
+ *
+ * This function is directly called by the LDSR.
+ */
+void trap_handler(int irq, struct pt_regs *regs)
+{
+ int sig = SIGSEGV;
+ siginfo_t info;
+ unsigned int trap_cause = regs->trap_cause;
+
+ BUG_ON(!irqs_disabled());
+
+ /*
+	 * Test if we are in kernel mode and die if so.
+ */
+ die_if_kernel("Kernel Trap", regs, trap_cause);
+
+ /*
+ * User process problem, setup a signal for this process
+ */
+ if ((trap_cause & (1 << TRAP_CAUSE_DST_RANGE_ERR)) ||
+ (trap_cause & (1 << TRAP_CAUSE_SRC1_RANGE_ERR)) ||
+ (trap_cause & (1 << TRAP_CAUSE_I_RANGE_ERR))) {
+ sig = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ } else if ((trap_cause & (1 << TRAP_CAUSE_DST_MISALIGNED)) ||
+ (trap_cause & (1 << TRAP_CAUSE_SRC1_MISALIGNED))) {
+ sig = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ } else if ((trap_cause & (1 << TRAP_CAUSE_DST_DECODE_ERR)) ||
+ (trap_cause & (1 << TRAP_CAUSE_SRC1_DECODE_ERR))) {
+ sig = SIGILL;
+ info.si_code = ILL_ILLOPN;
+ } else if ((trap_cause & (1 << TRAP_CAUSE_ILLEGAL_INST))) {
+ /*
+ * Check for software break point and if found signal trap
+ * not illegal instruction.
+ */
+ unsigned long instruction;
+ if (between(regs->pc, KERNELSTART, memory_end) &&
+ (regs->pc & 3) == 0 &&
+ get_user(instruction, (unsigned long *)regs->pc) == 0) {
+
+ /*
+ * This used to be 0xaabbccdd but it turns out
+ * that is now valid in ubicom32v4 isa so we
+ * have switched to 0xfabbccdd
+ */
+ if ((instruction == 0xfabbccdd) ||
+ (instruction == 0xaabbccdd)) {
+ sig = SIGTRAP;
+ info.si_code = TRAP_BRKPT;
+ goto send_signal;
+ }
+ }
+ sig = SIGILL;
+ info.si_code = ILL_ILLOPC;
+ } else if ((trap_cause & (1 << TRAP_CAUSE_I_DECODE_ERR))) {
+ sig = SIGILL;
+ info.si_code = ILL_ILLOPC;
+ } else if ((trap_cause & (1 << TRAP_CAUSE_DCAPT))) {
+ sig = SIGTRAP;
+ info.si_code = TRAP_TRACE;
+ }
+
+ /*
+	 * Print a trap information block to the console. Do not
+	 * print this above the cause checks because we don't want it
+	 * printed for software breakpoints.
+ */
+ trap_print_information("User Trap", regs);
+
+send_signal:
+
+ force_sig_info(sig, &info, current);
+
+ /*
+	 * Interrupts must still be disabled here; BUG if they were re-enabled.
+ */
+ if (!irqs_disabled()) {
+		printk(KERN_EMERG "interrupts enabled on exit, irq=%d, regs=%p\n",
+ irq, regs);
+ BUG();
+ }
+}
+
+/*
+ * trap_init_interrupt()
+ * We need a 2nd trap handling init that will occur after init_IRQ().
+ */
+void __init trap_init_interrupt(void)
+{
+ int err;
+ unsigned char tirq;
+ struct devtree_node *dn = (struct devtree_node *)tn;
+
+ /*
+ * Now setup the Software IRQ so that if a trap occurs the LDSR
+ * is started. The irq is there just to "force" the LDSR to run.
+ */
+ if (!tn) {
+ printk(KERN_WARNING "trap_init_interrupt skipped.\n");
+ return;
+ }
+
+ err = devtree_irq(dn, NULL, &tirq);
+ if (err) {
+ printk(KERN_WARNING "error obtaining trap irq value: %d\n",
+ err);
+ return;
+ }
+
+ if (tirq == DEVTREE_IRQ_NONE) {
+ printk(KERN_WARNING "trap irq not available: %d\n", tirq);
+ return;
+ }
+
+ err = setup_irq(tirq, &trap_irq);
+ if (err) {
+ printk(KERN_WARNING "trap irq setup failed: %d\n", err);
+ return;
+ }
+
+ /*
+ * Let ultra know which thread is handling the traps and
+ * what the interrupt to use is.
+ */
+ tn->intthread = ldsr_get_threadid();
+
+ /*
+ * Tell the LDSR about our IRQ so that it will unsuspend
+ * if one occurs while waiting for the per thread lock.
+ */
+ ldsr_set_trap_irq(tirq);
+}
+
+/*
+ * trap_init()
+ * init trap handling
+ *
+ * Trap handling is done through the LDSR. Every time an interrupt
+ * occurs, the LDSR looks for threads that are listed in the TRAP
+ * register and forces a call to the trap handler.
+ */
+void __init trap_init(void)
+{
+ /*
+ * If we do not have a trap node in the device tree, we leave the fault
+ * handling to the underlying hardware.
+ */
+ tn = (struct trapnode *)devtree_find_node("traps");
+ if (!tn) {
+ printk(KERN_WARNING "traps are not handled by linux\n");
+ return;
+ }
+}
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/uaccess.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/uaccess.c
new file mode 100644
index 000000000..2fe5f5f87
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/uaccess.c
@@ -0,0 +1,109 @@
+/*
+ * arch/ubicom32/kernel/uaccess.c
+ * User space memory access functions for Ubicom32 architecture.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/module.h>
+
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+
+extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
+
+/*
+ * __access_ok()
+ *	Check that the address range lies within the current process's space.
+ *
+ * NOTE: The kernel uses "pretend" user addresses that wind
+ * up calling access_ok() so this approach has only marginal
+ * value because you wind up with lots of false positives.
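+ * (For example, kernel code can pass kernel buffers through the same
+ * user-access helpers as genuine user pointers, so a strict user-range
+ * check here would wrongly reject them; see the disabled VMA check at
+ * the bottom of this file.)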
+ */
+int __access_ok(unsigned long addr, unsigned long size)
+{
+	/* struct vm_area_struct *vma; (used only by the #if 0 check below) */
+
+ /*
+ * Don't do anything if we are not a running system yet.
+ */
+ if (system_state != SYSTEM_RUNNING) {
+ return 1;
+ }
+
+ /*
+ * It appears that Linux will call this function even when we are not
+ * in the context of a user space application that has a VM address
+ * space. So we must check that current and mm are valid before
+ * performing the check.
+ */
+ if ((!current) || (!current->mm)) {
+ return 1;
+ }
+
+ /*
+ * We perform some basic checks on the address to ensure that it
+ * is at least within the range of DRAM.
+ */
+	if ((addr < (unsigned long)&_etext) || (addr > memory_end)) {
+ printk(KERN_WARNING "pid=%d[%s]: range [%lx - %lx] not in memory area: [%lx - %lx]\n",
+ current->pid, current->comm,
+ addr, addr + size,
+ memory_start, memory_end);
+ return 0;
+ }
+
+ /*
+ * For nommu Linux we can check this by looking at the allowed
+ * memory map for the process.
+ *
+	 * TODO: Since the kernel passes addresses in its own space as though
+	 * they were user addresses, we cannot validate the addresses this way.
+ */
+#if 0
+ if (!down_read_trylock(&current->mm->mmap_sem)) {
+ return 1;
+ }
+ vma = find_vma(current->mm, addr);
+ if (!vma) {
+ up_read(&current->mm->mmap_sem);
+		printk(KERN_WARNING "pid=%d[%s]: possible invalid access on range: [%lx - %lx]\n",
+ current->pid, current->comm, addr, addr + size);
+ return 1;
+ }
+ if ((addr + size) > vma->vm_end) {
+ up_read(&current->mm->mmap_sem);
+ printk(KERN_WARNING "pid=%d[%s]: possible invalid length on range: [%lx - %lx]\n",
+ current->pid, current->comm, addr, addr + size);
+ return 1;
+ }
+ up_read(&current->mm->mmap_sem);
+#endif
+ return 1;
+}
+
+EXPORT_SYMBOL(__access_ok);
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_context_switch.S b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_context_switch.S
new file mode 100644
index 000000000..08db4c057
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_context_switch.S
@@ -0,0 +1,359 @@
+/*
+ * arch/ubicom32/kernel/ubicom32_context_switch.S
+ * Implements context switch and return functions.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/ubicom32-common.h>
+#include <asm/ip5000.h>
+#include <asm/range-protect.h>
+
+/*
+ * begin_restore_context()
+ * Restore most of the context from sp (struct pt_reg *)
+ *
+ * This *can* be called without the global atomic lock (because sp is
+ * not restored). Only d15 and a3 may be used between this call and
+ * complete_restore_context.
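+ *
+ * (d15 and a3 are deliberately skipped here, note the commented-out
+ * moves below: callers may use them as scratch until
+ * complete_restore_context, which restores d15 and uses a3 as its
+ * pointer to the save area before restoring it last.)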
+ */
+.macro begin_restore_context
+ move.4 d0, PT_D0(sp)
+ move.4 d1, PT_D1(sp)
+ move.4 d2, PT_D2(sp)
+ move.4 d3, PT_D3(sp)
+ move.4 d4, PT_D4(sp)
+ move.4 d5, PT_D5(sp)
+ move.4 d6, PT_D6(sp)
+ move.4 d7, PT_D7(sp)
+ move.4 d8, PT_D8(sp)
+ move.4 d9, PT_D9(sp)
+ move.4 d10, PT_D10(sp)
+ move.4 d11, PT_D11(sp)
+ move.4 d12, PT_D12(sp)
+ move.4 d13, PT_D13(sp)
+ move.4 d14, PT_D14(sp)
+;; move.4 d15, PT_D15(sp)
+ move.4 a0, PT_A0(sp)
+ move.4 a1, PT_A1(sp)
+ move.4 a2, PT_A2(sp)
+;; move.4 a3, PT_A3(sp)
+ move.4 a4, PT_A4(sp)
+ move.4 a5, PT_A5(sp)
+ move.4 a6, PT_A6(sp)
+ move.4 acc0_hi, PT_ACC0HI(sp)
+ move.4 acc0_lo, PT_ACC0LO(sp)
+ move.4 mac_rc16, PT_MAC_RC16(sp)
+ move.4 acc1_hi, PT_ACC1HI(sp)
+ move.4 acc1_lo, PT_ACC1LO(sp)
+ move.4 source3, PT_SOURCE3(sp)
+ move.4 int_mask0, PT_INT_MASK0(sp)
+ move.4 int_mask1, PT_INT_MASK1(sp)
+.endm
+
+/*
+ * complete_restore_context()
+ * Completely restore the context from sp (struct pt_reg *)
+ *
+ * Note: Recovered PC and CSR are saved on the stack and are to be
+ * popped off before returning.
+ */
+.macro complete_restore_context
+ move.4 a3, sp
+ move.4 d15, PT_D15(sp)
+ move.4 sp, PT_SP(a3) ; Recover Stack pointer from save area
+ move.4 -4(sp)++, PT_PC(a3) ; Recover saved PC and save to stack
+ move.4 -4(sp)++, PT_CSR(a3) ; Recover saved csr and save to stack
+ move.4 a3, PT_A3(a3)
+.endm
+
+/*
+ * old restore_context macro
+ */
+.macro restore_context
+ begin_restore_context
+ complete_restore_context
+.endm
+
+/*
+ * ldsr_thread_enable_interrupts()
+ * An assembly version of the enable interrupts function.
+ *
+ * The stack is fair game but all registers MUST be preserved.
+ *
+ */
+.macro ldsr_thread_enable_interrupts
+ move.4 -4(sp)++, d3 ; Push d3
+ move.4 -4(sp)++, a3 ; Push a3
+
+ /*
+ * Read the ROSR and obtain ~(1 << tid)
+ */
+ lsr.4 d3, rosr, #0x2 ; Move the thread portion of ROSR into d3
+ lsl.4 d3, #1, d3 ; perform a (1 << tid)
+	not.4	d3, d3			; Complement: d3 = ~(1 << threadid)
+
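+	; Worked example (illustrative): for hardware thread 3, ROSR >> 2
+	; yields 3, 1 << 3 = 0x8, so d3 = ~0x8 = 0xfffffff7. ANDing this
+	; into scratchpad1 below clears thread 3's bit, re-enabling its
+	; interrupts.
+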
+ /*
+ * Get the value of the ldsr_soft_irq_mask
+ */
+ moveai a3, #%hi(ldsr_soft_irq_mask)
+ move.4 a3, %lo(ldsr_soft_irq_mask)(a3)
+
+ /*
+ * Now re-enable interrupts for this thread and then
+ * wakeup the LDSR.
+ */
+ and.4 scratchpad1, scratchpad1, d3
+ move.4 int_set0, a3
+
+ /*
+ * Restore the registers.
+ */
+ move.4 a3, (sp)4++
+ move.4 d3, (sp)4++
+.endm
+
+/*
+ * ret_from_interrupt_to_kernel()
+ * RFI function that is where do_IRQ() returns to if the thread was
+ * in kernel space.
+ */
+ .section .text.ret_from_interrupt_to_kernel, "ax", @progbits
+ .global ret_from_interrupt_to_kernel
+ret_from_interrupt_to_kernel:
+ begin_restore_context ; Restore the thread context
+ atomic_lock_acquire ; Enter critical section
+ complete_restore_context ; Restore the thread context
+ atomic_lock_release ; Leave critical section
+	ldsr_thread_enable_interrupts	; enable the thread's interrupts
+ move.4 csr, (sp)4++ ; Restore csr from the stack
+ ret (sp)4++
+
+/*
+ * ret_from_interrupt_to_user()
+ * RFI function that is where do_IRQ() returns to if the thread was
+ * in user space.
+ *
+ * TODO: Do we really need the critical section handling in this code?
+ *
+ */
+ .section .text.ret_from_interrupt_to_user, "ax", @progbits
+ .global ret_from_interrupt_to_user
+ret_from_interrupt_to_user:
+	ldsr_thread_enable_interrupts	; enable the thread's interrupts
+ /*
+ * Set a1 to the thread info pointer, no need to save it as we are
+ * restoring userspace and will never return
+ */
+ movei d0, #(~(ASM_THREAD_SIZE-1))
+ and.4 a1, sp, d0
+
+ /*
+ * Test if the scheduler needs to be called.
+ */
+ btst TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED
+ jmpeq.t 2f
+ call a5, schedule ; Call the scheduler. I will come back here.
+
+ /*
+ * See if we have pending signals and call do_signal
+ * if needed.
+ */
+2:
+ btst TI_FLAGS(a1), #ASM_TIF_SIGPENDING ; Any signals needed?
+ jmpeq.t 1f
+
+ /*
+ * Now call do_signal()
+ */
+ move.4 d0, #0 ; oldset pointer is NULL
+ move.4 d1, sp ; d1 is the regs pointer
+ call a5, do_signal ; Call do_signal()
+
+ /*
+ * Back from do_signal(), re-enter critical section.
+ */
+1:
+ begin_restore_context ; Restore the thread context
+ atomic_lock_acquire ; Enter critical section
+ call a3, __complete_and_return_to_userspace ; jump to unprotected section
+
+/*
+ * restore_all_registers()
+ *
+ * restore_all_registers will be the alternate exit route for
+ * preempted processes that have called a signal handler
+ * and are returning to user space.
+ */
+ .section .text.restore_all_registers, "ax", @progbits
+ .global restore_all_registers
+restore_all_registers:
+ begin_restore_context ; Restore the thread context
+ atomic_lock_acquire ; Enter critical section
+ call a3, __complete_and_return_to_userspace
+
+/*
+ * __complete_and_return_to_userspace
+ *
+ * restores the second half of the context and returns
+ * You must have the atomic lock when you call this function
+ */
+ .section .kernel_unprotected, "ax", @progbits
+__complete_and_return_to_userspace:
+ disable_kernel_ranges_for_current d15 ; disable kernel ranges
+ complete_restore_context ; restore previous context
+ atomic_lock_release ; Leave critical section
+ move.4 csr, (sp)4++ ; Restore csr from the stack
+ ret (sp)4++
+
+/*
+ * ret_from_fork()
+ * Called on the child's return from fork system call.
+ */
+ .section .text.ret_from_fork, "ax", @progbits
+ .global ret_from_fork
+ret_from_fork:
+ ;;; d0 contains the arg for schedule_tail
+ ;;; the others we don't care about as they are in PT_REGS (sp)
+ call a5, schedule_tail
+
+ atomic_lock_acquire ; Enter critical section
+
+ move.4 a3, sp
+ move.4 d0, PT_D0(a3) ; Restore D0
+ move.4 d1, PT_D1(a3) ; Restore D1
+ move.4 d2, PT_D2(a3) ; Restore D2
+ move.4 d3, PT_D3(a3) ; Restore D3
+ move.4 d10, PT_D10(a3) ; Restore D10
+ move.4 d11, PT_D11(a3) ; Restore D11
+ move.4 d12, PT_D12(a3) ; Restore D12
+ move.4 d13, PT_D13(a3) ; Restore D13
+ move.4 a1, PT_A1(a3) ; Restore A1
+ move.4 a2, PT_A2(a3) ; Restore A2
+ move.4 a5, PT_A5(a3) ; Restore A5
+ move.4 a6, PT_A6(a3) ; Restore A6
+ ;; I think atomic_lock_acquire could be moved here..
+ move.4 sp, PT_SP(a3) ; Restore sp
+ move.4 a4, PT_PC(a3) ; Restore pc in register a4
+ move.4 PT_FRAME_TYPE(a3), #0 ; Clear frame_type to indicate it is invalid.
+
+#ifdef CONFIG_PROTECT_KERNEL
+ call a3, __ret_from_fork_bottom_half
+ .section .kernel_unprotected, "ax", @progbits
+__ret_from_fork_bottom_half:
+ disable_kernel_ranges_for_current d15
+#endif
+ atomic_lock_release ; Leave critical section
+ calli a4, 0(a4) ; Return.
+
+/*
+ * __switch_to()
+ *
+ * Call with:
+ * void *__switch_to(struct task_struct *prev, struct thread_struct *prev_switch,
+ * struct thread_struct *next_switch)
+ */
+ .section .text.__switch_to, "ax", @progbits
+ .global __switch_to
+__switch_to:
+
+ /*
+ * Set up register a3 to point to save area.
+ */
+ movea a3, d1 ; a3 now holds prev_switch
+ move.4 (a3)4++, d10
+ move.4 (a3)4++, d11
+ move.4 (a3)4++, d12
+ move.4 (a3)4++, d13
+ move.4 (a3)4++, a1
+ move.4 (a3)4++, a2
+ move.4 (a3)4++, a5
+ move.4 (a3)4++, a6
+ move.4 (a3)4++, a7
+
+ /*
+ * Set up register a3 to point to restore area.
+ */
+ movea a3, d2 ; a3 now holds next_switch
+	move.4	d10, (a3)4++
+	move.4	d11, (a3)4++
+	move.4	d12, (a3)4++
+	move.4	d13, (a3)4++
+	move.4	a1, (a3)4++
+	move.4	a2, (a3)4++
+	move.4	a5, (a3)4++
+	move.4	a6, (a3)4++
+	move.4	a7, (a3)4++
+
+ /*
+ * Load the sw_ksp with the proper thread_info pointer.
+ */
+ movei d15, #(~(ASM_THREAD_SIZE-1))
+ and.4 a3, sp, d15 ; a3 now has the thread info pointer
+ moveai a4, #%hi(sw_ksp)
+ lea.1 a4, %lo(sw_ksp)(a4) ; a4 now has the base address of sw_ksp array
+	lsr.4	d15, ROSR, #2		; Thread number; bits 6 through 31 are zeroes anyway.
+	move.4	(a4, d15), a3		; Store the thread info pointer into the sw_ksp array.
+
+ /*
+ * We are done with context switch. Time to return..
+ */
+ calli a5, 0(a5)
+ .size __switch_to, . - __switch_to
+
+/*
+ * ubicom32_emulate_insn()
+ * Emulates the instruction.
+ *
+ * Call with:
+ * unsigned int ubicom32_emulate_insn(int source1, int source2, int source3, int *save_acc, int *save_csr);
+ */
+ .section .text.ubicom32_emulate_insn, "ax", @progbits
+ .global ubicom32_emulate_insn
+ .global trap_emulate
+ubicom32_emulate_insn:
+ movea a3, d3 ; a3 holds save_acc pointer
+	movea	a4, d4			; a4 holds save_csr pointer
+ move.4 source3, d2
+ move.4 acc0_lo, (a3)
+ move.4 acc0_hi, 4(a3)
+ move.4 acc1_lo, 8(a3)
+ move.4 acc1_hi, 12(a3)
+ move.4 mac_rc16, 16(a3)
+ move.4 CSR, (a4)
+ setcsr_flush 0
+
+trap_emulate:
+ move.4 d0, d1
+ setcsr_flush 0
+ move.4 (a4), CSR ; Save csr
+ move.4 (a3), acc0_lo
+ move.4 4(a3), acc0_hi
+ move.4 8(a3), acc1_lo
+ move.4 12(a3), acc1_hi
+ move.4 16(a3), mac_rc16
+ ret a5
+ .size ubicom32_emulate_insn, . - ubicom32_emulate_insn
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_ksyms.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_ksyms.c
new file mode 100644
index 000000000..ea7eb1575
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_ksyms.c
@@ -0,0 +1,98 @@
+/*
+ * arch/ubicom32/kernel/ubicom32_ksyms.c
+ * Ubicom32 architecture compiler support and misc symbols.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/module.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+
+#include <asm/setup.h>
+#include <asm/machdep.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/checksum.h>
+#include <asm/current.h>
+
+/* platform dependent support */
+
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+EXPORT_SYMBOL(ip_fast_csum);
+
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+/* The following are special because they're not called
+ explicitly (the C compiler generates them). Fortunately,
+ their interface isn't gonna change any time soon now, so
+ it's OK to leave it out of version control. */
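+/* For example, a large struct assignment may compile into an implicit
+   call to memcpy even though the source never names it, which is why
+   these symbols must be exported for modules. */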
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+
+#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 4) || __GNUC__ > 4
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divdi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+
+/* gcc lib functions */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+#else
+extern void __libgcc_udivmodsi(void);
+extern void __libgcc_divmodsi(void);
+
+EXPORT_SYMBOL(__libgcc_udivmodsi);
+EXPORT_SYMBOL(__libgcc_divmodsi);
+#endif
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_syscall.S b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_syscall.S
new file mode 100644
index 000000000..870f66c8f
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/ubicom32_syscall.S
@@ -0,0 +1,694 @@
+/*
+ * arch/ubicom32/kernel/ubicom32_syscall.S
+ * System call entry, exit and intercept routines.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <linux/unistd.h>
+
+#include <asm/ubicom32-common.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/range-protect.h>
+
+/*
+ * __old_system_call()
+ */
+ .section .old_syscall_entry.text, "ax", @progbits
+#ifdef CONFIG_OLD_40400010_SYSTEM_CALL
+__old_system_call:
+ call a3, system_call
+ .size __old_system_call, . - __old_system_call ;
+#else
+ /*
+ * something that will crash the userspace application, but
+	 * should not take down the kernel; if protection is enabled
+ * this will never even get executed.
+ */
+ .long 0xFABBCCDE ; illegal instruction
+ bkpt #-1 ; we will never get here
+#endif
+
+/*
+ * system_call()
+ */
+ .section .syscall_entry.text, "ax", @progbits
+ .global system_call
+system_call:
+ /*
+ * Regular ABI rules for function calls apply for syscall. d8 holds
+ * the syscall number. We will use that to index into the syscall table.
+ * d0 - d5 hold the parameters.
+ *
+ * First we get the current thread_info and swap to the kernel stack.
+ * This is done by reading the current thread and looking up the ksp
+ * from the sw_ksp array and storing it in a3.
+ *
+	 * Then we reserve space for the syscall context (a struct pt_regs),
+	 * addressing it through a4 initially and later through sp.
+ * Once sp is set to the kernel sp we can leave the critical section.
+ *
+ * For the user case the kernel stack will have the following layout.
+ *
+ * a3 ksp[0] +-----------------------+
+ * | Thread info area |
+ * | struct thread_info |
+ * +-----------------------+
+ * : :
+ * | Kernel Stack Area |
+ * | |
+ * a4 / sp >>> +-----------------------+
+ * | Context save area |
+ * | struct pt_reg |
+ * ksp[THREAD_SIZE-8] +-----------------------+
+ * | 8 Byte Buffer Zone |
+ * ksp[THREAD_SIZE] +-----------------------+
+ *
+ *
+ * For kernel syscalls the layout is as follows.
+ *
+ * a3 ksp[0] +-----------------------+
+ * | Thread info area |
+ * | struct thread_info |
+ * +-----------------------+
+ * : :
+ * | Kernel Stack Area |
+ * | |
+ * a4 / sp >>> +-----------------------+
+ * | Context save area |
+ * | struct pt_reg |
+ * sp at syscall entry +-----------------------+
+ * | Callers Kernel Stack |
+ * : :
+ *
+ * Once the context is saved we optionally call syscall_trace and setup
+ * the exit routine and jump to the syscall.
+ */
+
+ /*
+ * load the base address for sw_ksp into a3
+ * Note.. we cannot access it just yet as protection is still on.
+ */
+ moveai a3, #%hi(sw_ksp)
+ lea.1 a3, %lo(sw_ksp)(a3)
+
+ /*
+ * Enter critical section .
+ *
+	 * The 'critical' aspects here are switching to the ksp and
+ * changing the protection registers, these both use per thread
+ * information so we need to protect from a context switch. For now this
+ * is done using the global atomic lock.
+ */
+ atomic_lock_acquire
+
+ thread_get_self d15 ; Load current thread number
+#ifdef CONFIG_PROTECT_KERNEL
+ lsl.4 d9, #1, d15 ; Convert to thread bit
+ enable_kernel_ranges d9
+#endif
+ /*
+ * in order to reduce the size of code in the syscall section we get
+ * out of it right now
+ */
+ call a4, __system_call_bottom_half
+ .size system_call, . - system_call
+
+ .section .text.__system_call_bottom_half, "ax", @progbits
+__system_call_bottom_half:
+
+ /*
+	 * We need to determine if this is a kernel syscall or a user syscall.
+ * Start by loading the pointer for the thread_info structure for the
+ * current process in to a3.
+ */
+ move.4 a3, (a3, d15) ; a3 = sw_ksp[d15]
+
+ /*
+	 * Now if this is a kernel thread, the same value can be achieved by
+	 * masking off the lower bits of the current stack pointer.
+ */
+ movei d9, #(~(ASM_THREAD_SIZE-1)) ; load mask
+ and.4 d9, sp, d9 ; apply mask
+
+ /*
+ * d9 now has the masked version of the sp. If this is identical to
+ * what is in a3 then don't switch to ksp as we are already in the
+ * kernel.
+ */
+ sub.4 #0, a3, d9
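+					; (the #0 destination discards the
+					; result, so this is effectively a
+					; compare feeding the jmpne below)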
+
+ /*
+	 * If d9 and a3 are not equal, we are in userspace and have to switch
+	 * to the ksp.
+ */
+ jmpne.t 1f
+
+ /*
+ * Kernel Syscall.
+ *
+	 * The kernel has called this routine. We have to reserve space for
+	 * pt_regs below sp using pdec.
+ */
+ pdec a4, PT_SIZE(sp) ; a4 = ksp - PT_SIZE
+ jmpt.t 2f
+
+ /*
+ * Userspace Syscall.
+ *
+	 * Add THREAD_SIZE - 8 and subtract PT_SIZE to create the proper ksp
+ */
+1: movei d15, #(ASM_THREAD_SIZE - 8 - PT_SIZE)
+ lea.1 a4, (a3, d15) ; a4 = ksp + d15
+
+ /*
+ * Replace user stack pointer with kernel stack pointer (a4)
+ * Load -1 into frame_type in save area to indicate this is system call
+ * frame.
+ */
+2: move.4 PT_A7(a4), a7 ; Save old sp/A7 on kernel stack
+ move.4 PT_FRAME_TYPE(a4), #-1 ; Set the frame type.
+ move.4 sp, a4 ; Change to ksp.
+ /*
+ * We are now officially back in the kernel!
+ */
+
+ /*
+ * Now that we are on the ksp we can leave the critical section
+ */
+ atomic_lock_release
+
+ /*
+ * We need to save a0 because we need to be able to restore it in
+ * the event that we need to handle a signal. It's not generally
+ * a callee-saved register but is the GOT pointer.
+ */
+ move.4 PT_A0(sp), a0 ; Save A0 on kernel stack
+
+ /*
+ * We still need to save d10-d13, a1, a2, a5, a6 in the kernel frame
+ * for this process, we also save the system call params in the case of
+ * syscall restart. (note a7 was saved above)
+ */
+ move.4 PT_A1(sp), a1 ; Save A1 on kernel stack
+ move.4 PT_A2(sp), a2 ; Save A2 on kernel stack
+ move.4 PT_A5(sp), a5 ; Save A5 on kernel stack
+ move.4 PT_A6(sp), a6 ; Save A6 on kernel stack
+ move.4 PT_PC(sp), a5 ; Save A5 at the PC location
+ move.4 PT_D10(sp), d10 ; Save D10 on kernel stack
+ move.4 PT_D11(sp), d11 ; Save D11 on kernel stack
+ move.4 PT_D12(sp), d12 ; Save D12 on kernel stack
+ move.4 PT_D13(sp), d13 ; Save D13 on kernel stack
+
+ /*
+ * Now save the syscall parameters
+ */
+ move.4 PT_D0(sp), d0 ; Save d0 on kernel stack
+	move.4	PT_ORIGINAL_D0(sp), d0	; Save original d0 for syscall restart
+ move.4 PT_D1(sp), d1 ; Save d1 on kernel stack
+ move.4 PT_D2(sp), d2 ; Save d2 on kernel stack
+ move.4 PT_D3(sp), d3 ; Save d3 on kernel stack
+ move.4 PT_D4(sp), d4 ; Save d4 on kernel stack
+ move.4 PT_D5(sp), d5 ; Save d5 on kernel stack
+ move.4 PT_D8(sp), d8 ; Save d8 on kernel stack
+
+ /*
+ * Test if syscalls are being traced and if they are jump to syscall
+	 * trace (it will come back here)
+ */
+ btst TI_FLAGS(a3), #ASM_TIF_SYSCALL_TRACE
+ jmpne.f .Lsystem_call__trace
+.Lsystem_call__trace_complete:
+ /*
+ * Check for a valid call number [ 0 <= syscall_number < NR_syscalls ]
+ */
+ cmpi d8, #0
+ jmplt.f 3f
+ cmpi d8, #NR_syscalls
+ jmplt.t 4f
+
+ /*
+	 * They have passed an invalid number. Call sys_ni_syscall, starting by
+	 * loading a4 with the base address of sys_ni_syscall.
+ */
+3: moveai a4, #%hi(sys_ni_syscall)
+ lea.1 a4, %lo(sys_ni_syscall)(a4)
+ jmpt.t 5f ; Jump to regular processing
+
+ /*
+ * Validated syscall, load the syscall table base address into a3 and
+ * read the syscall ptr out.
+ */
+4: moveai a3, #%hi(sys_call_table)
+ lea.1 a3, %lo(sys_call_table)(a3) ; a3 = sys_call_table
+ move.4 a4, (a3, d8) ; a4 = sys_call_table[d8]
+
+ /*
+ * Before calling the syscall, setup a5 so that syscall_exit is called
+ * on return from syscall
+ */
+5: moveai a5, #%hi(syscall_exit) ; Setup return address
+ lea.1 a5, %lo(syscall_exit)(a5) ; from system call
+
+ /*
+	 * If the syscall is __NR_rt_sigreturn then we have to test d1 to
+	 * figure out if we have to change the return routine to restore
+ * all registers.
+ */
+ cmpi d8, #__NR_rt_sigreturn
+ jmpeq.f 6f
+
+ /*
+ * Launch system call (it will return through a5 - syscall_exit)
+ */
+ calli a3, 0(a4)
+
+ /*
+ * System call is rt_sigreturn. Test d1. If it is 1 we have to
+ * change the return address to restore_all_registers
+ */
+6: cmpi d1, #1
+ jmpne.t 7f
+
+ moveai a5, #%hi(restore_all_registers) ; Setup return address
+ lea.1 a5, %lo(restore_all_registers)(a5) ; to restore_all_registers.
+
+ /*
+ * Launch system call (it will return through a5)
+ */
+7: calli a3, 0(a4) ; Launch system call
+
+.Lsystem_call__trace:
+ /*
+ * Syscalls are being traced.
+ * Call syscall_trace, (return here)
+ */
+ call a5, syscall_trace
+
+ /*
+ * Restore syscall state (it would have been discarded during the
+ * syscall trace)
+ */
+ move.4 d0, PT_D0(sp) ; Restore d0 from kernel stack
+ move.4 d1, PT_D1(sp) ; Restore d1 from kernel stack
+ move.4 d2, PT_D2(sp) ; Restore d2 from kernel stack
+ move.4 d3, PT_D3(sp) ; Restore d3 from kernel stack
+ move.4 d4, PT_D4(sp) ; Restore d4 from kernel stack
+ move.4 d5, PT_D5(sp) ; Restore d5 from kernel stack
+ /* add this back if we ever have a syscall with 7 args */
+ move.4 d8, PT_D8(sp) ; Restore d8 from kernel stack
+
+ /*
+ * return to syscall
+ */
+ jmpt.t .Lsystem_call__trace_complete
+ .size __system_call_bottom_half, . - __system_call_bottom_half
+
+/*
+ * syscall_exit()
+ */
+	.section .text.syscall_exit, "ax", @progbits
+ .global syscall_exit
+syscall_exit:
+ /*
+ * d0 contains the return value. We should move that into the kernel
+ * stack d0 location. We will be transitioning from kernel to user
+ * mode. Test the flags and see if we have to call schedule. If we are
+	 * going to truly exit then all that has to be done is to restore
+	 * d0, a0, a1, a2, a5, a6 and sp (a7) from the kernel stack
+ * and then return via a5.
+ */
+
+ /*
+ * Save d0 to pt_regs
+ */
+ move.4 PT_D0(sp), d0 ; Save d0 into the kernel stack
+
+ /*
+ * load the thread_info structure by masking off the THREAD_SIZE
+ * bits.
+ *
+ * Note: we used to push a1, but now we don't as we are going
+ * to eventually restore it to the userspace a1.
+ */
+ movei d9, #(~(ASM_THREAD_SIZE-1))
+ and.4 a1, sp, d9
+
+ /*
+	 * Are any interesting bits set in the TI flags? If so, jump
+	 * aside to post processing.
+ */
+ move.4 d9, #(_TIF_SYSCALL_TRACE | _TIF_NEED_RESCHED | _TIF_SIGPENDING)
+ and.4 #0, TI_FLAGS(a1), d9
+ jmpne.f .Lsyscall_exit__post_processing ; jump to handler
+.Lsyscall_exit__post_processing_complete:
+
+ move.4 d0, PT_D0(sp) ; Restore D0 from kernel stack
+ move.4 d1, PT_D1(sp) ; Restore d1 from kernel stack
+ move.4 d2, PT_D2(sp) ; Restore d2 from kernel stack
+ move.4 d3, PT_D3(sp) ; Restore d3 from kernel stack
+ move.4 d4, PT_D4(sp) ; Restore d4 from kernel stack
+ move.4 d5, PT_D5(sp) ; Restore d5 from kernel stack
+ move.4 d8, PT_D8(sp) ; Restore d8 from kernel stack
+ move.4 d10, PT_D10(sp) ; Restore d10 from kernel stack
+ move.4 d11, PT_D11(sp) ; Restore d11 from kernel stack
+ move.4 d12, PT_D12(sp) ; Restore d12 from kernel stack
+ move.4 d13, PT_D13(sp) ; Restore d13 from kernel stack
+ move.4 a1, PT_A1(sp) ; Restore A1 from kernel stack
+ move.4 a2, PT_A2(sp) ; Restore A2 from kernel stack
+ move.4 a5, PT_A5(sp) ; Restore A5 from kernel stack
+ move.4 a6, PT_A6(sp) ; Restore A6 from kernel stack
+	move.4	a0, PT_A0(sp)		; Restore A0 from kernel stack
+
+ /*
+ * this is only for debug, and could be removed for production builds
+ */
+ move.4 PT_FRAME_TYPE(sp), #0 ; invalidate frame_type
+
+#ifdef CONFIG_PROTECT_KERNEL
+
+ call a4, __syscall_exit_bottom_half
+
+ .section .kernel_unprotected, "ax", @progbits
+__syscall_exit_bottom_half:
+ /*
+ * Enter critical section
+ */
+ atomic_lock_acquire
+ disable_kernel_ranges_for_current d15
+#endif
+ /*
+ * Lastly restore userspace stack ptr
+ *
+	 * Note that when protection is on we need to hold the lock around the
+ * stack swap as well because otherwise the protection could get
+ * inadvertently disabled again at the end of a context switch.
+ */
+ move.4 a7, PT_A7(sp) ; Restore A7 from kernel stack
+
+ /*
+ * We are now officially back in userspace!
+ */
+
+#ifdef CONFIG_PROTECT_KERNEL
+ /*
+ * Leave critical section and return to user space.
+ */
+ atomic_lock_release
+#endif
+ calli a5, 0(a5) ; Back to userspace code.
+
+ bkpt #-1 ; we will never get here
+
+ /*
+ * Post syscall processing. (unlikely part of syscall_exit)
+ *
+	 * Are we tracing syscalls? If TIF_SYSCALL_TRACE is set, call the
+	 * syscall_trace routine and return here.
+ */
+ .section .text.syscall_exit, "ax", @progbits
+.Lsyscall_exit__post_processing:
+ btst TI_FLAGS(a1), #ASM_TIF_SYSCALL_TRACE
+ jmpeq.t 1f
+ call a5, syscall_trace
+
+ /*
+	 * Do we need to reschedule, i.e. call schedule()? If TIF_NEED_RESCHED
+	 * is set, call the scheduler; it will come back here.
+ */
+1: btst TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED
+ jmpeq.t 2f
+ call a5, schedule
+
+ /*
+	 * Do we need to post a signal? If TIF_SIGPENDING is set, call
+	 * do_signal.
+ */
+2: btst TI_FLAGS(a1), #ASM_TIF_SIGPENDING
+ jmpeq.t .Lsyscall_exit__post_processing_complete
+
+ /*
+ * setup the do signal call
+ */
+ move.4 d0, #0 ; oldset pointer is NULL
+ lea.1 d1, (sp) ; d1 is the regs pointer.
+ call a5, do_signal
+
+ jmpt.t .Lsyscall_exit__post_processing_complete
+
+/* .size syscall_exit, . - syscall_exit */
+
+/*
+ * kernel_execve()
+ *	kernel_execve is called when the kernel is starting a
+ *	userspace application.
+ */
+ .section .kernel_unprotected, "ax", @progbits
+ .global kernel_execve
+kernel_execve:
+ move.4 -4(sp)++, a5 ; Save return address
+ /*
+ * Call execve
+ */
+ movei d8, #__NR_execve ; call execve
+ call a5, system_call
+ move.4 a5, (sp)4++
+
+ /*
+	 * Kernel range protection was re-enabled at syscall exit, but we
+	 * are returning to the kernel, so we must enable the ranges again.
+ */
+#ifdef CONFIG_PROTECT_KERNEL
+ /*
+ * We are entering the kernel so we need to disable the protection.
+ * Enter critical section, disable ranges and leave critical section.
+ */
+ call a3, __enable_kernel_ranges ; and jump back to kernel
+#else
+ ret a5 ; jump back to the kernel
+#endif
+
+ .size kernel_execve, . - kernel_execve
+
+/*
+ * signal_trampoline()
+ *
+ * Deals with transitioning to userspace signal handlers and returning
+ * to userspace, only called from the kernel.
+ *
+ */
+ .section .kernel_unprotected, "ax", @progbits
+ .global signal_trampoline
+signal_trampoline:
+ /*
+ * signal_trampoline is called when we are jumping from the kernel to
+ * the userspace signal handler.
+ *
+	 * The following registers are relevant. (see setup_rt_frame)
+ * sp is the user space stack not the kernel stack
+ * d0 = signal number
+ * d1 = siginfo_t *
+ * d2 = ucontext *
+ * d3 = the user space signal handler
+ * a0 is set to the GOT if userspace application is FDPIC, otherwise 0
+ * a3 is set to the FD for the signal if userspace application is FDPIC
+ */
+#ifdef CONFIG_PROTECT_KERNEL
+ /*
+ * We are leaving the kernel so we need to enable the protection.
+ * Enter critical section, disable ranges and leave critical section.
+ */
+ atomic_lock_acquire ; Enter critical section
+ disable_kernel_ranges_for_current d15 ; disable kernel ranges
+ atomic_lock_release ; Leave critical section
+#endif
+ /*
+	 * The signal handler pointer is in register d3 so transfer it to a4 and
+ * call it
+ */
+ movea a4, d3 ; signal handler
+ calli a5, 0(a4)
+
+ /*
+	 * Return to userspace through rt_syscall, which is stored on top of
+	 * the stack; d1 contains the ret_via_interrupt status.
+ */
+ move.4 d8, (sp) ; d8 (syscall #) = rt_syscall
+ move.4 d1, 4(sp) ; d1 = ret_via_interrupt
+ call a5, system_call ; as we are 'in' the kernel
+ ; we can call kernel_syscall
+
+ bkpt #-1 ; will never get here.
+ .size signal_trampoline, . - signal_trampoline
+
+/*
+ * kernel_thread_helper()
+ *
+ * Entry point for kernel threads (only referenced by kernel_thread()).
+ *
+ * On execution d0 will be 0, d1 will be the argument to be passed to the
+ * kernel function.
+ * d2 contains the kernel function that needs to get called.
+ * d3 will contain the address of do_exit, which needs to get moved into a5.
+ *
+ * On return from fork, the child thread's d0 will be 0. We call this dummy
+ * function, which in turn loads the argument and calls the kernel function.
+ */
+ .section .kernel_unprotected, "ax", @progbits
+ .global kernel_thread_helper
+kernel_thread_helper:
+ /*
+ * Create a kernel thread. This is called from ret_from_vfork (a
+ * userspace return routine) so we need to put it in an unprotected
+ * section and re-enable protection before calling the vector in d2.
+ */
+
+#ifdef CONFIG_PROTECT_KERNEL
+ /*
+ * We are entering the kernel so we need to disable the protection.
+ * Enter critical section, disable ranges and leave critical section.
+ */
+ call a5, __enable_kernel_ranges
+#endif
+ /*
+	 * Move the argument for the kernel function into d0, set the return
+	 * address (a5) to do_exit, and jump through the pointer in d2.
+ */
+ move.4 d0, d1 ; d0 = arg
+ move.4 a5, d3 ; a5 = do_exit
+ ret d2 ; call function ptr in d2
+ .size kernel_thread_helper, . - kernel_thread_helper
+
+#ifdef CONFIG_PROTECT_KERNEL
+ .section .kernel_unprotected, "ax", @progbits
+__enable_kernel_ranges:
+ atomic_lock_acquire ; Enter critical section
+ enable_kernel_ranges_for_current d15
+ atomic_lock_release ; Leave critical section
+ calli a5, 0(a5)
+ .size __enable_kernel_ranges, . - __enable_kernel_ranges
+
+#endif
+
+/*
+ * The following are system call intercept functions where we set up the
+ * input to the real system call. In all cases these are just taking
+ * the current sp which is pointing to pt_regs and pushing it into the
+ * last arg of the system call.
+ *
+ * i.e. the public definition of sys_execve is
+ * sys_execve( char *name,
+ * char **argv,
+ * char **envp )
+ * but process.c defines it as
+ * sys_execve( char *name,
+ * char **argv,
+ * char **envp,
+ * struct pt_regs *regs )
+ *
+ * so execve_intercept needs to populate the 4th arg with pt_regs*,
+ * which is the stack pointer as we know we must be coming out of
+ * system_call
+ *
+ * The intercept vectors are referenced by syscalltable.S
+ */
+
+/*
+ * execve_intercept()
+ */
+ .section .text.execve_intercept, "ax", @progbits
+ .global execve_intercept
+execve_intercept:
+ move.4 d3, sp ; Save pt_regs address
+ call a3, sys_execve
+
+ .size execve_intercept, . - execve_intercept
+
+/*
+ * vfork_intercept()
+ */
+ .section .text.vfork_intercept, "ax", @progbits
+ .global vfork_intercept
+vfork_intercept:
+ move.4 d0, sp ; Save pt_regs address
+ call a3, sys_vfork
+
+ .size vfork_intercept, . - vfork_intercept
+
+/*
+ * clone_intercept()
+ */
+ .section .text.clone_intercept, "ax", @progbits
+ .global clone_intercept
+clone_intercept:
+ move.4 d2, sp ; Save pt_regs address
+ call a3, sys_clone
+
+ .size clone_intercept, . - clone_intercept
+
+/*
+ * sys_sigsuspend()
+ */
+	.section .text.sys_sigsuspend, "ax", @progbits
+ .global sys_sigsuspend
+sys_sigsuspend:
+ move.4 d0, sp ; Pass pointer to pt_regs in d0
+ call a3, do_sigsuspend
+
+ .size sys_sigsuspend, . - sys_sigsuspend
+
+/*
+ * sys_rt_sigsuspend()
+ */
+ .section .text.sys_rt_sigsuspend, "ax", @progbits
+ .global sys_rt_sigsuspend
+sys_rt_sigsuspend:
+ move.4 d0, sp ; Pass pointer to pt_regs in d0
+ call a3, do_rt_sigsuspend
+
+ .size sys_rt_sigsuspend, . - sys_rt_sigsuspend
+
+/*
+ * sys_rt_sigreturn()
+ */
+ .section .text.sys_rt_sigreturn, "ax", @progbits
+ .global sys_rt_sigreturn
+sys_rt_sigreturn:
+ move.4 d0, sp ; Pass pointer to pt_regs in d0
+ call a3, do_rt_sigreturn
+
+ .size sys_rt_sigreturn, . - sys_rt_sigreturn
+
+/*
+ * sys_sigaltstack()
+ */
+ .section .text.sys_sigaltstack, "ax", @progbits
+ .global sys_sigaltstack
+sys_sigaltstack:
+ move.4 d0, sp ; Pass pointer to pt_regs in d0
+ call a3, do_sys_sigaltstack
+
+ .size sys_sigaltstack, . - sys_sigaltstack
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/unaligned_trap.c b/target/linux/ubicom32/files/arch/ubicom32/kernel/unaligned_trap.c
new file mode 100644
index 000000000..d856d061d
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/unaligned_trap.c
@@ -0,0 +1,698 @@
+/*
+ * arch/ubicom32/kernel/unaligned_trap.c
+ * Handle unaligned traps in both user or kernel space.
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+#define FALSE 0
+#define TRUE 1
+
+/* no possible trap */
+#define UNUSED 0
+/* possible source operand trap */
+#define SRC 1
+#define SRC_2 2
+/* possible destination operand trap */
+#define DEST 3
+#define DEST_2 4
+/* can be either source or destination or both */
+#define TWO_OP 5
+#define TWO_OP_2 6
+
+/* TODO: What is the real value here, put something in to make it compile for
+ * now */
+#define MOVE_2 0x0d
+#define LSL_2 0x11
+#define LSR_2 0x13
+#define MOVEI 0x19
+#define CMPI 0x18
+
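+/*
+ * The format tables below classify each opcode (and, for ops 0, 2 and
+ * 6, each sub-opcode) by which operand fields may be memory references:
+ * e.g. a TWO_OP entry means both the source-1 and destination fields
+ * can address memory, so either one may have taken the unaligned trap.
+ * The _2 variants mark 2-byte (rather than 4-byte) accesses.
+ */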
+static int op_format[32] =
+{
+ TWO_OP, /* 0x00 */
+ UNUSED,
+ SRC,
+ UNUSED,
+ TWO_OP, /* 0x04 */
+ TWO_OP,
+ SRC,
+ UNUSED,
+ TWO_OP_2, /* 0x08 */
+ TWO_OP,
+ TWO_OP_2,
+ TWO_OP,
+ TWO_OP_2, /* 0x0C */
+ TWO_OP,
+ TWO_OP_2,
+ TWO_OP,
+ TWO_OP, /* 0x10 */
+ TWO_OP_2,
+ TWO_OP,
+ TWO_OP,
+ UNUSED, /* 0x14 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ SRC_2, /* 0x18 */
+ DEST_2,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x1C */
+ UNUSED,
+ UNUSED, /* unaligned CALLI will not be fixed. */
+ UNUSED
+};
+
+static int op_0_format[32] =
+{
+ UNUSED, /* 0x00 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x04 - ret don't fix - bad ret is always wrong */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x08 */
+ UNUSED,
+ TWO_OP,
+ TWO_OP_2,
+ TWO_OP, /* 0x0c */
+ TWO_OP_2,
+ TWO_OP,
+ UNUSED, /* .1 can't trap */
+ UNUSED, /* 0x10 */
+ UNUSED,
+ SRC,
+ UNUSED,
+ UNUSED, /* 0x14 */
+ TWO_OP_2,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x18 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ DEST, /* 0x1c */
+ DEST,
+ DEST,
+ DEST, /* all lea have 32-bit destination */
+};
+
+static int op_2_format[32] =
+{
+ UNUSED, /* 0x00 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x04 */
+ UNUSED,
+ SRC,
+ UNUSED,
+ UNUSED, /* 0x08 crcgen is .1 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x0c */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ SRC, /* 0x10 */
+ SRC_2,
+ SRC,
+ SRC_2,
+ SRC, /* 0x14 */
+ SRC_2,
+ SRC,
+ UNUSED,
+ UNUSED, /* 0x18 */
+ UNUSED,
+ SRC,
+ UNUSED,
+ SRC, /* 0x1c */
+ UNUSED,
+ SRC_2,
+ UNUSED,
+};
+
+static int op_6_format[32] =
+{
+ SRC_2, /* 0x00 */
+ SRC_2,
+ SRC_2,
+ SRC_2,
+ SRC_2, /* 0x04 */
+ SRC_2,
+ UNUSED,
+ SRC_2,
+ SRC, /* 0x08 MULS.4 */
+ SRC_2,
+ SRC,
+ UNUSED,
+ UNUSED, /* 0x0c */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ SRC, /* 0x10 */
+ SRC_2,
+ SRC,
+ SRC_2,
+ UNUSED, /* 0x14 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x18 */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+ UNUSED, /* 0x1c */
+ UNUSED,
+ UNUSED,
+ UNUSED,
+};
+
+/*
+ * unaligned_get_address()
+ *	Compute an operand address using the save_an and save_dn registers,
+ *	updating save_an when the addressing mode has side effects.
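+ *
+ * (Sketch of the specifier encoding, as implied by the decode below
+ * rather than an ISA reference: bits 10:8 select the mode, 2 = An with
+ * a pre/post-modify side effect, 3 = An indexed by a Dn register,
+ * other values = An plus a scaled immediate offset; bits 7:5 give the
+ * address register; the low bits hold the offset or index register.)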
+ */
+unsigned char *unaligned_get_address(int thread, int specifier, int four_byte,
+ unsigned int save_an[],
+ unsigned int save_dn[], int *write_back_an)
+{
+ unsigned char *address;
+
+ int areg = (specifier >> 5) & 7;
+ if ((specifier >> 8) == 2) {
+ int offset = specifier & 0xf;
+ offset = ((offset << 28) >> 28);
+ if (likely(four_byte)) {
+ offset <<= 2;
+ } else {
+ offset <<= 1;
+ }
+ if (specifier & 0x10) {
+ address = (unsigned char *)(save_an[areg] + offset);
+ } else {
+ address = (unsigned char *)save_an[areg];
+ }
+ save_an[areg] = save_an[areg] + offset;
+
+ /*
+ * Let caller know An registers have been modified.
+ */
+ *write_back_an = 1;
+ } else if ((specifier >> 8) == 3) {
+ int dreg = specifier & 0xf;
+ if (likely(four_byte)) {
+ address = (unsigned char *)(save_an[areg] +
+ (save_dn[dreg] << 2));
+ } else {
+ address = (unsigned char *)(save_an[areg] +
+ (save_dn[dreg] << 1));
+ }
+ } else {
+ int offset = ((specifier >> 3) & 0x60) | (specifier & 0x1f);
+ if (likely(four_byte)) {
+ address = (unsigned char *)(save_an[areg] +
+ (offset << 2));
+ } else {
+ address = (unsigned char *)(save_an[areg] +
+ (offset << 1));
+ }
+ }
+
+ return address;
+}
+
+static int save_dn[16];
+static int save_an[8];
+static int save_acc[5];
+
+/*
+ * unaligned_emulate()
+ * emulate the instruction at thread's pc that has taken an unaligned data
+ * trap.
+ *
+ * source or destination or both might be unaligned
+ * the instruction must have a memory source or destination or both
+ * the emulated instruction is copied and executed in this thread
+ *
+ * TODO: Protection is handled outside of this function
+ * TODO: handling simultaneous unaligned and memory protection traps
+ *
+ * Get thread state
+ * the PC and instruction (and local copy, emulate_inst), and An
+ * and Dn registers
+ *	All implicit source state (source3, CSR, accumulators)
+ *
+ * if the instruction has a memory source
+ * Use the instruction, An and Dn registers to form src_address
+ * get unaligned source data from src_address (usually sign
+ * extended)
+ * (2 bytes, with or without sign extension, or 4 bytes)
+ * modify emulate_inst to use d0 as source
+ * else
+ *		get the source operand from one of the thread's registers
+ * if instruction has a memory destination
+ * Use the instruction, An and Dn registers to form dest_address
+ * modify emulate_inst to use d0 as destination
+ * if there was a memory source
+ * put the source data in thread's d0
+ * get the source-2 Dn operand and source 3 operand from thread
+ * execute modified inst
+ * (save it, flush caches, set up local values for implicit
+ * sources, execute, save explicit and implicit results)
+ * if inst has destination address
+ * copy result to dest_address, possibly unaligned, 1, 2, or 4
+ * bytes
+ * restore thread's implicit results (modified address registers, CSR,
+ * accumulators) add 4 to thread's pc
+ */
+void unaligned_emulate(unsigned int thread)
+{
+ unsigned int pc;
+ unsigned int inst;
+ unsigned int op;
+ unsigned int subop;
+ int format;
+ unsigned int emulate_inst;
+ int four_byte;
+ int src_operand, dest_operand;
+ int save_csr;
+ int source3;
+ unsigned int source1;
+ unsigned int source_data;
+ unsigned char *dest_address = NULL;
+ int source2 = 0;
+ unsigned int result;
+ unsigned int write_back_an = 0;
+ unsigned int chip_id_copy;
+
+ extern unsigned int trap_emulate;
+ extern unsigned int ubicom32_emulate_insn(int source1, int source2,
+ int source3, int *save_acc,
+ int *save_csr);
+
+ /*
+ * get the chip_id
+ */
+ asm volatile (
+ " move.4 %0, chip_id \n\t" /* get chip_id. */
+ : "=r"(chip_id_copy)
+ :
+ );
+
+ /*
+ * get the pc
+ */
+ asm volatile (
+ " move.4 CSR, %1 \n\t" /* set source thread in
+ * CSR */
+ " setcsr_flush 0 \n\t"
+ " move.4 %0, pc \n\t"
+ " move.4 CSR, #0 \n\t" /* restore CSR */
+ " setcsr_flush 0 \n\t"
+ : "=a"(pc)
+ : "d" ((1 << 8) | (thread << 9))
+ : "cc"
+ );
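+
+	/*
+	 * A note on the CSR constants used in the asm blocks here:
+	 * (1 << 8) | (thread << 9) selects "thread" as the source context
+	 * and (1 << 14) | (thread << 15) selects it as the destination
+	 * context. (This reading follows the inline comments in this file,
+	 * not an ISA reference.)
+	 */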
+
+ inst = *((unsigned int *)pc);
+ op = inst >> 27;
+ if (unlikely(op == 2 || op == 6)) {
+ subop = (inst >> 21) & 0x1f;
+ } else {
+ subop = (inst >> 11) & 0x1f;
+ }
+ format = op_format[op];
+ emulate_inst = inst;
+
+ if (op == 0) {
+ format = op_0_format[subop];
+ } else if (op == 2) {
+ format = op_2_format[subop];
+ } else if (op == 6) {
+ format = op_6_format[subop];
+ }
+
+ if (unlikely(format == UNUSED)) {
+ /*
+ * We are not going to emulate this. Bump PC by 4 and move on.
+ */
+ asm volatile (
+ " move.4 CSR, %0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 pc, %1 \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ :
+ : "d"((1 << 14) | (thread << 15)), "d"(pc + 4)
+ : "cc"
+ );
+ return;
+ }
+
+ four_byte = (format == TWO_OP || format == DEST || format == SRC);
+
+ /*
+ * source or destination memory operand needs emulation
+ */
+ src_operand = (format == SRC ||
+ format == SRC_2 ||
+ format == TWO_OP ||
+ format == TWO_OP_2) &&
+ ((inst >> 8) & 7) > 1;
+
+ dest_operand = (format == DEST ||
+ format == DEST_2 ||
+ format == TWO_OP ||
+ format == TWO_OP_2) &&
+ ((inst >> 24) & 7) > 1;
+
+ /*
+ * get thread's implicit sources (not covered by source context select).
+ * data and address registers and CSR (for flag bits) and src3 and
+ * accumulators
+ */
+ asm volatile (
+ " move.4 CSR, %2 \n\t" /* set source thread in
+ * CSR */
+ " setcsr_flush 0 \n\t"
+ " move.4 (%3), d0 \n\t" /* get dn registers */
+ " move.4 4(%3), d1 \n\t"
+ " move.4 8(%3), d2 \n\t"
+ " move.4 12(%3), d3 \n\t"
+ " move.4 16(%3), d4 \n\t"
+ " move.4 20(%3), d5 \n\t"
+ " move.4 24(%3), d6 \n\t"
+ " move.4 28(%3), d7 \n\t"
+ " move.4 32(%3), d8 \n\t"
+ " move.4 36(%3), d9 \n\t"
+ " move.4 40(%3), d10 \n\t"
+ " move.4 44(%3), d11 \n\t"
+ " move.4 48(%3), d12 \n\t"
+ " move.4 52(%3), d13 \n\t"
+ " move.4 56(%3), d14 \n\t"
+ " move.4 60(%3), d15 \n\t"
+ " move.4 (%4), a0 \n\t" /* get an registers */
+ " move.4 4(%4), a1 \n\t"
+ " move.4 8(%4), a2 \n\t"
+ " move.4 12(%4), a3 \n\t"
+ " move.4 16(%4), a4 \n\t"
+ " move.4 20(%4), a5 \n\t"
+ " move.4 24(%4), a6 \n\t"
+ " move.4 28(%4), a7 \n\t"
+ " move.4 %0, CSR \n\t" /* get csr and source3
+ * implicit operands */
+ " move.4 %1, source3 \n\t"
+ " move.4 (%5), acc0_lo \n\t" /* get accumulators */
+ " move.4 4(%5), acc0_hi \n\t"
+ " move.4 8(%5), acc1_lo \n\t"
+ " move.4 12(%5), acc1_hi \n\t"
+ " move.4 16(%5), mac_rc16 \n\t"
+ " move.4 CSR, #0 \n\t" /* restore CSR */
+ " setcsr_flush 0 \n\t"
+ : "=m"(save_csr), "=m"(source3)
+ : "d"((1 << 8) | (thread << 9)),
+ "a"(save_dn), "a"(save_an), "a"(save_acc)
+ : "cc"
+ );
+
+ /*
+	 * Sanity check: the saved CSR must not have thread select bits set.
+ */
+ BUG_ON((save_csr & 0x04100) != 0);
+ if (unlikely(save_csr & 0x04100)) {
+ /*
+ * Things are in funny state as thread select bits are on in
+ * csr. PANIC.
+ */
+ panic("In unaligned trap handler. Trap thread CSR has thread "
+ "select bits on.\n");
+ }
+
+ save_csr = save_csr & 0x1000ff;
+
+ /*
+ * get the source1 operand
+ */
+ source1 = 0;
+ if (src_operand) {
+ unsigned char *src_address;
+
+ /*
+ * source1 comes from memory
+ */
+ BUG_ON(!(format == TWO_OP || format == TWO_OP_2 ||
+ format == SRC || format == SRC_2));
+ src_address = unaligned_get_address(thread, inst & 0x7ff,
+ four_byte, save_an,
+ save_dn, &write_back_an);
+
+ /*
+ * get data (possibly unaligned)
+ */
+ if (likely(four_byte)) {
+ source_data = (*src_address << 24) |
+ (*(src_address + 1) << 16) |
+ (*(src_address + 2) << 8) |
+ *(src_address + 3);
+ source1 = source_data;
+ } else {
+ source1 = *src_address << 8 |
+ *(src_address + 1);
+
+ /*
+			 * Source is not extended if the instruction is MOVE.2 or
+ * if the cpu CHIP_ID >= 0x30000 and the instruction is
+ * either LSL.2 or LSR.2. All other cases have to be
+ * sign extended.
+ */
+ if ((!(op == 2 && subop == MOVE_2)) &&
+ (!((chip_id_copy >= 0x30000) &&
+ (subop == LSL_2 || subop == LSR_2)))) {
+ /*
+ * Have to sign extend the .2 entry.
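+				 * (e.g. the 2-byte value 0x8000 becomes
+				 * 0xffff8000)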
+ */
+ source1 = ((unsigned int)
+ ((signed int)
+ ((signed short) source1)));
+ }
+ }
+ } else if (likely(op != MOVEI)) {
+ /*
+ * source1 comes from a register, using move.4 d0, src1
+		 * unaligned_emulate_get_src labels the instruction slot
+		 * below into which the emulated source move is patched.
+ */
+ extern unsigned int unaligned_emulate_get_src;
+ *((int *)&unaligned_emulate_get_src) &= ~(0x7ff);
+ *((int *)&unaligned_emulate_get_src) |= (inst & 0x7ff);
+ flush_dcache_range((unsigned long)(&unaligned_emulate_get_src),
+ (unsigned long)(&unaligned_emulate_get_src) + 4);
+
+ asm volatile (
+ /* source1 uses thread's registers */
+ " move.4 CSR, %1 \n\t"
+ " setcsr_flush 0 \n\t"
+ "unaligned_emulate_get_src: \n\t"
+ " move.4 %0, #0 \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ : "=d" (source1)
+ : "d" ((1 << 8) | (thread << 9))
+ : "cc"
+ );
+ }
+
+ /*
+ * get the destination address
+ */
+ if (dest_operand) {
+ BUG_ON(!(format == TWO_OP || format == TWO_OP_2 ||
+ format == DEST || format == DEST_2));
+ dest_address = unaligned_get_address(thread,
+ ((inst >> 16) & 0x7ff),
+ four_byte, save_an,
+ save_dn, &write_back_an);
+ }
+
+ if (write_back_an) {
+ /*
+ * restore any modified An registers
+ */
+ asm volatile (
+ " move.4 CSR, %0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 a0, (%1) \n\t"
+ " move.4 a1, 4(%1) \n\t"
+ " move.4 a2, 8(%1) \n\t"
+ " move.4 a3, 12(%1) \n\t"
+ " move.4 a4, 16(%1) \n\t"
+ " move.4 a5, 20(%1) \n\t"
+ " move.4 a6, 24(%1) \n\t"
+ " move.4 a7, 28(%1) \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ :
+ : "d" ((1 << 14) | (thread << 15)), "a" (save_an)
+ : "cc"
+ );
+ }
+
+ /*
+	 * Get the source-2 register if needed, and modify inst to use d1 for
+	 * source-2. Source-2 will come from this thread, not the trapping
+	 * thread.
+ */
+ source2 = 0;
+ if ((op >= 8 && op <= 0x17) ||
+ ((op == 2 || op == 6) && (inst & 0x4000000))) {
+ int src_dn = (inst >> 11) & 0xf;
+ source2 = save_dn[src_dn];
+ /*
+ * force the emulated instruction to use d1 for source2 operand
+ */
+ emulate_inst = (emulate_inst & 0xffff07ff) | 0x800;
+ }
+
+ if (likely(op != MOVEI)) {
+ /*
+ * change emulated instruction source1 to d0
+ */
+ emulate_inst &= ~0x7ff;
+ emulate_inst |= 1 << 8;
+ }
+
+ if (unlikely(op == 6 || op == 2)) {
+ /*
+ * Set destination to d0
+ */
+ emulate_inst &= ~(0xf << 16);
+ } else if (likely(op != CMPI)) {
+ /*
+ * Set general destination field to d0.
+ */
+ emulate_inst &= ~(0x7ff << 16);
+ emulate_inst |= 1 << 24;
+ }
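+
+ /*
+ * Field layout used by the rewrites above, as encoded in the
+ * masks: bits [10:0] hold the source-1 operand, bits [14:11] the
+ * source-2 register, and bits [26:16] the destination operand;
+ * 0x100, 0x800 and 0x01000000 select d0, d1 and d0 respectively
+ * within those fields.
+ */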
+
+ /*
+ * Execute the emulated instruction from d0 to d0, with no memory
+ * access. source2, if needed, will be in d1; source3, CSR, and
+ * the accumulators are set up before execution.
+ */
+ *((unsigned int *)&trap_emulate) = emulate_inst;
+ flush_dcache_range((unsigned long)(&trap_emulate),
+ (unsigned long)(&trap_emulate) + 4);
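+
+ /*
+ * trap_emulate presumably labels the instruction slot inside
+ * ubicom32_emulate_insn(): the emulated opcode is written over it
+ * and flushed from the D-cache so the call below executes the
+ * patched instruction.
+ */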
+
+ result = ubicom32_emulate_insn(source1, source2, source3,
+ save_acc, &save_csr);
+
+ /*
+ * set the result value
+ */
+ if (dest_operand) {
+ /*
+ * copy result to memory
+ */
+ if (four_byte) {
+ *dest_address++ =
+ (unsigned char)((result >> 24) & 0xff);
+ *dest_address++ =
+ (unsigned char)((result >> 16) & 0xff);
+ }
+ *dest_address++ = (unsigned char)((result >> 8) & 0xff);
+ *dest_address = (unsigned char)(result & 0xff);
+ } else if (likely(op != CMPI)) {
+ /*
+ * copy result to a register, using move.4 dest, result
+ */
+ extern unsigned int unaligned_trap_set_result;
+ *((unsigned int *)&unaligned_trap_set_result) &= ~0x7ff0000;
+
+ if (op == 2 || op == 6) {
+ *((unsigned int *)&unaligned_trap_set_result) |=
+ ((inst & 0x000f0000) | 0x01000000);
+ } else {
+ *((unsigned int *)&unaligned_trap_set_result) |=
+ (inst & 0x7ff0000);
+ }
+ flush_dcache_range((unsigned long)&unaligned_trap_set_result,
+ ((unsigned long)(&unaligned_trap_set_result) + 4));
+
+ asm volatile (
+ /* result uses thread's registers */
+ " move.4 CSR, %1 \n\t"
+ " setcsr_flush 0 \n\t"
+ "unaligned_trap_set_result: \n\t"
+ " move.4 #0, %0 \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ :
+ : "d"(result), "d" ((1 << 14) | (thread << 15))
+ : "cc"
+ );
+ }
+
+ /*
+ * bump PC in thread and restore implicit register changes
+ */
+ asm volatile (
+ " move.4 CSR, %0 \n\t"
+ " setcsr_flush 0 \n\t"
+ " move.4 pc, %1 \n\t"
+ " move.4 acc0_lo, (%3) \n\t"
+ " move.4 acc0_hi, 4(%3) \n\t"
+ " move.4 acc1_lo, 8(%3) \n\t"
+ " move.4 acc1_hi, 12(%3) \n\t"
+ " move.4 mac_rc16, 16(%3) \n\t"
+ " move.4 CSR, %2 \n\t"
+ " setcsr #0 \n\t"
+ " setcsr_flush 0 \n\t"
+ :
+ : "d"((1 << 14) | (thread << 15)),
+ "d"(pc + 4), "d"(save_csr), "a"(save_acc)
+ : "cc"
+ );
+}
+
+/*
+ * unaligned_only()
+ * Return true if at least one unaligned cause is set and no other
+ * causes are.
+ */
+int unaligned_only(unsigned int cause)
+{
+ unsigned int unaligned_cause_mask =
+ (1 << TRAP_CAUSE_DST_MISALIGNED) |
+ (1 << TRAP_CAUSE_SRC1_MISALIGNED);
+
+ BUG_ON(cause == 0);
+ return (cause & unaligned_cause_mask) == cause;
+}
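+
+/*
+ * For illustration: a cause word of (1 << TRAP_CAUSE_SRC1_MISALIGNED)
+ * makes unaligned_only() return true, while a cause word that also
+ * carries any non-alignment trap bit returns false, letting callers
+ * route mixed traps to the generic handler.
+ */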
diff --git a/target/linux/ubicom32/files/arch/ubicom32/kernel/vmlinux.lds.S b/target/linux/ubicom32/files/arch/ubicom32/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..cd646772c
--- /dev/null
+++ b/target/linux/ubicom32/files/arch/ubicom32/kernel/vmlinux.lds.S
@@ -0,0 +1,370 @@
+/*
+ * arch/ubicom32/kernel/vmlinux.lds.S
+ * vmlinux primary linker script
+ *
+ * (C) Copyright 2009, Ubicom, Inc.
+ *
+ * This file is part of the Ubicom32 Linux Kernel Port.
+ *
+ * The Ubicom32 Linux Kernel Port is free software: you can redistribute
+ * it and/or modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * The Ubicom32 Linux Kernel Port is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the Ubicom32 Linux Kernel Port. If not,
+ * see <http://www.gnu.org/licenses/>.
+ *
+ * Ubicom32 implementation derived from (with many thanks):
+ * arch/m68knommu
+ * arch/blackfin
+ * arch/parisc
+ */
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/ocm_size.h>
+#include <asm/memory_map.h>
+#include <asm/thread_info.h>
+#include <linux/threads.h>
+
+/*
+ * Sanity checks that catch configuration errors here rather than as
+ * harder-to-diagnose failures later on.
+ */
+#if !defined APP_OCM_CODE_SIZE
+#error APP_OCM_CODE_SIZE has not been defined in ocm_size.h
+#endif
+
+#if !defined APP_OCM_DATA_SIZE
+#error APP_OCM_DATA_SIZE has not been defined in ocm_size.h
+#endif
+
+/*
+ * The "free" OCM area that ultra does not use.
+ */
+#if APP_OCM_CODE_SIZE || APP_OCM_DATA_SIZE
+#define OCM_FREE_START (OCMSTART + APP_OCM_CODE_SIZE)
+#define OCM_FREE_LENGTH (OCMSIZE - APP_OCM_CODE_SIZE - APP_OCM_DATA_SIZE)
+#else
+#define OCM_FREE_START OCMEND
+#define OCM_FREE_LENGTH 0
+#endif
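+
+/*
+ * For illustration (values are hypothetical): with OCMSTART at
+ * 0x3ffc0000, OCMSIZE 0x40000, APP_OCM_CODE_SIZE 0x10000 and
+ * APP_OCM_DATA_SIZE 0x8000, the free window is OCM_FREE_START =
+ * 0x3ffd0000 and OCM_FREE_LENGTH = 0x28000.
+ */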
+
+/*
+ * If you want to limit OCM use for text/data or completely disable it
+ * you can change these values.
+ */
+#define OCM_TEXT_LENGTH OCM_FREE_LENGTH
+#define OCM_DATA_LENGTH OCM_FREE_LENGTH
+
+#define RAM_START KERNELSTART
+#define RAM_LENGTH ((SDRAMSTART + CONFIG_MIN_RAMSIZE) - RAM_START)
+#define TEXT ram
+#define DATA ram
+#define INIT ram
+#define BSS ram
+
+#ifndef DATA_ADDR
+#define DATA_ADDR
+#endif
+
+
+OUTPUT_ARCH(ubicom32)
+ENTRY(_start)
+
+MEMORY {
+ ram : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
+ syscall : ORIGIN = OS_SYSCALL_BEGIN, LENGTH = (OS_SYSCALL_END - OS_SYSCALL_BEGIN)
+ ocm : ORIGIN = OCM_FREE_START, LENGTH = OCM_FREE_LENGTH
+}
+
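+/*
+ * jiffies aliases the low 32 bits of jiffies_64; Ubicom32 is
+ * big-endian, so the low word sits at byte offset 4.
+ */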
+jiffies = jiffies_64 + 4;
+
+/*
+ * Fixed locations required by gdb coredumps.
+ *
+ * Note that the names are what gdb is expecting so renaming will break
+ * the toolchain.
+ */
+__ocm_begin = OCMSTART;
+__ocm_limit = __ocm_begin + OCMSIZE;
+__sdram_begin = SDRAMSTART;
+__sdram_limit = __sdram_begin + CONFIG_MIN_RAMSIZE;
+__filemedia_begin_addr = FLASHSTART;
+__filemedia_end_addr = __filemedia_begin_addr + 0x00800000;
+
+/*
+ * For internal diagnostics
+ */
+__os_syscall_begin = OS_SYSCALL_BEGIN;
+__os_syscall_end = OS_SYSCALL_END;
+
+SECTIONS {
+
+ .fixed_text : {
+ _begin = .;
+ *(.skip_syscall)
+ *(.old_syscall_entry.text)
+ __fixed_text_end = .;
+ } > TEXT
+ . = _begin + SIZEOF(.fixed_text) ;
+
+ /*
+ * System call text in lower ocm (fixed location, can never change)
+ */
+ __syscall_text_load_begin = .;
+ __syscall_text_run_begin = OS_SYSCALL_BEGIN;
+
+ .syscall_text __syscall_text_run_begin : AT(__syscall_text_load_begin) {
+ *(.syscall_entry.text) /* Must be at OS_SYSCALL_BEGIN 0x3ffc0040 */
+ *(.kernel_unprotected)
+ . = ALIGN(4);
+ __syscall_text_run_end = .;
+ } > syscall /* .syscall_text */
+ . = __syscall_text_load_begin + __syscall_text_run_end - __syscall_text_run_begin ;
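+
+ /*
+ * The syscall text above runs at a fixed address but is stored at
+ * the load address; the location counter is advanced past the load
+ * image by hand, and the section is presumably copied into place
+ * by early boot code.
+ */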
+ __ocm_text_load_begin = .;
+ __ocm_text_run_begin = OCM_FREE_START ;
+ .ocm_text __ocm_text_run_begin : AT(__ocm_text_load_begin) {
+#if OCM_TEXT_LENGTH
+ *(.ocm_text)
+ *(.sched.text)
+ *(.spinlock.text)
+#include <asm/ocm_text.lds.inc>
+ . = ALIGN(4);
+#endif
+ __ocm_text_run_end = .;
+ __data_begin = ALIGN(OCM_SECTOR_SIZE);
+ } > ocm /* .ocm_text */
+
+ .ocm_module_text __ocm_text_run_end (NOLOAD) : AT(__ocm_text_run_end) {
+ __ocm_inst_heap_begin = .;
+ /* Reserve the min requested */
+ . += (CONFIG_OCM_MODULES_RESERVATION) * 1024;
+#ifdef CONFIG_OCM_MODULES_MAY_CONSUME_REMAINING_CODESPACE
+ /* Round up to OCM sector size (we cannot use it for data) */
+ . = ALIGN(OCM_SECTOR_SIZE);
+#endif
+ __ocm_inst_heap_end = .;
+ /* update __data_begin */
+ __data_begin = ALIGN(OCM_SECTOR_SIZE);
+ } > ocm /* .ocm_module_text */
+
+ . = __ocm_text_load_begin + __ocm_text_run_end - __ocm_text_run_begin ;
+ __ocm_text_load_end = .;
+
+ __ocm_data_load_begin = .;
+ __ocm_data_run_begin = __data_begin ;
+#if OCM_DATA_LENGTH
+ .ocm_data __ocm_data_run_begin : AT(__ocm_data_load_begin) {
+#if defined(CONFIG_IRQSTACKS_USEOCM)
+ percpu_irq_stacks = .;
+ . += NR_CPUS * THREAD_SIZE;
+#endif
+ *(.ocm_data)
+ . = ALIGN(4) ;
+ __ocm_data_run_end = .;
+ } > ocm
+ . = __ocm_data_load_begin + __ocm_data_run_end - __ocm_data_run_begin ;
+#else
+ __ocm_data_run_end = __ocm_data_run_begin;
+#endif
+ __ocm_data_load_end = .;
+
+ __ocm_free_begin = __ocm_data_run_end;
+ __ocm_free_end = OCM_FREE_START + OCM_FREE_LENGTH;
+
+ .text __ocm_data_load_end : AT(__ocm_data_load_end) {
+ . = ALIGN(4);
+ _stext = .;
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.text.lock)
+ *(.text.__libgcc_udivmodsi)
+ *(.text.__libgcc_divmodsi)
+ *(.text.__libgcc_muldi3)
+ *(.text.__libgcc_udivmoddi)
+ *(.text.__libgcc_divmoddi)
+ *(.text.*)
+#if OCM_TEXT_LENGTH == 0
+ *(.ocm_text)
+ *(.sched.text)
+ *(.spinlock.text)
+#endif
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+
+ *(.rodata) *(.rodata.*)
+ *(__vermagic) /* Kernel version magic */
+ *(__markers_strings)
+ *(.rodata1)
+ *(.rodata.str1.1)
+ *(__tracepoints_strings)
+
+ /* PCI quirks */
+ __start_pci_fixups_early = . ;
+ *(.pci_fixup_early)
+ __end_pci_fixups_early = . ;
+ __start_pci_fixups_header = . ;
+ *(.pci_fixup_header)
+ __end_pci_fixups_header = . ;
+ __start_pci_fixups_final = . ;
+ *(.pci_fixup_final)
+ __end_pci_fixups_final = . ;
+ __start_pci_fixups_enable = . ;
+ *(.pci_fixup_enable)
+ __end_pci_fixups_enable = . ;
+ __start_pci_fixups_resume = . ;
+ *(.pci_fixup_resume)
+ __end_pci_fixups_resume = . ;
+ __start_pci_fixups_resume_early = . ;
+ *(.pci_fixup_resume_early)
+ __end_pci_fixups_resume_early = . ;
+ __start_pci_fixups_suspend = . ;
+ *(.pci_fixup_suspend)
+ __end_pci_fixups_suspend = . ;
+
+ __start_builtin_fw = . ;
+ *(.builtin_fw)
+ __end_builtin_fw = . ;
+
+ /* Kernel symbol table: Normal symbols */
+ . = ALIGN(4);
+ __start___ksymtab = .;
+ *(__ksymtab)
+ __stop___ksymtab = .;
+
+ /* Kernel symbol table: GPL-only symbols */
+ __start___ksymtab_gpl = .;
+ *(__ksymtab_gpl)
+ __stop___ksymtab_gpl = .;
+
+ /* Kernel symbol table: Normal unused symbols */
+ __start___ksymtab_unused = .;
+ *(__ksymtab_unused)
+ __stop___ksymtab_unused = .;
+
+ /* Kernel symbol table: GPL-only unused symbols */
+ __start___ksymtab_unused_gpl = .;
+ *(__ksymtab_unused_gpl)
+ __stop___ksymtab_unused_gpl = .;
+
+ /* Kernel symbol table: GPL-future symbols */
+ __start___ksymtab_gpl_future = .;
+ *(__ksymtab_gpl_future)
+ __stop___ksymtab_gpl_future = .;
+
+ /* Kernel symbol table: Normal symbols */
+ __start___kcrctab = .;
+ *(__kcrctab)
+ __stop___kcrctab = .;
+
+ /* Kernel symbol table: GPL-only symbols */
+ __start___kcrctab_gpl = .;
+ *(__kcrctab_gpl)
+ __stop___kcrctab_gpl = .;
+
+ /* Kernel symbol table: GPL-future symbols */
+ __start___kcrctab_gpl_future = .;
+ *(__kcrctab_gpl_future)
+ __stop___kcrctab_gpl_future = .;
+
+ /* Kernel symbol table: strings */
+ *(__ksymtab_strings)
+
+ /* Built-in module parameters */
+ . = ALIGN(4) ;
+ __start___param = .;
+ *(__param)
+ __stop___param = .;
+
+ . = ALIGN(4) ;
+ _etext = . ;
+ } > TEXT
+
+ .data DATA_ADDR : {
+ . = ALIGN(4);
+ _sdata = . ;
+ DATA_DATA
+#if OCM_DATA_LENGTH == 0
+ *(.ocm_data)
+#endif
+ . = ALIGN(8192) ;
+ _data_protection_end = .;
+ *(.data.init_task)
+ . = ALIGN(4);
+ _edata = . ;
+ } > DATA
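+
+ /*
+ * The ALIGN(8192) above suggests an 8 KiB protection granularity:
+ * everything below _data_protection_end can be write-protected,
+ * while .data.init_task lands outside that range.
+ */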
+
+ .init : {
+ . = ALIGN(4096);
+ __init_begin = .;
+ _sinittext = .;
+ INIT_TEXT
+ _einittext = .;
+ *(.init.rodata)
+ INIT_DATA
+ . = ALIGN(16);
+ __setup_start = .;
+ *(.init.setup)
+ __setup_end = .;
+ __initcall_start = .;
+ INITCALLS
+ __initcall_end = .;
+ __con_initcall_start = .;
+ *(.con_initcall.init)
+ __con_initcall_end = .;
+ ___security_initcall_start = .;
+ *(.security_initcall.init)
+ ___security_initcall_end = .;
+#ifdef CONFIG_BLK_DEV_INITRD
+ . = ALIGN(4);
+ __initramfs_start = .;
+ *(.init.ramfs)
+ __initramfs_end = .;
+#endif
+ . = ALIGN(4096);
+ __per_cpu_start = .;
+ *(.data.percpu)
+ *(.data.percpu.shared_aligned)
+ __per_cpu_end = .;
+
+ . = ALIGN(4096);
+ __init_end = .;
+ } > INIT
+
+ .eh_frame :
+ {
+ PROVIDE (___eh_frame_begin = .);
+ *(.eh_frame)
+ LONG (0);
+ PROVIDE (___eh_frame_end = .);
+ } > INIT
+
+ /DISCARD/ : {
+ EXIT_TEXT
+ EXIT_DATA
+ *(.exitcall.exit)
+ }
+
+ .bss : {
+ . = ALIGN(4);
+ _sbss = . ;
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(4) ;
+ _ebss = . ;
+ _end = . ;
+ } > BSS
+
+ NOTES > BSS
+
+}