| | | |
|---|---|---|
| author | blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73> | 2012-10-05 10:12:53 +0000 |
| committer | blogic <blogic@3c298f89-4303-0410-b956-a3cf2f4a3e73> | 2012-10-05 10:12:53 +0000 |
| commit | 5c105d9f3fd086aff195d3849dcf847d6b0bd927 (patch) | |
| tree | 1229a11f725bfa58aa7c57a76898553bb5f6654a /target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch | |
branch Attitude Adjustment
git-svn-id: svn://svn.openwrt.org/openwrt/branches/attitude_adjustment@33625 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch')
-rw-r--r-- | target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch | 497 |
1 file changed, 497 insertions, 0 deletions
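
The patch below defines AT_SYSINFO_EHDR as 33 in arch/m68k/include/asm/auxvec.h, and the new arch_setup_additional_pages() in vdso.c maps the vDSO one page below TASK_UNMAPPED_BASE. As a rough, hypothetical sketch of how a ColdFire user program could locate that page, the snippet here walks the ELF auxiliary vector by hand. It assumes the kernel also publishes the entry through the usual ARCH_DLINFO mechanism (not part of the hunks shown) and uses the common but non-standard three-argument main() instead of getauxval(), which a 2011-era C library may not provide.

```c
/*
 * Hypothetical sketch: find the ColdFire vDSO mapping via the auxiliary
 * vector. AT_SYSINFO_EHDR = 33 comes from the auxvec.h hunk below; the
 * auxv walking itself is generic ELF usage, not something this patch adds.
 */
#include <stdio.h>
#include <elf.h>

#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33	/* value chosen by the patch for m68k/ColdFire */
#endif

int main(int argc, char **argv, char **envp)
{
	(void)argc;
	(void)argv;

	/* The auxiliary vector sits immediately after the environment block. */
	while (*envp)
		envp++;

	for (Elf32_auxv_t *aux = (Elf32_auxv_t *)(envp + 1);
	     aux->a_type != AT_NULL; aux++) {
		if (aux->a_type == AT_SYSINFO_EHDR) {
			printf("[vdso] ELF header mapped at 0x%lx\n",
			       (unsigned long)aux->a_un.a_val);
			return 0;
		}
	}

	printf("no AT_SYSINFO_EHDR entry in the auxiliary vector\n");
	return 1;
}
```

A dynamic linker that understands AT_SYSINFO_EHDR would normally consume this address itself; the manual walk is only useful for inspection, or for the static binaries that the VDSO_MBASE comment in vdso.c is concerned about.
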
diff --git a/target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch b/target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch
new file mode 100644
index 000000000..8a7ead563
--- /dev/null
+++ b/target/linux/coldfire/patches/012-Add-vDSO-support-for-Coldfire-platform.patch
@@ -0,0 +1,497 @@
+From 1ba9968246337836bf0acdbf7733a628da5c5e42 Mon Sep 17 00:00:00 2001
+From: Alison Wang <b18965@freescale.com>
+Date: Thu, 4 Aug 2011 09:59:42 +0800
+Subject: [PATCH 12/52] Add vDSO support for Coldfire platform
+
+This patch adds vDSO support for Coldfire platform.
+
+Signed-off-by: Alison Wang <b18965@freescale.com>
+---
+ arch/m68k/coldfire/common/entry.S | 36 ++++++++++
+ arch/m68k/coldfire/vdso/Makefile | 46 +++++++++++++
+ arch/m68k/coldfire/vdso/vdso-bin.S | 14 ++++
+ arch/m68k/coldfire/vdso/vdso-lib.S | 57 ++++++++++++++++
+ arch/m68k/coldfire/vdso/vdso-note.S | 27 ++++++++
+ arch/m68k/coldfire/vdso/vdso.c | 124 +++++++++++++++++++++++++++++++++++
+ arch/m68k/coldfire/vdso/vdso.lds.S | 89 +++++++++++++++++++++++++
+ arch/m68k/include/asm/auxvec.h | 9 +++
+ arch/m68k/mm/init.c | 3 +
+ 9 files changed, 405 insertions(+), 0 deletions(-)
+ create mode 100644 arch/m68k/coldfire/vdso/Makefile
+ create mode 100644 arch/m68k/coldfire/vdso/vdso-bin.S
+ create mode 100644 arch/m68k/coldfire/vdso/vdso-lib.S
+ create mode 100644 arch/m68k/coldfire/vdso/vdso-note.S
+ create mode 100644 arch/m68k/coldfire/vdso/vdso.c
+ create mode 100644 arch/m68k/coldfire/vdso/vdso.lds.S
+
+--- a/arch/m68k/coldfire/common/entry.S
++++ b/arch/m68k/coldfire/common/entry.S
+@@ -69,6 +69,9 @@ ENTRY(buserr)
+ movew #0x2700,%sr /* lock interrupts */
+ #endif
+ SAVE_ALL_INT
++#ifdef CONFIG_VDSO
++ jsr check_vdso_atomic_cmpxchg_32
++#endif
+ #ifdef CONFIG_COLDFIRE_FOO
+ movew PT_0FF_SR(%sp),%d3 /* get original %sr */
+ oril #0x2000,%d3 /* set supervisor mode in it */
+@@ -82,6 +85,9 @@ ENTRY(buserr)
+
+ ENTRY(trap)
+ SAVE_ALL_INT
++#ifdef CONFIG_VDSO
++ jsr check_vdso_atomic_cmpxchg_32
++#endif
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- /* stack frame pointer argument */
+ jsr trap_c
+@@ -213,6 +219,9 @@ do_delayed_trace:
+ */
+ ENTRY(inthandler)
+ SAVE_ALL_INT
++#ifdef CONFIG_VDSO
++ jsr check_vdso_atomic_cmpxchg_32
++#endif
+ GET_CURRENT(%d0)
+ movel %curptr@(TASK_INFO+TINFO_PREEMPT),%d0
+ addil #0x10000,%d0
+@@ -398,8 +407,35 @@ resume:
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
+ movew %d0,%sr
+
++#ifdef CONFIG_VDSO
++ /* save thread pointer */
++ lea _vdso_tp,%a0
++ movel (%a0),%a0
++ movel %a1@(TASK_INFO+TINFO_TP_VALUE),(%a0)
++#endif
+ rts
+
++#ifdef CONFIG_VDSO
++/* if interrupted PC is between 0x5fffe40c to 0x5ffffe412 */
++/* then set PC back to 0x5fffe40c (start addr of __kernel_atomic_cmpxchg_32) */
++/* note: these absolute addresses depend on vdso-lib.S and vdso.lds.S */
++ENTRY(check_vdso_atomic_cmpxchg_32)
++ movel %sp@(PT_OFF_PC),%d0
++ cmpil #0x5fffe414,%d0 /* __kernel_atomic_cmpxchg_32: line 4 */
++ ble label1
++ cmpil #0x5fffe412,%d0 /* __kernel_atomic_cmpxchg_32: line 3 */
++ beql label0
++ cmpil #0x5fffe40e,%d0 /* __kernel_atomic_cmpxchg_32: line 2 */
++ beql label0
++ jra label1
++label0:
++ /* __kernel_atomic_cmpxchg_32: line 1 */
++ movel #0x5fffe40c,%d0
++ movel %d0,%sp@(PT_OFF_PC)
++label1:
++ rts
++#endif
++
+ .data
+ ALIGN
+ sys_call_table:
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/Makefile
+@@ -0,0 +1,46 @@
++#
++# Makefile for arch/m68k/coldfire/vdso with special rules
++# for building the DSO lib
++#
++# Based on arch/sh/kernel/vsyscall/Makefile
++#
++# Kurt Mahan <kmahan@freescale.com>
++#
++
++obj-y := vdso.o vdso-bin.o
++
++$(obj)/vdso-bin.o: $(obj)/vdso.lds $(obj)/vdso-lib.so
++
++#
++# The DSO Lib
++#
++
++# special linker script for building DSO images
++quiet_cmd_vdso = VDSO $@
++ cmd_vdso = $(LD) -nostdlib --eh-frame-hdr $(SYSCFLAGS_$(@F)) \
++ -T $(obj)/vdso.lds $^ -o $@
++
++vdso-flags = -shared -s -soname=linux-gate.so.1
++
++SYSCFLAGS_vdso-lib.so = $(vdso-flags)
++
++$(obj)/vdso-lib.so: $(obj)/vdso-lib.o $(obj)/vdso-note.o
++ $(call if_changed,vdso)
++
++$(obj)/vdso.lds: $(srctree)/arch/m68k/coldfire/vdso/vdso.lds.S
++ cp $< $@
++
++#
++# Create a special relocatable object that should mirror the
++# symbol table and layout of the linked DSO lib. With ld -R
++# these symbols can be refered to in the kernel code rather
++# than as hand-coded addresses
++#
++# extra-y += vdso-syms.o
++# $(obj)/built-in.o: $(obj)/vdso-syms.o
++# $(obj)/built-in.o: ld_flags += -R (obj)/vdso-syms.o
++
++# SYSCFLAGS_vdso-syms.o = -r
++# $(obj)/vdso-syms.o: $(src)/vdso.lds \
++# $(obj)/vdso-lib.o $(obj)/vdso-note.o FORCE
++# $(call if_changed,vdso)
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/vdso-bin.S
+@@ -0,0 +1,14 @@
++/*
++ * Setup vdso lib (.so) as binary image.
++ */
++
++#include <linux/init.h>
++
++__INITDATA
++
++ .globl vdso_bin_start, vdso_bin_end
++vdso_bin_start:
++ .incbin "arch/m68k/coldfire/vdso/vdso-lib.so"
++vdso_bin_end:
++
++__FINIT
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/vdso-lib.S
+@@ -0,0 +1,57 @@
++/*
++ * VDSO userspace code
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Kurt Mahan <kmahan@freescale.com>
++ *
++ * This is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/linkage.h>
++#include <asm/unistd.h>
++
++ .text
++/*
++ * Read the thread pointer into A0 (and D0, for compatibility).
++ */
++ENTRY(__kernel_read_tp)
++ .cfi_startproc
++ lea kuser_vdso_tp,%a0
++ movel (%a0), %d0
++ movel %d0,%a0
++ rts
++ .cfi_endproc
++ .size __kernel_read_tp,.-__kernel_read_tp
++
++/*
++ * Atomic compare exchange. Can not clobber any registers
++ * other than conditional codes.
++ */
++ENTRY(__kernel_atomic_cmpxchg_32)
++ .cfi_startproc
++ cmpl (%a0),%d0
++ bne label0
++ movel %d1, (%a0)
++ jmp label1
++label0:
++ movel (%a0),%d0
++label1:
++ rts
++ .cfi_endproc
++ .size __kernel_atomic_cmpxchg_32,.-__kernel_atomic_cmpxchg_32
++
++/*
++ * Atomic memory barrier. Can not clobber any registers
++ * other than condition codes.
++ */
++ENTRY(__kernel_atomic_barrier)
++ .cfi_startproc
++ /* no code needed for uniprocs */
++ rts
++ .cfi_endproc
++ .size __kernel_atomic_barrier,.-__kernel_atomic_barrier
++
++ .previous
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/vdso-note.S
+@@ -0,0 +1,27 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ *
++ * Based on arch/sh/kernel/vsyscall/vsyscall-note.S
++ */
++
++#include <linux/uts.h>
++#include <linux/version.h>
++
++#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
++ .section name, flags; \
++ .balign 4; \
++ .long 1f - 0f; /* name length */ \
++ .long 3f - 2f; /* data length */ \
++ .long type; /* note type */ \
++0: .asciz vendor; /* vendor name */ \
++1: .balign 4; \
++2:
++
++#define ASM_ELF_NOTE_END \
++3: .balign 4; /* pad out section */ \
++ .previous
++
++ ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
++ .long LINUX_VERSION_CODE
++ ASM_ELF_NOTE_END
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/vdso.c
+@@ -0,0 +1,124 @@
++/*
++ * arch/m68k/coldfire/vdso/vdso.c
++ *
++ * Based on arch/sh/kernel/vsyscall/vsyscall.c
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Kurt Mahan <kmahan@freescale.com>
++ *
++ * This is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <linux/module.h>
++#include <linux/elf.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++
++/* Mapping vDSO at the default address (what would've been returned
++ * if VDSO_MBASE was 0) makes it impossible to extend data segment
++ * (through brk()) for static binaries. The vDSO fits into one page,
++ * so map it just before TASK_UNMAPPED_BASE.
++ */
++#define VDSO_MBASE (TASK_UNMAPPED_BASE - PAGE_SIZE)
++#define VDSO_VAR_OFFSET 4096
++
++unsigned int vdso_enabled = 1;
++EXPORT_SYMBOL_GPL(vdso_enabled);
++
++static struct page *vdso_pages[1];
++
++/* _vdso_var_start: vdso_page_start + offset_4K */
++/* it's used to save key values from kernel */
++void *_vdso_var_start;
++void *_vdso_tp;
++
++extern const char vdso_bin_start, vdso_bin_end;
++
++int __init vdso_init(void)
++{
++ void *vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
++ vdso_pages[0] = virt_to_page(vdso_page);
++
++ _vdso_var_start = (void *)(vdso_page + VDSO_VAR_OFFSET);
++ _vdso_tp = _vdso_var_start;
++
++ printk(KERN_INFO "** VDSO_INIT\n");
++
++ /* copy dso bin in */
++ memcpy(vdso_page,
++ &vdso_bin_start, &vdso_bin_end - &vdso_bin_start);
++
++ return 0;
++}
++
++/* setup VMA at program startup for the vdso page */
++int arch_setup_additional_pages(struct linux_binprm *bprm,
++ int executable_stack)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long addr;
++ int ret;
++
++ current->mm->context.vdso = 0;
++
++ down_write(&mm->mmap_sem);
++ addr = get_unmapped_area(NULL, VDSO_MBASE, PAGE_SIZE, 0, 0);
++ if (IS_ERR_VALUE(addr)) {
++ ret = addr;
++ goto up_fail;
++ }
++
++ ret = install_special_mapping(mm, addr, PAGE_SIZE,
++ VM_READ | VM_EXEC |
++ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC |
++ VM_ALWAYSDUMP,
++ vdso_pages);
++
++ if (unlikely(ret))
++ goto up_fail;
++
++ current->mm->context.vdso = (void *)addr;
++
++up_fail:
++#ifdef DEBUG
++ printk(KERN_DEBUG "arch_setup_additional_pages: addr: %lx; ret: %d\n",
++ addr, ret);
++#endif
++
++ up_write(&mm->mmap_sem);
++ return ret;
++}
++
++/*
++ * check vma name
++ */
++const char *arch_vma_name(struct vm_area_struct *vma)
++{
++ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
++ return "[vdso]";
++
++ return NULL;
++}
++
++struct vm_area_struct *get_gate_vma(struct task_struct *task)
++{
++ return NULL;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long address)
++{
++ return 0;
++}
++
++int in_gate_area_no_task(unsigned long address)
++{
++ return 0;
++}
+--- /dev/null
++++ b/arch/m68k/coldfire/vdso/vdso.lds.S
+@@ -0,0 +1,89 @@
++/*
++ * Linker script for vdso DSO. The vdso page is an ELF shared
++ * object prelinked to its virtual address, and with only one read-only
++ * segment (that fits in one page). This script controls its layout.
++ *
++ * Based on arch/sh/kernel/vsyscall/vsyscall.lds.S
++ *
++ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Kurt Mahan <kmahan@freescale.com>
++ *
++ * This is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
++OUTPUT_ARCH(m68k)
++
++/* The ELF entry point can be used to set the AT_SYSINFO value. */
++ENTRY(__kernel_read_tp);
++ENTRY(__kernel_atomic_cmpxchg_32);
++ENTRY(__kernel_atomic_barrier);
++
++SECTIONS
++{
++ . = 0x5fffe000 + SIZEOF_HEADERS;
++
++ .hash : { *(.hash) } :text
++ .gnu.hash : { *(.gnu.hash) }
++ .dynsym : { *(.dynsym) }
++ .dynstr : { *(.dynstr) }
++ .gnu.version : { *(.gnu.version) }
++ .gnu.version_d : { *(.gnu.version_d) }
++ .gnu.version_r : { *(.gnu.version_r) }
++
++ /*
++ * This linker script is used both with -r and with -shared.
++ * For the layouts to match, we need to skip more than enough
++ * space for the dynamic symbol table et al. If this amount
++ * is insufficient, ld -shared will barf. Just increase it here.
++ */
++ . = 0x5fffe000 + 0x400;
++
++ .text : { *(.text) } :text
++ .note : { *(.note.*) } :text :note
++ .eh_frame_hdr : { *(.eh_frame_hdr ) } :text :eh_frame_hdr
++ .eh_frame : {
++ KEEP (*(.eh_frame))
++ LONG (0)
++ } :text
++ .dynamic : { *(.dynamic) } :text :dynamic
++ .useless : {
++ *(.got.plt) *(.got)
++ *(.data .data.* .gnu.linkonce.d.*)
++ *(.dynbss)
++ *(.bss .bss.* .gnu.linkonce.b.*)
++ } :text
++
++ . = 0x5fffe000 + 0x1000;
++ kuser_vdso_tp = .;
++}
++
++/*
++ * We must supply the ELF program headers explicitly to get just one
++ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
++ */
++PHDRS
++{
++ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
++ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
++ note PT_NOTE FLAGS(4); /* PF_R */
++ eh_frame_hdr PT_GNU_EH_FRAME;
++}
++
++/*
++ * This controls what symbols we export from the DSO.
++ */
++VERSION
++{
++ LINUX_2.6 {
++ global:
++ __kernel_read_tp;
++ __kernel_atomic_cmpxchg_32;
++ __kernel_atomic_barrier;
++
++ local: *;
++ };
++}
+--- a/arch/m68k/include/asm/auxvec.h
++++ b/arch/m68k/include/asm/auxvec.h
+@@ -1,4 +1,13 @@
+ #ifndef __ASMm68k_AUXVEC_H
+ #define __ASMm68k_AUXVEC_H
++/*
++ * Architecture-neutral AT_ values in 0-17, leave some room
++ * for more of them.
++ */
++
++#ifdef CONFIG_VDSO
++/* Entry point to the vdso page */
++#define AT_SYSINFO_EHDR 33
++#endif
+
+ #endif
+--- a/arch/m68k/mm/init.c
++++ b/arch/m68k/mm/init.c
+@@ -39,6 +39,9 @@
+ #include <asm/sections.h>
+ #include <asm/tlb.h>
+
++#ifdef CONFIG_VDSO
++int vdso_init(void);
++#endif
+
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
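
For context on how userspace is expected to use the helpers above: __kernel_atomic_cmpxchg_32 in vdso-lib.S takes the pointer in %a0, the expected old value in %d0 and the new value in %d1, and returns the previous memory value in %d0, and the entry.S comments pin the routine at 0x5fffe40c. The sketch below is a hypothetical caller built only on those two facts; jumping to the hard-coded address is for illustration (a real caller would resolve the symbol through AT_SYSINFO_EHDR), and the GCC register-asm idiom plus the absolute `jsr` operand form are assumptions about the toolchain, not something the patch provides.

```c
/*
 * Hypothetical user-space caller of the ColdFire vDSO cmpxchg helper.
 * Calling convention and the 0x5fffe40c address are taken from the patch
 * (vdso-lib.S and the entry.S comments); everything else is illustrative.
 */
#include <stdio.h>

/* Returns the value *ptr held before the call; the store of newval only
 * happens when that previous value equals oldval (see vdso-lib.S). */
static inline int vdso_cmpxchg32(volatile int *ptr, int oldval, int newval)
{
	register volatile int *a0 asm ("a0") = ptr;	/* pointer in %a0 */
	register int d0 asm ("d0") = oldval;		/* expected value in %d0 */
	register int d1 asm ("d1") = newval;		/* replacement in %d1 */

	/* Absolute call into the prelinked vDSO page; the operand syntax may
	 * need adjusting for a particular assembler. The helper clobbers only
	 * the condition codes and the pointed-to memory. */
	asm volatile ("jsr 0x5fffe40c"
		      : "+d" (d0)
		      : "a" (a0), "d" (d1)
		      : "cc", "memory");
	return d0;					/* previous *ptr */
}

int main(void)
{
	volatile int lock = 0;

	/* Take a toy lock: atomically swap 0 -> 1. */
	if (vdso_cmpxchg32(&lock, 0, 1) == 0)
		printf("lock acquired, lock=%d\n", lock);

	/* A second attempt fails and simply reports the current value. */
	printf("retry sees %d\n", vdso_cmpxchg32(&lock, 0, 1));
	return 0;
}
```

The helper sequence itself is not atomic on ColdFire, which is why the kernel side compensates: check_vdso_atomic_cmpxchg_32 in the entry.S hunk rewinds the saved PC to 0x5fffe40c whenever an exception or interrupt lands partway through the routine, so the compare-and-store is restarted rather than observed half-done.
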