/*
 * arch/ubicom32/kernel/ubicom32_syscall.S
 *	Ubicom32 system call entry, exit and trampoline handling.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/unistd.h>

#include <asm/ubicom32-common.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/range-protect.h>

/*
 * __old_system_call()
 */
	.section .old_syscall_entry.text, "ax", @progbits
#ifdef CONFIG_OLD_40400010_SYSTEM_CALL
__old_system_call:
	call a3, system_call
	.size __old_system_call, . - __old_system_call ;
#else
	/*
	 * Something that will crash the userspace application but should
	 * not take down the kernel. If protection is enabled, this will
	 * never even be executed.
	 */
	.long	0xFABBCCDE			; illegal instruction
	bkpt #-1				; we will never get here
#endif

/*
 * system_call()
 */
	.section .syscall_entry.text, "ax", @progbits
	.global system_call
system_call:
	/*
	 * Regular ABI rules for function calls apply to syscalls.  d8 holds
	 * the syscall number, which we use to index into the syscall table.
	 * d0 - d5 hold the parameters.
	 *
	 * First we get the current thread_info and swap to the kernel stack.
	 * This is done by reading the current thread and looking up the ksp
	 * from the sw_ksp array and storing it in a3.
	 *
	 * Then we reserve space for the syscall context (a struct pt_regs),
	 * addressing it via a4 initially and later via sp.
	 * Once sp is set to the kernel sp we can leave the critical section.
	 *
	 * For the user case the kernel stack will have the following layout.
	 *
	 *  a3		 ksp[0] +-----------------------+
	 *			| Thread info area	|
	 *			| struct thread_info	|
	 *			+-----------------------+
	 *			:			:
	 *			|   Kernel Stack Area	|
	 *			|			|
	 *  a4 / sp >>>		+-----------------------+
	 *			| Context save area	|
	 *			| struct pt_regs	|
	 *  ksp[THREAD_SIZE-8]  +-----------------------+
	 *			| 8 Byte Buffer Zone	|
	 *  ksp[THREAD_SIZE]    +-----------------------+
	 *
	 * For kernel syscalls the layout is as follows.
	 *
	 *  a3		 ksp[0] +-----------------------+
	 *			| Thread info area	|
	 *			| struct thread_info	|
	 *			+-----------------------+
	 *			:			:
	 *			|   Kernel Stack Area	|
	 *			|			|
	 *  a4 / sp >>>		+-----------------------+
	 *			| Context save area	|
	 *			| struct pt_regs	|
	 * sp at syscall entry  +-----------------------+
	 *			| Caller's Kernel Stack	|
	 *			:			:
	 *
	 * Once the context is saved we optionally call syscall_trace, set up
	 * the exit routine, and jump to the syscall.
	 */
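
	/*
	 * As a rough C sketch of the userspace case above (illustration
	 * only; the real offsets come from asm-offsets as PT_SIZE and
	 * ASM_THREAD_SIZE, and thread_id here stands for the value read by
	 * thread_get_self):
	 *
	 *	struct thread_info *ti = sw_ksp[thread_id];
	 *	struct pt_regs *regs = (struct pt_regs *)
	 *			((char *)ti + THREAD_SIZE - 8 - PT_SIZE);
	 *	sp = (unsigned long)regs;
	 */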

	/*
	 * Load the base address of sw_ksp into a3.
	 * Note: we cannot access it just yet, as protection is still on.
	 */
	moveai	a3, #%hi(sw_ksp)
	lea.1	a3, %lo(sw_ksp)(a3)

	/*
	 * Enter critical section.
	 *
	 * The 'critical' aspects here are switching to the ksp and changing
	 * the protection registers; these both use per-thread information,
	 * so we need to protect against a context switch. For now this is
	 * done using the global atomic lock.
	 */
	atomic_lock_acquire

	thread_get_self d15			; Load current thread number
#ifdef CONFIG_PROTECT_KERNEL
	lsl.4	d9, #1, d15			; Convert to thread bit
	enable_kernel_ranges d9
#endif
	/*
	 * In order to reduce the amount of code in the syscall section, we
	 * get out of it right now.
	 */
	call a4, __system_call_bottom_half
	.size system_call, . - system_call

	.section .text.__system_call_bottom_half, "ax", @progbits
__system_call_bottom_half:

	/*
	 * We need to determine whether this is a kernel or user syscall.
	 * Start by loading the pointer to the thread_info structure for the
	 * current process into a3.
	 */
	move.4	a3, (a3, d15)			; a3 = sw_ksp[d15]

	/*
	 * Now, if this is a kernel thread, the same value can be achieved by
	 * masking off the lower bits of the current stack pointer.
	 */
	movei	d9, #(~(ASM_THREAD_SIZE-1))	; load mask
	and.4	d9, sp, d9			; apply mask
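
	/*
	 * In C terms this masking is roughly (a sketch; kernel stacks are
	 * assumed to be THREAD_SIZE aligned):
	 *
	 *	ti = (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	 */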

	/*
	 * d9 now has the masked version of the sp. If this is identical to
	 * what is in a3 then don't switch to ksp as we are already in the
	 * kernel.
	 */
	sub.4	#0, a3, d9

	/*
	 * If d9 and a3 are not equal, we came from userspace and have to
	 * switch to the ksp.
	 */
	jmpne.t	1f

	/*
	 * Kernel Syscall.
	 *
	 * The kernel has called this routine. We use pdec to reserve space
	 * for pt_regs below sp.
	 */
	pdec	a4, PT_SIZE(sp)			; a4 = ksp - PT_SIZE
	jmpt.t	2f

	/*
	 * Userspace Syscall.
	 *
	 * Add THREAD_SIZE, then subtract the 8-byte buffer zone and PT_SIZE,
	 * to create the proper ksp.
	 */
1:	movei	d15, #(ASM_THREAD_SIZE - 8 - PT_SIZE)
	lea.1	a4, (a3, d15)			; a4 = ksp + d15

	/*
	 * Replace user stack pointer with kernel stack pointer (a4)
	 * Load -1 into frame_type in the save area to indicate that this is
	 * a system call frame.
	 */
2:	move.4	PT_A7(a4), a7			; Save old sp/A7 on kernel stack
	move.4	PT_FRAME_TYPE(a4), #-1		; Set the frame type.
	move.4	sp, a4				; Change to ksp.
	/*
	 * We are now officially back in the kernel!
	 */

	/*
	 * Now that we are on the ksp we can leave the critical section
	 */
	atomic_lock_release

	/*
	 * We need to save a0 because we need to be able to restore it in
	 * the event that we need to handle a signal.  It's not generally
	 * a callee-saved register but is the GOT pointer.
	 */
	move.4	PT_A0(sp), a0			; Save A0 on kernel stack

	/*
	 * We still need to save d10-d13, a1, a2, a5, and a6 in the kernel
	 * frame for this process. We also save the system call params in
	 * case of syscall restart. (Note that a7 was saved above.)
	 */
	move.4	PT_A1(sp), a1			; Save A1 on kernel stack
	move.4	PT_A2(sp), a2			; Save A2 on kernel stack
	move.4	PT_A5(sp), a5			; Save A5 on kernel stack
	move.4	PT_A6(sp), a6			; Save A6 on kernel stack
	move.4	PT_PC(sp), a5			; Save A5 at the PC location
	move.4	PT_D10(sp), d10			; Save D10 on kernel stack
	move.4	PT_D11(sp), d11			; Save D11 on kernel stack
	move.4	PT_D12(sp), d12			; Save D12 on kernel stack
	move.4	PT_D13(sp), d13			; Save D13 on kernel stack

	/*
	 * Now save the syscall parameters
	 */
	move.4	PT_D0(sp), d0			; Save d0 on kernel stack
	move.4	PT_ORIGINAL_D0(sp), d0		; Save original d0 for restart
	move.4	PT_D1(sp), d1			; Save d1 on kernel stack
	move.4	PT_D2(sp), d2			; Save d2 on kernel stack
	move.4	PT_D3(sp), d3			; Save d3 on kernel stack
	move.4	PT_D4(sp), d4			; Save d4 on kernel stack
	move.4	PT_D5(sp), d5			; Save d5 on kernel stack
	move.4	PT_D8(sp), d8			; Save d8 on kernel stack

	/*
	 * Test whether syscalls are being traced; if they are, jump to the
	 * syscall trace handler (it will come back here).
	 */
	btst	TI_FLAGS(a3), #ASM_TIF_SYSCALL_TRACE
	jmpne.f .Lsystem_call__trace
.Lsystem_call__trace_complete:
	/*
	 * Check for a valid call number [ 0 <= syscall_number < NR_syscalls ]
	 */
	cmpi	d8, #0
	jmplt.f 3f
	cmpi	d8, #NR_syscalls
	jmplt.t	4f
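
	/*
	 * In C this dispatch amounts to (a sketch, not real code):
	 *
	 *	if (d8 < 0 || d8 >= NR_syscalls)
	 *		fn = sys_ni_syscall;
	 *	else
	 *		fn = sys_call_table[d8];
	 */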

	/*
	 * They have passed an invalid number. Call sys_ni_syscall, starting
	 * by loading a4 with the base address of sys_ni_syscall.
	 */
3:	moveai	a4, #%hi(sys_ni_syscall)
	lea.1	a4, %lo(sys_ni_syscall)(a4)
	jmpt.t	5f				; Jump to regular processing

	/*
	 * Validated syscall, load the syscall table base address into a3 and
	 * read the syscall ptr out.
	 */
4:	moveai	a3, #%hi(sys_call_table)
	lea.1	a3, %lo(sys_call_table)(a3)	; a3 = sys_call_table
	move.4	a4, (a3, d8)			; a4 = sys_call_table[d8]

	/*
	 * Before calling the syscall, set up a5 so that syscall_exit is
	 * called on return from the syscall.
	 */
5:	moveai	a5, #%hi(syscall_exit)		; Setup return address
	lea.1	a5, %lo(syscall_exit)(a5)	; from system call

	/*
	 * If the syscall is __NR_rt_sigreturn then we have to test d1 to
	 * figure out whether we have to change the return routine to restore
	 * all registers.
	 */
	cmpi	d8, #__NR_rt_sigreturn
	jmpeq.f	6f

	/*
	 * Launch system call (it will return through a5 - syscall_exit)
	 */
	calli	a3, 0(a4)

	/*
	 * System call is rt_sigreturn. Test d1. If it is 1 we have to
	 * change the return address to restore_all_registers
	 */
6:	cmpi	d1, #1
	jmpne.t	7f

	moveai	a5, #%hi(restore_all_registers)	 ; Setup return address
	lea.1	a5, %lo(restore_all_registers)(a5) ; to restore_all_registers.

	/*
	 * Launch system call  (it will return through a5)
	 */
7:	calli	a3, 0(a4)			 ; Launch system call

.Lsystem_call__trace:
	/*
	 * Syscalls are being traced.
	 * Call syscall_trace, (return here)
	 */
	call	a5, syscall_trace

	/*
	 * Restore syscall state (it would have been discarded during the
	 * syscall trace)
	 */
	move.4	d0, PT_D0(sp)			; Restore d0 from kernel stack
	move.4	d1, PT_D1(sp)			; Restore d1 from kernel stack
	move.4	d2, PT_D2(sp)			; Restore d2 from kernel stack
	move.4	d3, PT_D3(sp)			; Restore d3 from kernel stack
	move.4	d4, PT_D4(sp)			; Restore d4 from kernel stack
	move.4	d5, PT_D5(sp)			; Restore d5 from kernel stack
	/* add this back if we ever have a syscall with 7 args */
	move.4	d8, PT_D8(sp)			; Restore d8 from kernel stack

	/*
	 * return to syscall
	 */
	jmpt.t .Lsystem_call__trace_complete
	.size __system_call_bottom_half, . - __system_call_bottom_half

/*
 * syscall_exit()
 */
	.section .text.syscall_exit, "ax", @progbits
	.global syscall_exit
syscall_exit:
	/*
	 * d0 contains the return value, which we move into the d0 slot of
	 * the kernel stack.  We will be transitioning from kernel to user
	 * mode. Test the flags and see whether we have to call schedule. If
	 * we are going to truly exit, then all that has to be done is to
	 * restore d0, a0, a1, a2, a5, a6 and sp (a7) from the kernel stack
	 * and then return via a5.
	 */

	/*
	 * Save d0 to pt_regs
	 */
	move.4	PT_D0(sp), d0			; Save d0 into the kernel stack

	/*
	 * Load the thread_info pointer into a1 by masking off the
	 * THREAD_SIZE bits of sp.
	 *
	 * Note: we used to push a1, but now we don't as we are going
	 * to eventually restore it to the userspace a1.
	 */
	movei	d9, #(~(ASM_THREAD_SIZE-1))
	and.4	a1, sp, d9

	/*
	 * Check whether any interesting bits are set in the TI flags; if so,
	 * jump aside to post-processing.
	 */
	move.4	d9, #(_TIF_SYSCALL_TRACE | _TIF_NEED_RESCHED | _TIF_SIGPENDING)
	and.4	#0, TI_FLAGS(a1), d9
	jmpne.f	.Lsyscall_exit__post_processing ; jump to handler
.Lsyscall_exit__post_processing_complete:

	move.4	d0, PT_D0(sp)			; Restore D0 from kernel stack
	move.4	d1, PT_D1(sp)			; Restore d1 from kernel stack
	move.4	d2, PT_D2(sp)			; Restore d2 from kernel stack
	move.4	d3, PT_D3(sp)			; Restore d3 from kernel stack
	move.4	d4, PT_D4(sp)			; Restore d4 from kernel stack
	move.4	d5, PT_D5(sp)			; Restore d5 from kernel stack
	move.4	d8, PT_D8(sp)			; Restore d8 from kernel stack
	move.4	d10, PT_D10(sp)			; Restore d10 from kernel stack
	move.4	d11, PT_D11(sp)			; Restore d11 from kernel stack
	move.4	d12, PT_D12(sp)			; Restore d12 from kernel stack
	move.4	d13, PT_D13(sp)			; Restore d13 from kernel stack
	move.4	a1, PT_A1(sp)			; Restore A1 from kernel stack
	move.4	a2, PT_A2(sp)			; Restore A2 from kernel stack
	move.4	a5, PT_A5(sp)			; Restore A5 from kernel stack
	move.4	a6, PT_A6(sp)			; Restore A6 from kernel stack
	move.4	a0, PT_A0(sp)			; Restore A0 from kernel stack

	/*
	 * This is only for debugging and could be removed in production
	 * builds.
	 */
	move.4	PT_FRAME_TYPE(sp), #0		; invalidate frame_type

#ifdef CONFIG_PROTECT_KERNEL

	call a4, __syscall_exit_bottom_half

	.section .kernel_unprotected, "ax", @progbits
__syscall_exit_bottom_half:
	/*
	 * Enter critical section
	 */
	atomic_lock_acquire
	disable_kernel_ranges_for_current d15
#endif
	/*
	 * Lastly, restore the userspace stack pointer.
	 *
	 * Note that when protection is on we need to hold the lock around
	 * the stack swap as well, because otherwise the protection could get
	 * inadvertently disabled again at the end of a context switch.
	 */
	move.4	a7, PT_A7(sp)			; Restore A7 from kernel stack

	/*
	 * We are now officially back in userspace!
	 */

#ifdef CONFIG_PROTECT_KERNEL
	/*
	 * Leave critical section and return to user space.
	 */
	atomic_lock_release
#endif
	calli	a5, 0(a5)			; Back to userspace code.

	bkpt #-1				; we will never get here

	/*
	 * Post syscall processing. (unlikely part of syscall_exit)
	 *
	 * Are we tracing syscalls? If TIF_SYSCALL_TRACE is set, call the
	 * syscall_trace routine, which will return here.
	 */
	.section .text.syscall_exit, "ax", @progbits
.Lsyscall_exit__post_processing:
	btst	TI_FLAGS(a1), #ASM_TIF_SYSCALL_TRACE
	jmpeq.t	1f
	call	a5, syscall_trace

	/*
	 * Do we need to reschedule, i.e. call schedule? If TIF_NEED_RESCHED
	 * is set, call the scheduler; it will come back here.
	 */
1:	btst	TI_FLAGS(a1), #ASM_TIF_NEED_RESCHED
	jmpeq.t	2f
	call	a5, schedule

	/*
	 * Do we need to post a signal? If TIF_SIGPENDING is set, call
	 * do_signal.
	 */
2:	btst	TI_FLAGS(a1), #ASM_TIF_SIGPENDING
	jmpeq.t	.Lsyscall_exit__post_processing_complete

	/*
	 * Set up the do_signal call.
	 */
	move.4	d0, #0				; oldset pointer is NULL
	lea.1	d1, (sp)			; d1 is the regs pointer.
	call	a5, do_signal

	jmpt.t  .Lsyscall_exit__post_processing_complete

/*	.size syscall_exit, . - syscall_exit */

/*
 * kernel_execve()
 *	kernel_execve() is called when the kernel is starting a
 *	userspace application.
 */
	.section .kernel_unprotected, "ax", @progbits
	.global kernel_execve
kernel_execve:
	move.4	-4(sp)++, a5			; Save return address
	/*
	 * Call execve
	 */
	movei	d8, #__NR_execve		; call execve
	call	a5, system_call
	move.4	a5, (sp)4++

	/*
	 * Protection was enabled again at syscall exit, but we want to
	 * return to the kernel, so we re-enable the kernel ranges.
	 */
#ifdef CONFIG_PROTECT_KERNEL
	/*
	 * We are entering the kernel, so we need to disable the protection.
	 * Enter the critical section, enable the kernel ranges and leave the
	 * critical section.
	 */
	call a3, __enable_kernel_ranges ;  and jump back to kernel
#else
	ret a5					; jump back to the kernel
#endif

	.size kernel_execve, . - kernel_execve

/*
 * signal_trampoline()
 *
 *	Deals with transitioning to userspace signal handlers and returning
 *	to userspace; only called from the kernel.
 *
 */
	.section .kernel_unprotected, "ax", @progbits
	.global signal_trampoline
signal_trampoline:
	/*
	 * signal_trampoline is called when we are jumping from the kernel to
	 * the userspace signal handler.
	 *
	 * The following registers are relevant (set up in setup_rt_frame):
	 *  sp is the user space stack, not the kernel stack
	 *  d0 = signal number
	 *  d1 = siginfo_t *
	 *  d2 = ucontext *
	 *  d3 = the user space signal handler
	 *  a0 is set to the GOT if userspace application is FDPIC, otherwise 0
	 *  a3 is set to the FD for the signal if userspace application is FDPIC
	 */
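
	/*
	 * The handler invocation below is equivalent to this C sketch
	 * (illustration only):
	 *
	 *	void (*handler)(int, siginfo_t *, void *);
	 *	handler = (void (*)(int, siginfo_t *, void *))d3;
	 *	handler(d0, d1, d2);	i.e. handler(sig, info, ucontext)
	 */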
#ifdef CONFIG_PROTECT_KERNEL
	/*
	 * We are leaving the kernel so we need to enable the protection.
	 * Enter critical section, disable ranges and leave critical section.
	 */
	atomic_lock_acquire			; Enter critical section
	disable_kernel_ranges_for_current d15	; disable kernel ranges
	atomic_lock_release			; Leave critical section
#endif
	/*
	 * The signal handler pointer is in register d3, so transfer it to a4
	 * and call it.
	 */
	movea	a4, d3				; signal handler
	calli	a5, 0(a4)

	/*
	 * Return to userspace through rt_syscall, which is stored on top of
	 * the stack. d1 contains the ret_via_interrupt status.
	 */
	move.4	d8, (sp)			; d8 (syscall #) = rt_syscall
	move.4	d1, 4(sp)			; d1 = ret_via_interrupt
	call	a5, system_call		; as we are 'in' the kernel
						; we can call kernel_syscall

	bkpt #-1				; will never get here.
	.size signal_trampoline, . - signal_trampoline

/*
 * kernel_thread_helper()
 *
 *	Entry point for kernel threads (only referenced by kernel_thread()).
 *
 *	On execution d0 will be 0, d1 will be the argument to be passed to the
 *	kernel function.
 *	d2 contains the kernel function that needs to get called.
 *	d3 will contain address to do_exit which needs to get moved into a5.
 *
 *	On return from fork, d0 will be 0 in the child thread. We call this
 *	dummy function, which in turn loads the argument and calls the
 *	kernel function.
 */
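
	/*
	 * A C sketch of what this helper does (illustration only):
	 *
	 *	int (*fn)(void *) = (int (*)(void *))d2;
	 *	do_exit(fn((void *)d1));	never returns
	 */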
	.section .kernel_unprotected, "ax", @progbits
	.global kernel_thread_helper
kernel_thread_helper:
	/*
	 * Create a kernel thread. This is called from ret_from_vfork (a
	 * userspace return routine) so we need to put it in an unprotected
	 * section and re-enable protection before calling the vector in d2.
	 */

#ifdef CONFIG_PROTECT_KERNEL
	/*
	 * We are entering the kernel, so we need to disable the protection.
	 * Enter the critical section, enable the kernel ranges and leave the
	 * critical section.
	 */
	call a5, __enable_kernel_ranges
#endif
	/*
	 * Move the argument for the kernel function into d0, set the return
	 * address (a5) to do_exit, and jump to the function pointer in d2.
	 */
	move.4  d0, d1				; d0 = arg
	move.4  a5, d3				; a5 = do_exit
	ret	d2				; call function ptr in d2
	.size kernel_thread_helper, . - kernel_thread_helper

#ifdef CONFIG_PROTECT_KERNEL
	.section .kernel_unprotected, "ax", @progbits
__enable_kernel_ranges:
	atomic_lock_acquire			; Enter critical section
	enable_kernel_ranges_for_current d15
	atomic_lock_release			; Leave critical section
	calli a5, 0(a5)
	.size __enable_kernel_ranges, . - __enable_kernel_ranges

#endif

/*
 * The following are system call intercept functions, in which we set up
 * the input to the real system call.  In all cases these just take the
 * current sp, which points to pt_regs, and pass it as the last arg of
 * the system call.
 *
 * i.e. the public definition of sys_execve is
 *	sys_execve(	char *name,
 *			char **argv,
 *			char **envp )
 * but process.c defines it as
 *	sys_execve(	char *name,
 *			char **argv,
 *			char **envp,
 *			struct pt_regs *regs )
 *
 * so execve_intercept needs to populate the 4th arg with pt_regs*,
 * which is the stack pointer, as we know we must be coming out of
 * system_call.
 *
 * The intercept vectors are referenced by syscalltable.S
 */
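
/*
 * In C, each intercept amounts to something like this sketch (hypothetical,
 * for illustration only):
 *
 *	long execve_intercept(char *name, char **argv, char **envp)
 *	{
 *		return sys_execve(name, argv, envp, (struct pt_regs *)sp);
 *	}
 */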

/*
 * execve_intercept()
 */
	.section .text.execve_intercept, "ax", @progbits
	.global execve_intercept
execve_intercept:
	move.4	d3, sp	; Pass pt_regs address in d3 (4th arg)
	call	a3, sys_execve

	.size execve_intercept, . - execve_intercept

/*
 * vfork_intercept()
 */
	.section .text.vfork_intercept, "ax", @progbits
	.global vfork_intercept
vfork_intercept:
	move.4	d0, sp	; Pass pt_regs address in d0
	call	a3, sys_vfork

	.size vfork_intercept, . - vfork_intercept

/*
 * clone_intercept()
 */
	.section .text.clone_intercept, "ax", @progbits
	.global clone_intercept
clone_intercept:
	move.4	d2, sp	; Pass pt_regs address in d2 (3rd arg)
	call	a3, sys_clone

	.size clone_intercept, . - clone_intercept

/*
 * sys_sigsuspend()
 */
	.section .text.sys_sigsuspend, "ax", @progbits
	.global sys_sigsuspend
sys_sigsuspend:
	move.4	d0, sp	; Pass pointer to pt_regs in d0
	call	a3, do_sigsuspend

	.size sys_sigsuspend, . - sys_sigsuspend

/*
 * sys_rt_sigsuspend()
 */
	.section .text.sys_rt_sigsuspend, "ax", @progbits
	.global sys_rt_sigsuspend
sys_rt_sigsuspend:
	move.4	d0, sp	; Pass pointer to pt_regs in d0
	call	a3, do_rt_sigsuspend

	.size sys_rt_sigsuspend, . - sys_rt_sigsuspend

/*
 * sys_rt_sigreturn()
 */
	.section .text.sys_rt_sigreturn, "ax", @progbits
	.global sys_rt_sigreturn
sys_rt_sigreturn:
	move.4	d0, sp	; Pass pointer to pt_regs in d0
	call	a3, do_rt_sigreturn

	.size sys_rt_sigreturn, . - sys_rt_sigreturn

/*
 * sys_sigaltstack()
 */
	.section .text.sys_sigaltstack, "ax", @progbits
	.global sys_sigaltstack
sys_sigaltstack:
	move.4	d0, sp	; Pass pointer to pt_regs in d0
	call	a3, do_sys_sigaltstack

	.size sys_sigaltstack, . - sys_sigaltstack