/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *       C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include "irq_vectors.h"
/* Number of entries in sys_call_table; derived from the table size below. */
#define nr_syscalls ((syscall_table_size)/4)

/*
 * Byte offsets into the register frame built by SAVE_ALL.  These must
 * match the stack layout documented at the top of this file.
 */
EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

/* EFLAGS bit masks tested on the saved flags word. */
CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000
76
/*
 * With CONFIG_PREEMPT, interrupts must be closed (preempt_stop) before
 * the exception-return path inspects thread_info, and a real
 * resume_kernel (defined below) handles kernel preemption.  Without
 * preemption, returning to kernel space simply restores registers.
 */
#ifdef CONFIG_PREEMPT
#define preempt_stop		cli
#else
#define preempt_stop
#define resume_kernel		restore_all
#endif
83
/*
 * Build the pt_regs frame described at the top of the file.  The caller
 * has already pushed the orig_eax slot; this pushes the segment and
 * general registers and then loads the flat __USER_DS descriptor into
 * %ds/%es for kernel data accesses.  cld: the kernel expects DF clear.
 */
#define SAVE_ALL \
	cld; \
	pushl %es; \
	pushl %ds; \
	pushl %eax; \
	pushl %ebp; \
	pushl %edi; \
	pushl %esi; \
	pushl %edx; \
	pushl %ecx; \
	pushl %ebx; \
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
98
/* Pop the general registers pushed by SAVE_ALL, in reverse order. */
#define RESTORE_INT_REGS \
	popl %ebx;	\
	popl %ecx;	\
	popl %edx;	\
	popl %esi;	\
	popl %edi;	\
	popl %ebp;	\
	popl %eax
107
/*
 * Restore the general registers, then %ds and %es.  Popping a segment
 * register faults if the saved selector on the stack is invalid; the
 * .fixup entries overwrite the bad selector with the null selector and
 * retry the pop, via the __ex_table fault mappings.
 */
#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
2:	popl %es;	\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous
123
124
/*
 * Unwind the whole frame: restore registers, skip the orig_eax slot
 * (the addl $4), and iret.  The iret itself can fault on a bad saved
 * %cs/%eip/%ss; the fixup re-enables interrupts, reloads sane data
 * segments and kills the task via do_exit(11).
 */
#define RESTORE_ALL	\
	RESTORE_REGS	\
	addl $4, %esp;	\
1:	iret;		\
.section .fixup,"ax";   \
2:	sti;		\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $11,%eax;	\
	call do_exit;	\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,2b;	\
.previous
141
142
/*
 * First return of a freshly forked task: finish the context switch in
 * schedule_tail(), then leave via the common syscall exit path.
 */
ENTRY(ret_from_fork)
	pushl %eax			# preserve %eax across the call; also
					# the argument to schedule_tail
					# (presumably prev task — see switch_to)
	call schedule_tail
	GET_THREAD_INFO(%ebp)		# syscall exit path expects ti in %ebp
	popl %eax			# restore the child's syscall return value
	jmp syscall_exit
149
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop			# (CONFIG_PREEMPT) interrupts off before
					# the flags are sampled
ret_from_intr:
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al		# low byte = saved CS, incl. RPL bits
	testl $(VM_MASK | 3), %eax	# VM flag set, or CPL != 0 ?
	jz resume_kernel		# neither: returning to kernel space
ENTRY(resume_userspace)
 	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
176
#ifdef CONFIG_PREEMPT
/*
 * Returning to kernel space with a preemptible kernel: reschedule only
 * if the preempt count is zero, need_resched is set, and the
 * interrupted context had interrupts enabled.
 */
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched		# recheck: flags may be set again
#endif
191
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

# sysenter call handler stub
ENTRY(sysenter_entry)
	movl TSS_sysenter_esp0(%esp),%esp	# on sysenter, %esp came from the
						# SYSENTER_ESP MSR; fetch the real
						# kernel stack from the TSS
						# (NOTE(review): relies on the MSR
						# pointing into the TSS — see cpu init)
sysenter_past_esp:
	sti
	pushl $(__USER_DS)		# hand-build the iret frame:
	pushl %ebp			# user %esp was passed in %ebp
	pushfl
	pushl $(__USER_CS)
	pushl $SYSENTER_RETURN		# resume after "sysenter" in the vsyscall page

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp	# the 4-byte read must lie entirely
					# below kernel space
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault		# fault on the user read -> -EFAULT
.previous

	pushl %eax			# orig_eax = syscall number
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value in the frame
	cli
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work		# slow path: tracing/signals/resched
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx		# sysexit resumes at %edx ...
	movl OLDESP(%esp), %ecx		# ... with the user stack from %ecx
	xorl %ebp,%ebp
	sti				# sysexit does not restore EFLAGS
	sysexit
239
# system call handler stub (int 0x80 path)
ENTRY(system_call)
	pushl %eax			# save orig_eax (the syscall number)
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax	# range-check before the table dispatch
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work
restore_all:
	RESTORE_ALL
262
# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb $_TIF_NEED_RESCHED, %cl	# %cl still holds the TI_flags low byte
	jz work_notifysig
work_resched:
	call schedule
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)	# was the interrupted task in vm86 mode?
	movl %esp, %eax			# %eax = pt_regs pointer
	jne work_notifysig_v86		# vm86 state must be saved first
	xorl %edx, %edx			# %edx = 0; %ecx carries the TI flags
					# (NOTE(review): matches a regparm
					# do_notify_resume — confirm prototype)
	call do_notify_resume
	jmp restore_all

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	movl %eax, %esp			# switch to the frame returned by
					# save_v86_state
	xorl %edx, %edx
	call do_notify_resume
	jmp restore_all
299
# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)		# default return value if the call
					# is never dispatched
	movl %esp, %eax			# pt_regs pointer
	xorl %edx,%edx			# 0 = entry tracing
	call do_syscall_trace
	movl ORIG_EAX(%esp), %eax	# reload nr — the tracer may have changed it
	cmpl $(nr_syscalls), %eax	# re-validate before dispatching
	jnae syscall_call
	jmp syscall_exit
311
# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending			# no exit tracing; just signals/resched
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax			# pt_regs pointer
	movl $1, %edx			# 1 = exit tracing
	call do_syscall_trace
	jmp resume_userspace
323
	ALIGN
syscall_fault:
	# Reached when reading the sixth sysenter argument from the user
	# stack faults (via __ex_table above): report -EFAULT.
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

	ALIGN
syscall_badsys:
	# Out-of-range syscall number: report -ENOSYS.
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
336
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)			# interrupt[i] = address of the i-th stub
.text

vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256		# negative value distinguishes an IRQ
					# frame from a syscall's orig_eax
	jmp common_interrupt
.data
	.long 1b			# append this stub to the interrupt[] table
.text
vector=vector+1
.endr

	ALIGN
common_interrupt:
	SAVE_ALL
	movl %esp,%eax			# pt_regs pointer
	call do_IRQ
	jmp ret_from_intr
363
/*
 * Entry stub for the special (SMP etc.) interrupts declared in
 * entry_arch.h: push the (vector - 256) marker, save registers, and
 * call the smp_<name> C handler with the pt_regs pointer in %eax.
 */
#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;
371
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
374
ENTRY(divide_error)
	pushl $0			# no error code
	pushl $do_divide_error		# C handler address
	ALIGN
error_code:
	# Common exception tail.  On entry the stack holds, top down:
	# handler address, error code (hardware or the faked $0), then the
	# iret frame.  Build a full pt_regs frame: after the pushes below,
	# the handler address sits in the ES slot and the error code in the
	# ORIG_EAX slot; pull both out and substitute the real saved %es
	# and -1 ("not a syscall") respectively.
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	decl %eax			# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	movl %es, %ecx
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)	# orig_eax = -1
	movl %ecx, ES(%esp)		# store the real saved %es in the frame
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi			# invoke handler: %eax = regs,
					# %edx = error code
	jmp ret_from_exception
402
ENTRY(coprocessor_error)
	pushl $0			# no error code
	pushl $do_coprocessor_error
	jmp error_code

ENTRY(simd_coprocessor_error)
	pushl $0			# no error code
	pushl $do_simd_coprocessor_error
	jmp error_code
412
/*
 * #NM (device not available): either restore the lazily-switched FPU
 * state, or — when CR0.EM is set — hand the instruction to the math
 * emulator.
 */
ENTRY(device_not_available)
	pushl $-1			# mark this as an int (no error code)
	SAVE_ALL
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop			# (CONFIG_PREEMPT) no preemption while
					# the FPU state is being restored
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	call math_emulate
	addl $4, %esp			# discard the temporary
	jmp ret_from_exception
427
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp; \
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp
449
ENTRY(debug)
	cmpl $sysenter_entry,(%esp)	# trap exactly on the sysenter insn?
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int (no error code)
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	testl %eax,%eax			# nonzero: handler fully dealt with
	jnz restore_all			# the trap; skip signal/resched work
	jmp ret_from_exception
463
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	cmpl $sysenter_entry,(%esp)	# NMI hit exactly on the sysenter insn?
	je nmi_stack_fixup
	pushl %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax	# offset of %esp within the stack area
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	jae nmi_stack_correct		# well inside the stack: nothing to fix
	cmpl $sysenter_entry,12(%esp)	# near the top: maybe interrupted the
	je nmi_debug_stack_check	# debug handler's sysenter fixup
nmi_stack_correct:
	pushl %eax			# fake orig_eax slot
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	RESTORE_ALL

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)	# interrupted code must be kernel code
	jne nmi_stack_correct
	cmpl $debug - 1,(%esp)		# EIP below the debug handler?
	jle nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	jg nmi_stack_correct		# BUGFIX: was "jle nmi_debug_stack_fixup",
					# a branch to the very next line, so the
					# not-taken path ALSO fell into the fixup
					# and every kernel EIP >= debug got its
					# stack "repaired".  Only EIPs inside
					# [debug, debug_esp_fix_insn] — the window
					# where the debug handler still runs on
					# the sysenter stack — need the fixup.
nmi_debug_stack_fixup:
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct
507
ENTRY(int3)
	pushl $-1			# mark this as an int (no error code)
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	testl %eax,%eax			# nonzero: handler fully dealt with
	jnz restore_all			# the trap; skip signal/resched work
	jmp ret_from_exception
517
/*
 * Remaining trap stubs: each pushes a zero error code where the CPU
 * does not supply one, then the C handler address, and joins the
 * common error_code path above.
 */
ENTRY(overflow)
	pushl $0			# no error code
	pushl $do_overflow
	jmp error_code

ENTRY(bounds)
	pushl $0			# no error code
	pushl $do_bounds
	jmp error_code

ENTRY(invalid_op)
	pushl $0			# no error code
	pushl $do_invalid_op
	jmp error_code

ENTRY(coprocessor_segment_overrun)
	pushl $0			# no error code
	pushl $do_coprocessor_segment_overrun
	jmp error_code

ENTRY(invalid_TSS)
	pushl $do_invalid_TSS		# CPU already pushed the error code
	jmp error_code

ENTRY(segment_not_present)
	pushl $do_segment_not_present	# CPU already pushed the error code
	jmp error_code

ENTRY(stack_segment)
	pushl $do_stack_segment		# CPU already pushed the error code
	jmp error_code

ENTRY(general_protection)
	pushl $do_general_protection	# CPU already pushed the error code
	jmp error_code

ENTRY(alignment_check)
	pushl $do_alignment_check	# CPU already pushed the error code
	jmp error_code

ENTRY(page_fault)
	pushl $do_page_fault		# CPU already pushed the error code
	jmp error_code

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl $0			# no error code
	pushl machine_check_vector	# note: no '$' — push the vector
					# variable's contents (indirect handler)
	jmp error_code
#endif

ENTRY(spurious_interrupt_bug)
	pushl $0			# no error code
	pushl $do_spurious_interrupt_bug
	jmp error_code
573
.data
/*
 * The i386 system call table: one handler pointer per syscall number,
 * dispatched via "call *sys_call_table(,%eax,4)" above.  nr_syscalls is
 * derived from syscall_table_size (at the bottom), so entries may be
 * appended without updating a count by hand.  Retired or reserved slots
 * point at sys_ni_syscall, which returns -ENOSYS.
 */
ENTRY(sys_call_table)
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 - old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_olduname
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long old_select
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long old_mmap		/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ioperm
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_iopl		/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* old "idle" system call */
	.long sys_vm86old
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_modify_ldt
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* reserved for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam   /* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min  /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_vm86
	.long sys_ni_syscall	/* Old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* reserved for streams1 */
	.long sys_ni_syscall	/* reserved for streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_set_thread_area
	.long sys_get_thread_area
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall	/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall		/* 285 */ /* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl

/* Table size in bytes; nr_syscalls (above) divides this by 4. */
syscall_table_size=(.-sys_call_table)