Commit f1e39a4a authored by Ralf Baechle

MIPS: Rewrite sysmips(MIPS_ATOMIC_SET, ...) in C with inline assembler



This way it doesn't have to use CONFIG_CPU_HAS_LLSC anymore.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent f4c6b6bc
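For context: sysmips(MIPS_ATOMIC_SET, addr, new) atomically stores a new
value into a user-space word and returns the word's previous contents as
the syscall result. A minimal user-space sketch of the call, assuming a
MIPS Linux target where <asm/sysmips.h> and SYS_sysmips are available
(hypothetical test program, not part of this commit):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/sysmips.h>	/* MIPS_ATOMIC_SET */

int main(void)
{
	unsigned int word = 42;

	/* Atomically replace `word` with 7; the old value is returned. */
	long old = syscall(SYS_sysmips, MIPS_ATOMIC_SET, &word, 7);

	printf("old=%ld word=%u\n", old, word);
	return 0;
}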
@@ -32,6 +32,9 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
struct task_struct;
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
#ifdef CONFIG_MIPS_MT_FPAFF
/*
......
@@ -187,78 +187,6 @@ illegal_syscall:
j o32_syscall_exit
END(handle_sys)
LEAF(mips_atomic_set)
andi v0, a1, 3 # must be word aligned
bnez v0, bad_alignment
lw v1, TI_ADDR_LIMIT($28) # in legal address range?
addiu a0, a1, 4
or a0, a0, a1
and a0, a0, v1
bltz a0, bad_address
#ifdef CONFIG_CPU_HAS_LLSC
/* Ok, this is the ll/sc case. World is sane :-) */
1: ll v0, (a1)
move a0, a2
2: sc a0, (a1)
#if R10000_LLSC_WAR
beqzl a0, 1b
#else
beqz a0, 1b
#endif
.section __ex_table,"a"
PTR 1b, bad_stack
PTR 2b, bad_stack
.previous
#else
sw a1, 16(sp)
sw a2, 20(sp)
move a0, sp
move a2, a1
li a1, 1
jal do_page_fault
lw a1, 16(sp)
lw a2, 20(sp)
/*
* At this point the page should be readable and writable unless
* there was no more memory available.
*/
1: lw v0, (a1)
2: sw a2, (a1)
.section __ex_table,"a"
PTR 1b, no_mem
PTR 2b, no_mem
.previous
#endif
sw zero, PT_R7(sp) # success
sw v0, PT_R2(sp) # result
j o32_syscall_exit # continue like a normal syscall
no_mem: li v0, -ENOMEM
jr ra
bad_address:
li v0, -EFAULT
jr ra
bad_alignment:
li v0, -EINVAL
jr ra
END(mips_atomic_set)
LEAF(sys_sysmips)
beq a0, MIPS_ATOMIC_SET, mips_atomic_set
j _sys_sysmips
END(sys_sysmips)
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
......
@@ -124,78 +124,6 @@ illegal_syscall:
j n64_syscall_exit
END(handle_sys64)
LEAF(mips_atomic_set)
andi v0, a1, 3 # must be word aligned
bnez v0, bad_alignment
LONG_L v1, TI_ADDR_LIMIT($28) # in legal address range?
LONG_ADDIU a0, a1, 4
or a0, a0, a1
and a0, a0, v1
bltz a0, bad_address
#ifdef CONFIG_CPU_HAS_LLSC
/* Ok, this is the ll/sc case. World is sane :-) */
1: ll v0, (a1)
move a0, a2
2: sc a0, (a1)
#if R10000_LLSC_WAR
beqzl a0, 1b
#else
beqz a0, 1b
#endif
.section __ex_table,"a"
PTR 1b, bad_stack
PTR 2b, bad_stack
.previous
#else
sw a1, 16(sp)
sw a2, 20(sp)
move a0, sp
move a2, a1
li a1, 1
jal do_page_fault
lw a1, 16(sp)
lw a2, 20(sp)
/*
* At this point the page should be readable and writable unless
* there was no more memory available.
*/
1: lw v0, (a1)
2: sw a2, (a1)
.section __ex_table,"a"
PTR 1b, no_mem
PTR 2b, no_mem
.previous
#endif
sd zero, PT_R7(sp) # success
sd v0, PT_R2(sp) # result
j n64_syscall_exit # continue like a normal syscall
no_mem: li v0, -ENOMEM
jr ra
bad_address:
li v0, -EFAULT
jr ra
bad_alignment:
li v0, -EINVAL
jr ra
END(mips_atomic_set)
LEAF(sys_sysmips)
beq a0, MIPS_ATOMIC_SET, mips_atomic_set
j _sys_sysmips
END(sys_sysmips)
.align 3
sys_call_table:
PTR sys_read /* 5000 */
......
@@ -28,7 +28,9 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -290,12 +292,116 @@ SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
return 0;
}
asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
static inline int mips_atomic_set(struct pt_regs *regs,
unsigned long addr, unsigned long new)
{
unsigned long old, tmp;
unsigned int err;
if (unlikely(addr & 3))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
return -EINVAL;
if (cpu_has_llsc && R10000_LLSC_WAR) {
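/*
 * Note: early R10000 silicon needs a branch-likely (beqzl) after the
 * sc to work around an ll/sc errata; that is what selecting
 * R10000_LLSC_WAR expresses here.
 */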
__asm__ __volatile__ (
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
" beqzl %[tmp], 1b \n"
"3: \n"
" .section .fixup,\"ax\" \n"
"4: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 4b \n"
" "STR(PTR)" 2b, 4b \n"
" .previous \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
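/*
 * Plain ll/sc: the retry branch is moved out of line into
 * .subsection 2 so the common no-retry path stays straight-line.
 */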
__asm__ __volatile__ (
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
" bnez %[tmp], 4f \n"
"3: \n"
" .subsection 2 \n"
"4: b 1b \n"
" .previous \n"
" \n"
" .section .fixup,\"ax\" \n"
"5: li %[err], %[efault] \n"
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" "STR(PTR)" 1b, 5b \n"
" "STR(PTR)" 2b, 5b \n"
" .previous \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
: [addr] "r" (addr),
[new] "r" (new),
[efault] "i" (-EFAULT)
: "memory");
} else {
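/*
 * No ll/sc in hardware: emulate load-linked with the ll_bit flag.
 * resume() clears ll_bit on every context switch, so if it is still
 * set after the __get_user/__put_user pair, no other task ran in
 * between and the update was effectively atomic.
 */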
do {
preempt_disable();
ll_bit = 1;
ll_task = current;
preempt_enable();
err = __get_user(old, (unsigned int *) addr);
err |= __put_user(new, (unsigned int *) addr);
if (err)
break;
rmb();
} while (!ll_bit);
}
if (unlikely(err))
return err;
regs->regs[2] = old;
regs->regs[7] = 0; /* No error */
/*
* Don't let your children do this ...
*/
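/*
 * (The full pt_regs frame was spilled by the sys_sysmips stub below,
 * so pointing the stack at it and jumping straight to syscall_exit
 * returns to user space with the register values set above.)
 */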
__asm__ __volatile__(
" move $29, %0 \n"
" j syscall_exit \n"
: /* no outputs */
: "r" (regs));
/* unreached. Honestly. */
while (1);
}
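/*
 * save_static_function() emits the real sys_sysmips entry point: a
 * stub that spills the callee-saved (static) registers into the
 * pt_regs frame before calling _sys_sysmips(), so mips_atomic_set()
 * sees a complete register set it can return through.
 */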
save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
long cmd, arg1, arg2, arg3;
cmd = regs.regs[4];
arg1 = regs.regs[5];
arg2 = regs.regs[6];
arg3 = regs.regs[7];
switch (cmd) {
case MIPS_ATOMIC_SET:
printk(KERN_CRIT "How did I get here?\n");
return -EINVAL;
return mips_atomic_set(&regs, arg1, arg2);
case MIPS_FIXADE:
if (arg1 & ~3)
......
@@ -466,9 +466,8 @@ asmlinkage void do_be(struct pt_regs *regs)
* The ll_bit is cleared by r*_switch.S
*/
unsigned long ll_bit;
static struct task_struct *ll_task = NULL;
unsigned int ll_bit;
struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
......
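In effect, all three code paths above implement the same contract. A
minimal sketch of that contract in portable C11 atomics (illustration
only, not part of the commit):

#include <stdatomic.h>

/* mips_atomic_set() behaves like an atomic exchange: store the new
 * value and hand back whatever the word contained before. */
static unsigned int mips_atomic_set_semantics(_Atomic unsigned int *addr,
					      unsigned int new)
{
	return atomic_exchange(addr, new);
}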