/*
 * linux/arch/m32r/kernel/sys_m32r.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/M32R platform.
 *
 * Taken from the i386 version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/ipc.h>

/*
 * sys_tas() - test-and-set
 */
asmlinkage int sys_tas(int *addr)
{
	int oldval;

	if (!access_ok(VERIFY_WRITE, addr, sizeof(int)))
		return -EFAULT;

[PATCH] m32r: update sys_tas() routine

This patch updates and fixes the sys_tas() routine for m32r.
In the previous implementation, a lockup rarely occurred in the sys_tas()
routine in SMP environments.

> > The problem is that touching *addr will generate an oops if that page isn't
> > paged in. If we convert it to use get_user() then that's an improvement,
> > but we must not run get_user() under spinlock or local_irq_disable().

I rewrote the sys_tas() routine using "lock -> unlock" instructions,
exploiting a characteristic of the m32r's interrupt handling: the m32r
processor accepts interrupts only at 32-bit instruction boundaries, so the
"unlock" instruction executes immediately after the "lock" instruction
without any interruption in between.

In addition, to handle the page-fault case, the code uses a fixup section,
like get_user().

As for the kernel lockup problem, we found that calling the do_page_fault()
routine with interrupts disabled could cause a lockup in flush_tlb_others(),
because completion of the IPI handlers was checked inside a spin-locked
critical section.

Therefore, by using the "lock -> unlock" sequence, sys_tas() can be
implemented without explicitly disabling interrupts, so no lockups should
occur in flush_tlb_others().

Compile checks and some testing in an SMP environment have been done.

Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

	/* atomic operation:
	 *	oldval = *addr; *addr = 1;
	 */
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r4", "%1")
		"	.fillinsn\n"
		"1:\n"
		"	lock	%0, @%1  ->  unlock	%2, @%1\n"
		"2:\n"
		/* NOTE:
		 *   The m32r processor can accept interrupts only
		 *   at the 32-bit instruction boundary.
		 *   So, in the above code, the "unlock" instruction
		 *   can be executed continuously after the "lock"
		 *   instruction execution without any interruptions.
		 */
		".section .fixup,\"ax\"\n"
		"	.balign 4\n"
		"3:	ldi	%0, #%3\n"
		"	seth	r14, #high(2b)\n"
		"	or3	r14, r14, #low(2b)\n"
		"	jmp	r14\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous\n"
		: "=&r" (oldval)
		: "r" (addr), "r" (1), "i"(-EFAULT)
		: "r14", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
	);

	return oldval;
}
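
For reference, here is a minimal user-space sketch of how this syscall can back a simple spinlock. The tas() wrapper and the use of __NR_tas below are assumptions for illustration, not part of this file: sys_tas() atomically stores 1 to *addr and returns the old value, so a return value of 0 means the lock was acquired.

/* Hypothetical user-space sketch built on sys_tas(). Assumes a
 * __NR_tas syscall number is exposed via <sys/syscall.h>; tas()
 * returns the previous value of *addr, and 0 means the lock was free.
 */
#include <unistd.h>
#include <sys/syscall.h>

static int tas(int *addr)
{
	return syscall(__NR_tas, addr);
}

static void lock_tas(int *lock)
{
	while (tas(lock) != 0)
		;	/* spin until the previous value was 0 */
}

static void unlock_tas(int *lock)
{
	*lock = 0;
}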

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int
sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2,
	unsigned long r3, unsigned long r4, unsigned long r5,
	unsigned long r6, struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user((void *)r0, (void *)fd, 2 * sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
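
Because the descriptors are copied back through the pointer passed in r0, a direct syscall behaves like the C library pipe(). A minimal sketch, assuming __NR_pipe is available from <sys/syscall.h>:

/* Hypothetical user-space sketch: invoking the pipe syscall directly.
 * The kernel writes both descriptors through the pointer argument,
 * so the result matches what the C library pipe() returns.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fds[2];

	if (syscall(__NR_pipe, fds) < 0) {
		perror("pipe");
		return 1;
	}
	printf("read end = %d, write end = %d\n", fds[0], fds[1]);
	return 0;
}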

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
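
Note that the last argument is an offset in pages, handed straight to do_mmap_pgoff(), not a byte offset. A minimal user-space sketch, assuming __NR_mmap2 is available and that the desired byte offset is page aligned:

/* Hypothetical user-space sketch: map one page of a file starting at
 * a page-aligned byte offset via the raw mmap2 syscall. The sixth
 * argument is the byte offset divided by the page size.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

int main(int argc, char **argv)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd;
	void *p;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Map the second page of the file: byte offset = 1 * page. */
	p = (void *)syscall(__NR_mmap2, NULL, page, PROT_READ,
			    MAP_PRIVATE, fd, 1);
	if (p == MAP_FAILED)
		perror("mmap2");
	close(fd);
	return 0;
}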

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	switch (call) {
	case SEMOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second,
				      (const struct timespec __user *)fifth);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		if (get_user(fourth.__pad, (void __user * __user *) ptr))
			return -EFAULT;
		return sys_semctl(first, second, third, fourth);
	}

	case MSGSND:
		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
				  second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			struct ipc_kludge tmp;
			if (!ptr)
				return -EINVAL;

			if (copy_from_user(&tmp,
					   (struct ipc_kludge __user *) ptr,
					   sizeof(tmp)))
				return -EFAULT;
			return sys_msgrcv(first, tmp.msgp, second,
					  tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv(first,
					  (struct msgbuf __user *) ptr,
					  second, fifth, third);
		}
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return sys_msgctl(first, second,
				  (struct msqid_ds __user *) ptr);
	case SHMAT: {
		ulong raddr;

		if (!access_ok(VERIFY_WRITE, (ulong __user *) third,
			       sizeof(ulong)))
			return -EFAULT;
		ret = do_shmat(first, (char __user *) ptr, second, &raddr);
		if (ret)
			return ret;
		return put_user(raddr, (ulong __user *) third);
	}
	case SHMDT:
		return sys_shmdt((char __user *)ptr);
	case SHMGET:
		return sys_shmget(first, second, third);
	case SHMCTL:
		return sys_shmctl(first, second,
				  (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}
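
For illustration, this is roughly how a C library could route semop() through the multiplexer. The my_semop() wrapper and the IPCCALL_SEMOP constant below are assumptions: IPCCALL_SEMOP must match the kernel's SEMOP value, and __NR_ipc is assumed to be available from <sys/syscall.h>.

/* Hypothetical sketch of a semop() wrapper on top of sys_ipc().
 * Arguments map onto sys_ipc(call, first, second, third, ptr, fifth)
 * as call = SEMOP, first = semid, second = nsops, ptr = sops.
 */
#include <stddef.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IPCCALL_SEMOP	1	/* assumed to match the kernel's SEMOP */

static int my_semop(int semid, struct sembuf *sops, size_t nsops)
{
	return syscall(__NR_ipc, IPCCALL_SEMOP, semid, (int)nsops, 0, sops, 0);
}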

asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}

asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
	/* This should flush more selectively ... */
	_flush_cache_all();
	return 0;
}
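
The typical user of this call is runtime code generation: after writing instructions into a buffer, the caller flushes the caches before jumping to the buffer. A minimal sketch, assuming a __NR_cacheflush number is exported for this port (the wrapper is hypothetical); since the current implementation flushes everything, the arguments only describe intent:

/* Hypothetical user-space sketch: flush caches over freshly written
 * code before executing it. Assumes __NR_cacheflush is defined via
 * <sys/syscall.h>; the cache-selection argument is passed as 0.
 */
#include <unistd.h>
#include <sys/syscall.h>

static void flush_generated_code(void *buf, int len)
{
	syscall(__NR_cacheflush, buf, len, 0);
}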

asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
{
	/* Not implemented yet. */
	return -ENOSYS;
}