diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ed2cd4fb479b..192ad9e11b25 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -11,14 +11,6 @@
 #include <linux/bug.h>
 #include <linux/string.h>
 
-#define KERNEL_DS	((mm_segment_t){0})
-#define USER_DS 	((mm_segment_t){1})
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
 /*
  * Note that since kernel addresses are in a separate address space on
  * parisc, we don't need to do anything for access_ok().
@@ -33,11 +25,11 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
-#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
 #else
-#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
-#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
 #endif
 
 /*
@@ -67,28 +59,15 @@ struct exception_table_entry {
 #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
 	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
 
-/*
- * load_sr2() preloads the space register %%sr2 - based on the value of
- * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
- * is 0), or with the current value of %%sr3 to access user space (USER_DS)
- * memory. The following __get_user_asm() and __put_user_asm() functions have
- * %%sr2 hard-coded to access the requested memory.
- */
-#define load_sr2() \
-	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
-		" mfsp %%sr3,%0\n\t"		\
-		" mtsp %0,%%sr2\n\t"		\
-		: : "r"(get_fs()) : )
-
-#define __get_user_internal(val, ptr)			\
+#define __get_user_internal(sr, val, ptr)		\
 ({							\
 	register long __gu_err __asm__ ("r8") = 0;	\
 							\
 	switch (sizeof(*(ptr))) {			\
-	case 1: __get_user_asm(val, "ldb", ptr); break; \
-	case 2: __get_user_asm(val, "ldh", ptr); break; \
-	case 4: __get_user_asm(val, "ldw", ptr); break; \
-	case 8: LDD_USER(val, ptr); break;		\
+	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
+	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
+	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
+	case 8: LDD_USER(sr, val, ptr); break;		\
 	default: BUILD_BUG();				\
 	}						\
 							\
@@ -97,15 +76,14 @@ struct exception_table_entry {
 
 #define __get_user(val, ptr)				\
 ({							\
-	load_sr2();					\
-	__get_user_internal(val, ptr);			\
+	__get_user_internal("%%sr3,", val, ptr);	\
 })
 
-#define __get_user_asm(val, ldx, ptr)			\
+#define __get_user_asm(sr, val, ldx, ptr)		\
 {							\
 	register long __gu_val;				\
 							\
-	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
+	__asm__("1: " ldx " 0(" sr "%2),%0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
@@ -114,9 +92,22 @@ struct exception_table_entry {
 	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
 }
 
+#define HAVE_GET_KERNEL_NOFAULT
+#define __get_kernel_nofault(dst, src, type, err_label)	\
+{							\
+	type __z;					\
+	long __err;					\
+	__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
+	if (unlikely(__err))				\
+		goto err_label;				\
+	else						\
+		*(type *)(dst) = __z;			\
+}
+
+
 #if !defined(CONFIG_64BIT)
-#define __get_user_asm64(val, ptr)			\
+#define __get_user_asm64(sr, val, ptr)			\
 {							\
 	union {						\
 		unsigned long long	l;		\
@@ -124,8 +115,8 @@ struct exception_table_entry {
 	} __gu_tmp;					\
 							\
 	__asm__("   copy %%r0,%R0\n"			\
-		"1: ldw 0(%%sr2,%2),%0\n"		\
-		"2: ldw 4(%%sr2,%2),%R0\n"		\
+		"1: ldw 0(" sr "%2),%0\n"		\
+		"2: ldw 4(" sr "%2),%R0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
@@ -138,16 +129,16 @@ struct exception_table_entry {
 
 #endif	/* !defined(CONFIG_64BIT) */
 
 
-#define __put_user_internal(x, ptr)			\
+#define __put_user_internal(sr, x, ptr)			\
 ({							\
 	register long __pu_err __asm__ ("r8") = 0;	\
 	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
 							\
 	switch (sizeof(*(ptr))) {			\
-	case 1: __put_user_asm("stb", __x, ptr); break;	\
-	case 2: __put_user_asm("sth", __x, ptr); break;	\
-	case 4: __put_user_asm("stw", __x, ptr); break;	\
-	case 8: STD_USER(__x, ptr); break;		\
+	case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
+	case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
+	case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
+	case 8: STD_USER(sr, __x, ptr); break;		\
 	default: BUILD_BUG();				\
 	}						\
 							\
@@ -156,10 +147,20 @@ struct exception_table_entry {
 
 #define __put_user(x, ptr)				\
 ({							\
-	load_sr2();					\
-	__put_user_internal(x, ptr);			\
+	__put_user_internal("%%sr3,", x, ptr);		\
 })
 
+#define __put_kernel_nofault(dst, src, type, err_label)	\
+{							\
+	type __z = *(type *)(src);			\
+	long __err;					\
+	__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
+	if (unlikely(__err))				\
+		goto err_label;				\
+}
+
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
@@ -170,26 +171,26 @@ struct exception_table_entry {
  * r8 is already listed as err.
*/ -#define __put_user_asm(stx, x, ptr) \ - __asm__ __volatile__ ( \ - "1: " stx " %2,0(%%sr2,%1)\n" \ - "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - : "=r"(__pu_err) \ +#define __put_user_asm(sr, stx, x, ptr) \ + __asm__ __volatile__ ( \ + "1: " stx " %2,0(" sr "%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + : "=r"(__pu_err) \ : "r"(ptr), "r"(x), "0"(__pu_err)) #if !defined(CONFIG_64BIT) -#define __put_user_asm64(__val, ptr) do { \ - __asm__ __volatile__ ( \ - "1: stw %2,0(%%sr2,%1)\n" \ - "2: stw %R2,4(%%sr2,%1)\n" \ - "9:\n" \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ - ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ - : "=r"(__pu_err) \ - : "r"(ptr), "r"(__val), "0"(__pu_err)); \ +#define __put_user_asm64(sr, __val, ptr) do { \ + __asm__ __volatile__ ( \ + "1: stw %2,0(" sr "%1)\n" \ + "2: stw %R2,4(" sr "%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ + : "=r"(__pu_err) \ + : "r"(ptr), "r"(__val), "0"(__pu_err)); \ } while (0) #endif /* !defined(CONFIG_64BIT) */ @@ -200,14 +201,12 @@ struct exception_table_entry { */ extern long strncpy_from_user(char *, const char __user *, long); -extern unsigned lclear_user(void __user *, unsigned long); -extern long lstrnlen_user(const char __user *, long); +extern __must_check unsigned lclear_user(void __user *, unsigned long); +extern __must_check long strnlen_user(const char __user *src, long n); /* * Complex access routines -- macros */ -#define user_addr_max() (~0UL) -#define strnlen_user lstrnlen_user #define clear_user lclear_user #define __clear_user lclear_user @@ -215,8 +214,6 @@ unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, unsigned long len); unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, unsigned long len); -unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src, - unsigned long len); #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER
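
Usage note (illustrative, not part of the patch): with the space register now passed in textually, a 4-byte __get_user() issues its ldw through %sr3 (the user space register) directly, and the new nofault helpers pass "%%sr0," to reach kernel space — which is what lets load_sr2() and the get_fs()/set_fs() machinery go away. Below is a minimal sketch of how the helpers added above are meant to be called; the probe_kernel_long()/poke_kernel_long() wrappers are hypothetical names invented for this example, assuming only <linux/errno.h> for -EFAULT:

	#include <linux/errno.h>

	/* Hypothetical caller: read a kernel word without risking an oops.
	 * The err_label argument is a goto target taken when the access
	 * faults; no set_fs(KERNEL_DS) section is needed around it anymore.
	 */
	static long probe_kernel_long(long *dst, const long *src)
	{
		__get_kernel_nofault(dst, src, long, efault);
		return 0;
	efault:
		return -EFAULT;
	}

	/* Hypothetical caller: the store-side counterpart, also via %sr0. */
	static long poke_kernel_long(long *dst, const long *src)
	{
		__put_kernel_nofault(dst, src, long, efault);
		return 0;
	efault:
		return -EFAULT;
	}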