/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <asm/uaccess.h>
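
/*
 * Low-level copy to user space, with no access checks.  For copies of at
 * least two machine words the destination is first brought to word
 * alignment with single-byte copies, the bulk is then moved one word at a
 * time with 'rep movs', and any remainder is copied as bytes.  Faults are
 * caught via the __ex_table entries and handled by the .fixup code, which
 * works out and returns the number of bytes that could not be copied
 * (zero on success).
 */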
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
{
    unsigned long __d0, __d1, __d2, __n = n;
    __asm__ __volatile__(
        "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
        "    jbe  1f\n"
        "    mov  %1,%0\n"
        "    neg  %0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%0\n"
        "    sub  %0,%3\n"
        "4:  rep; movsb\n" /* make 'to' address aligned */
        "    mov  %3,%0\n"
        "    shr  $"STR(LONG_BYTEORDER)",%0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%3\n"
        "    .align 2,0x90\n"
        "0:  rep; movs"__OS"\n" /* as many words as possible... */
        "    mov  %3,%0\n"
        "1:  rep; movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add %3,%0\n"
        "    jmp 2b\n"
        "3:  lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
        "    jmp 2b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "    "__FIXUP_ALIGN"\n"
        "    "__FIXUP_WORD" 4b,5b\n"
        "    "__FIXUP_WORD" 0b,3b\n"
        "    "__FIXUP_WORD" 1b,2b\n"
        ".previous"
        : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
        : "3"(__n), "0"(__n), "1"(to), "2"(from)
        : "memory");
    return (unsigned)__n;
}
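
/*
 * Low-level copy from user space, with no access checks.  The copy loop
 * is the same align/word/byte scheme as __copy_to_user_ll.  On a fault
 * the .fixup code additionally zero-fills the remaining destination
 * bytes ('xor %eax,%eax; rep stosb') before returning the number of
 * bytes that could not be copied, so the uncopied tail of the kernel
 * buffer never holds uninitialised data.
 */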
unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned n)
{
    unsigned long __d0, __d1, __d2, __n = n;
    __asm__ __volatile__(
        "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
        "    jbe  1f\n"
        "    mov  %1,%0\n"
        "    neg  %0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%0\n"
        "    sub  %0,%3\n"
        "4:  rep; movsb\n" /* make 'to' address aligned */
        "    mov  %3,%0\n"
        "    shr  $"STR(LONG_BYTEORDER)",%0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%3\n"
        "    .align 2,0x90\n"
        "0:  rep; movs"__OS"\n" /* as many words as possible... */
        "    mov  %3,%0\n"
        "1:  rep; movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add %3,%0\n"
        "    jmp 6f\n"
        "3:  lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
        "6:  push %0\n"
        "    push %%"__OP"ax\n"
        "    xor  %%eax,%%eax\n"
        "    rep; stosb\n"
        "    pop  %%"__OP"ax\n"
        "    pop  %0\n"
        "    jmp 2b\n"
        ".previous\n"
        ".section __ex_table,\"a\"\n"
        "    "__FIXUP_ALIGN"\n"
        "    "__FIXUP_WORD" 4b,5b\n"
        "    "__FIXUP_WORD" 0b,3b\n"
        "    "__FIXUP_WORD" 1b,6b\n"
        ".previous"
        : "=&c"(__n), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
        : "3"(__n), "0"(__n), "1"(to), "2"(from)
        : "memory");
    return (unsigned)__n;
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned n)
{
    if (access_ok(to, n))
        n = __copy_to_user(to, from, n);
    return n;
}
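
/*
 * Illustrative usage sketch, not part of the original file: the helper
 * and structure names below are hypothetical.  Because copy_to_user()
 * returns the number of bytes it could NOT copy, a caller normally
 * treats any non-zero result as a faulting guest pointer.
 */
#if 0 /* example only, not built */
static int example_put_reply(void __user *uaddr, const example_reply_t *rep)
{
    if (copy_to_user(uaddr, rep, sizeof(*rep)) != 0)
        return -EFAULT;  /* some bytes were not written to user space */
    return 0;
}
#endif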

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned n)
{
    if (access_ok(from, n))
        n = __copy_from_user(to, from, n);
    else
        memset(to, 0, n);
    return n;
}
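
/*
 * Illustrative usage sketch, not part of the original file: the helper
 * and structure names below are hypothetical.  copy_from_user() both
 * reports how many bytes were left uncopied and zero-pads the kernel
 * buffer on a partial copy, so *args is fully initialised even when the
 * caller returns -EFAULT.
 */
#if 0 /* example only, not built */
static int example_get_args(example_args_t *args, const void __user *uaddr)
{
    if (copy_from_user(args, uaddr, sizeof(*args)) != 0)
        return -EFAULT;  /* bad guest pointer; *args has been zero-padded */
    return 0;
}
#endif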