/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */

#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/uaccess.h>

unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
{
    unsigned long __d0, __d1, __d2, __n = n;

    asm volatile (
        "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
        "    jbe  1f\n"
        "    mov  %1,%0\n"
        "    neg  %0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%0\n"
        "    sub  %0,%3\n"
        "4:  rep movsb\n" /* make 'to' address aligned */
        "    mov  %3,%0\n"
        "    shr  $"STR(LONG_BYTEORDER)",%0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%3\n"
        "    .align 2,0x90\n"
        "0:  rep movs"__OS"\n" /* as many words as possible... */
        "    mov  %3,%0\n"
        "1:  rep movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add %3,%0\n"
        "    jmp 2b\n"
        "3:  lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
        "    jmp 2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 2b)
        : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
        : "0" (__n), "1" (to), "2" (from), "3" (__n)
        : "memory" );

    return __n;
}
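
/*
 * What the asm above implements, as an illustrative C sketch (a
 * hypothetical helper, not used anywhere): byte-copy until the
 * destination is word-aligned, bulk-copy machine words, then byte-copy
 * the tail.  The real reason for the asm version is the exception-table
 * fixups, which turn a fault on the guest mapping into a residual byte
 * count instead of a hypervisor crash.
 */
static inline void copy_strategy_sketch(void *to, const void *from,
                                        unsigned n)
{
    char *d = to;
    const char *s = from;

    if ( n > 2 * BYTES_PER_LONG - 1 )
    {
        /* Head: copy bytes until 'to' is BYTES_PER_LONG-aligned. */
        unsigned head = -(unsigned long)d & (BYTES_PER_LONG - 1);

        for ( n -= head; head--; )
            *d++ = *s++;

        /* Middle: as many whole machine words as possible. */
        for ( ; n >= BYTES_PER_LONG; n -= BYTES_PER_LONG )
        {
            *(unsigned long *)d = *(const unsigned long *)s;
            d += BYTES_PER_LONG;
            s += BYTES_PER_LONG;
        }
    }

    /* Tail: remainder copied as bytes. */
    while ( n-- )
        *d++ = *s++;
}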

unsigned long
__copy_from_user_ll(void *to, const void __user *from, unsigned n)
{
    unsigned long __d0, __d1, __d2, __n = n;

    asm volatile (
        "    cmp  $"STR(2*BYTES_PER_LONG-1)",%0\n"
        "    jbe  1f\n"
        "    mov  %1,%0\n"
        "    neg  %0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%0\n"
        "    sub  %0,%3\n"
        "4:  rep; movsb\n" /* make 'to' address aligned */
        "    mov  %3,%0\n"
        "    shr  $"STR(LONG_BYTEORDER)",%0\n"
        "    and  $"STR(BYTES_PER_LONG-1)",%3\n"
        "    .align 2,0x90\n"
        "0:  rep; movs"__OS"\n" /* as many words as possible... */
        "    mov  %3,%0\n"
        "1:  rep; movsb\n" /* ...remainder copied as bytes */
        "2:\n"
        ".section .fixup,\"ax\"\n"
        "5:  add %3,%0\n"
        "    jmp 6f\n"
        "3:  lea 0(%3,%0,"STR(BYTES_PER_LONG)"),%0\n"
        "6:  push %0\n"
        "    push %%"__OP"ax\n"
        "    xor  %%eax,%%eax\n"
        "    rep; stosb\n"
        "    pop  %%"__OP"ax\n"
        "    pop  %0\n"
        "    jmp 2b\n"
        ".previous\n"
        _ASM_EXTABLE(4b, 5b)
        _ASM_EXTABLE(0b, 3b)
        _ASM_EXTABLE(1b, 6b)
        : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
        : "0" (__n), "1" (to), "2" (from), "3" (__n)
        : "memory" );

    return __n;
}
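
/*
 * The extra fixup code (labels 5/3/6 above) is what distinguishes the
 * "from guest" variant: when a fault is taken partway through, it
 * recomputes the residual byte count and then zeroes exactly that tail
 * of the destination ("xor %eax; rep stosb") before returning.  In C
 * terms the fault path amounts to the following (illustrative sketch,
 * hypothetical name):
 */
static inline unsigned long from_user_fixup_sketch(void *to, unsigned n,
                                                   unsigned long left)
{
    /* 'left' is the number of bytes not copied when the fault hit. */
    ASSERT(left <= n);
    memset((char *)to + (n - left), 0, left); /* zero the uncopied tail */
    return left;                              /* residual count for caller */
}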

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned n)
{
    if ( access_ok(to, n) )
        n = __copy_to_user(to, from, n);
    return n;
}
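
/*
 * Typical use: handing a result structure back to the guest from a
 * hypercall handler.  A minimal sketch with a hypothetical caller and
 * result layout; it assumes the standard errno values are reachable
 * via the existing includes.
 */
static inline int return_to_guest_sketch(void __user *uaddr)
{
    struct {
        unsigned long status;
        unsigned long value;
    } res = { .status = 0, .value = 42 };

    /* Non-zero means part of 'res' was not written to the guest. */
    if ( copy_to_user(uaddr, &res, sizeof(res)) != 0 )
        return -EFAULT;

    return 0;
}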

#define __do_clear_user(addr, size)                                    \
do {                                                                    \
    long __d0;                                                          \
    __asm__ __volatile__(                                               \
        "0:  rep; stosl\n"                                              \
        "    movl %2,%0\n"                                              \
        "1:  rep; stosb\n"                                              \
        "2:\n"                                                          \
        ".section .fixup,\"ax\"\n"                                      \
        "3:  lea 0(%2,%0,4),%0\n"                                       \
        "    jmp 2b\n"                                                  \
        ".previous\n"                                                   \
        _ASM_EXTABLE(0b, 3b)                                            \
        _ASM_EXTABLE(1b, 2b)                                            \
        : "=&c" (size), "=&D" (__d0)                                    \
        : "r" (size & 3), "0" (size / 4), "1" ((long)addr), "a" (0) );  \
} while (0)
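
/*
 * In plain C the macro above amounts to the following (illustrative
 * sketch only).  Note it always clears in 32-bit "stosl" chunks; the
 * fixup at label 3 converts a faulting word count back into a byte
 * count, mirroring the lea in the copy routines above.
 */
static inline void clear_user_sketch(void *addr, unsigned long size)
{
    unsigned int *wp = addr;
    char *bp;

    /* "rep; stosl": zero four bytes at a time. */
    for ( ; size >= 4; size -= 4 )
        *wp++ = 0;

    /* "rep; stosb": zero the trailing 0-3 bytes. */
    for ( bp = (char *)wp; size--; )
        *bp++ = 0;
}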

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned n)
{
    if ( access_ok(to, n) )
        __do_clear_user(to, n);
    return n;
}
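
/*
 * A minimal usage sketch (hypothetical caller): scrub a guest-supplied
 * buffer, treating any residual count as a guest-access failure.
 */
static inline int scrub_guest_buffer_sketch(void __user *buf, unsigned len)
{
    return clear_user(buf, len) ? -EFAULT : 0;
}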

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
copy_from_user(void *to, const void __user *from, unsigned n)
{
    if ( access_ok(from, n) )
        n = __copy_from_user(to, from, n);
    else
        memset(to, 0, n);
    return n;
}
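
/*
 * A minimal sketch of fetching a guest-supplied operation block
 * (hypothetical structure and caller).  Because of the zero-padding
 * documented above, 'op' never holds stale hypervisor stack data, even
 * when the copy fails partway through.
 */
static inline int fetch_from_guest_sketch(const void __user *uaddr)
{
    struct {
        unsigned int cmd;
        unsigned int flags;
    } op;

    if ( copy_from_user(&op, uaddr, sizeof(op)) != 0 )
        return -EFAULT; /* 'op' is fully zero-padded here */

    return op.cmd ? 0 : -EINVAL;
}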

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */