From 9966a05c7b80f075f2bc7e48dbb108d3f2927234 Mon Sep 17 00:00:00 2001
From: Dave Martin <Dave.Martin@arm.com>
Date: Fri, 4 Jan 2019 13:09:51 +0000
Subject: [PATCH] arm64/sve: Disentangle <uapi/asm/ptrace.h> from
 <uapi/asm/sigcontext.h>

Currently, <uapi/asm/sigcontext.h> provides common definitions for
describing SVE context structures that are also used by the ptrace
definitions in <uapi/asm/ptrace.h>.

For this reason, a #include of <asm/sigcontext.h> was added in
ptrace.h, but it turns out that this can interact badly with
userspace code that tries to include ptrace.h on top of the libc
headers (which may provide their own shadow definitions for
sigcontext.h).

To make the headers easier for userspace to consume, this patch
bounces the common definitions into an __SVE_* namespace and moves
them to a backend header <uapi/asm/sve_context.h> that can be
included by the other headers as appropriate.  This should allow
ptrace.h to be used alongside libc's sigcontext.h (if any) without
ill effects.

This should make the situation unambiguous: <asm/sigcontext.h> is
the header to include for the sigframe-specific definitions, while
<asm/ptrace.h> is the header to include for ptrace-specific
definitions.

To avoid conflicting with existing usage, <asm/sigcontext.h>
remains the canonical way to get the common definitions for
SVE_VQ_MIN, sve_vq_from_vl() etc., both in userspace and in the
kernel: relying on these being defined as a side effect of
including just <asm/ptrace.h> was never intended to be safe.
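
As a hypothetical illustration (a minimal sketch, not part of this
patch, assuming the vector length vl has already been read from the
NT_ARM_SVE regset header), a debugger built against the libc headers
can now include <asm/ptrace.h> next to <signal.h> and still size the
regset payload:

	#include <signal.h>      /* libc headers; may shadow sigcontext.h */
	#include <sys/ptrace.h>
	#include <asm/ptrace.h>  /* no longer drags in <asm/sigcontext.h> */

	/* Payload size of the NT_ARM_SVE regset for a given vector length. */
	static unsigned int sve_regset_size(unsigned int vl)
	{
		unsigned int vq = __sve_vq_from_vl(vl);	/* <asm/sve_context.h> */

		return SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
	}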

Signed-off-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
 arch/arm64/include/uapi/asm/ptrace.h      | 39 ++++++++--------
 arch/arm64/include/uapi/asm/sigcontext.h  | 56 +++++++++++------------
 arch/arm64/include/uapi/asm/sve_context.h | 53 +++++++++++++++++++++
 3 files changed, 99 insertions(+), 49 deletions(-)
 create mode 100644 arch/arm64/include/uapi/asm/sve_context.h

--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>
 
 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
+#include <asm/sve_context.h>
 
 
 /*
@@ -129,9 +129,9 @@ struct user_sve_header {
  */
 
 /* Offset from the start of struct user_sve_header to the register data */
-#define SVE_PT_REGS_OFFSET					\
-	((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+#define SVE_PT_REGS_OFFSET						\
+	((sizeof(struct user_sve_header) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 /*
  * The register data content and layout depends on the value of the
@@ -177,39 +177,36 @@ struct user_sve_header {
  * Additional data might be appended in the future.
  */
 
-#define SVE_PT_SVE_ZREG_SIZE(vq)	SVE_SIG_ZREG_SIZE(vq)
-#define SVE_PT_SVE_PREG_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
-#define SVE_PT_SVE_FFR_SIZE(vq)		SVE_SIG_FFR_SIZE(vq)
+#define SVE_PT_SVE_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_PT_SVE_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_PT_SVE_FFR_SIZE(vq)		__SVE_FFR_SIZE(vq)
 #define SVE_PT_SVE_FPSR_SIZE		sizeof(__u32)
 #define SVE_PT_SVE_FPCR_SIZE		sizeof(__u32)
 
-#define __SVE_SIG_TO_PT(offset) \
-	((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
-
 #define SVE_PT_SVE_OFFSET		SVE_PT_REGS_OFFSET
 
 #define SVE_PT_SVE_ZREGS_OFFSET \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
+	(SVE_PT_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
 #define SVE_PT_SVE_ZREGS_SIZE(vq) \
-	(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
+	(SVE_PT_SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
 
 #define SVE_PT_SVE_PREGS_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_PT_SVE_PREG_OFFSET(vq, n) \
-	__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
+	(SVE_PT_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
 #define SVE_PT_SVE_PREGS_SIZE(vq) \
-	(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
+	(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
 		SVE_PT_SVE_PREGS_OFFSET(vq))
 
 #define SVE_PT_SVE_FFR_OFFSET(vq) \
-	__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
+	(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_PT_SVE_FPSR_OFFSET(vq)				\
 	((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) +	\
-			(SVE_VQ_BYTES - 1))			\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+			(__SVE_VQ_BYTES - 1))			\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 #define SVE_PT_SVE_FPCR_OFFSET(vq) \
 	(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
 
@@ -220,8 +217,8 @@ struct user_sve_header {
 
 #define SVE_PT_SVE_SIZE(vq, flags)					\
 	((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE		\
-			- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+			- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
 #define SVE_PT_SIZE(vq, flags)						\
 	 (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ?		\
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -130,6 +130,8 @@ struct sve_context {
 
 #endif /* !__ASSEMBLY__ */
 
+#include <asm/sve_context.h>
+
 /*
  * The SVE architecture leaves space for future expansion of the
  * vector length beyond its initial architectural limit of 2048 bits
@@ -138,21 +140,20 @@ struct sve_context {
  * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
  * terminology.
  */
-#define SVE_VQ_BYTES		16	/* number of bytes per quadword */
+#define SVE_VQ_BYTES		__SVE_VQ_BYTES	/* bytes per quadword */
 
-#define SVE_VQ_MIN		1
-#define SVE_VQ_MAX		512
+#define SVE_VQ_MIN		__SVE_VQ_MIN
+#define SVE_VQ_MAX		__SVE_VQ_MAX
 
-#define SVE_VL_MIN		(SVE_VQ_MIN * SVE_VQ_BYTES)
-#define SVE_VL_MAX		(SVE_VQ_MAX * SVE_VQ_BYTES)
+#define SVE_VL_MIN		__SVE_VL_MIN
+#define SVE_VL_MAX		__SVE_VL_MAX
 
-#define SVE_NUM_ZREGS		32
-#define SVE_NUM_PREGS		16
+#define SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define SVE_NUM_PREGS		__SVE_NUM_PREGS
 
-#define sve_vl_valid(vl) \
-	((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
-#define sve_vq_from_vl(vl)	((vl) / SVE_VQ_BYTES)
-#define sve_vl_from_vq(vq)	((vq) * SVE_VQ_BYTES)
+#define sve_vl_valid(vl)	__sve_vl_valid(vl)
+#define sve_vq_from_vl(vl)	__sve_vq_from_vl(vl)
+#define sve_vl_from_vq(vq)	__sve_vl_from_vq(vq)
 
 /*
  * If the SVE registers are currently live for the thread at signal delivery,
@@ -205,34 +206,33 @@ struct sve_context {
  * Additional data might be appended in the future.
  */
 
-#define SVE_SIG_ZREG_SIZE(vq)	((__u32)(vq) * SVE_VQ_BYTES)
-#define SVE_SIG_PREG_SIZE(vq)	((__u32)(vq) * (SVE_VQ_BYTES / 8))
-#define SVE_SIG_FFR_SIZE(vq)	SVE_SIG_PREG_SIZE(vq)
+#define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
+#define SVE_SIG_PREG_SIZE(vq)	__SVE_PREG_SIZE(vq)
+#define SVE_SIG_FFR_SIZE(vq)	__SVE_FFR_SIZE(vq)
 
 #define SVE_SIG_REGS_OFFSET					\
-	((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1))	\
-		/ SVE_VQ_BYTES * SVE_VQ_BYTES)
+	((sizeof(struct sve_context) + (__SVE_VQ_BYTES - 1))	\
+		/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
 
-#define SVE_SIG_ZREGS_OFFSET	SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREGS_OFFSET \
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREGS_OFFSET)
 #define SVE_SIG_ZREG_OFFSET(vq, n) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
-#define SVE_SIG_ZREGS_SIZE(vq) \
-	(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+		(SVE_SIG_REGS_OFFSET + __SVE_ZREG_OFFSET(vq, n))
+#define SVE_SIG_ZREGS_SIZE(vq) __SVE_ZREGS_SIZE(vq)
 
 #define SVE_SIG_PREGS_OFFSET(vq) \
-	(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREGS_OFFSET(vq))
 #define SVE_SIG_PREG_OFFSET(vq, n) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
-#define SVE_SIG_PREGS_SIZE(vq) \
-	(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_PREG_OFFSET(vq, n))
+#define SVE_SIG_PREGS_SIZE(vq) __SVE_PREGS_SIZE(vq)
 
 #define SVE_SIG_FFR_OFFSET(vq) \
-	(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+		(SVE_SIG_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
 
 #define SVE_SIG_REGS_SIZE(vq) \
-	(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
-
-#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+		(__SVE_FFR_OFFSET(vq) + __SVE_FFR_SIZE(vq))
 
+#define SVE_SIG_CONTEXT_SIZE(vq) \
+		(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017-2018 ARM Limited */
+
+/*
+ * For use by other UAPI headers only.
+ * Do not make direct use of header or its definitions.
+ */
+
+#ifndef _UAPI__ASM_SVE_CONTEXT_H
+#define _UAPI__ASM_SVE_CONTEXT_H
+
+#include <linux/types.h>
+
+#define __SVE_VQ_BYTES		16	/* number of bytes per quadword */
+
+#define __SVE_VQ_MIN		1
+#define __SVE_VQ_MAX		512
+
+#define __SVE_VL_MIN		(__SVE_VQ_MIN * __SVE_VQ_BYTES)
+#define __SVE_VL_MAX		(__SVE_VQ_MAX * __SVE_VQ_BYTES)
+
+#define __SVE_NUM_ZREGS		32
+#define __SVE_NUM_PREGS		16
+
+#define __sve_vl_valid(vl)			\
+	((vl) % __SVE_VQ_BYTES == 0 &&		\
+	 (vl) >= __SVE_VL_MIN &&		\
+	 (vl) <= __SVE_VL_MAX)
+
+#define __sve_vq_from_vl(vl)	((vl) / __SVE_VQ_BYTES)
+#define __sve_vl_from_vq(vq)	((vq) * __SVE_VQ_BYTES)
+
+#define __SVE_ZREG_SIZE(vq)	((__u32)(vq) * __SVE_VQ_BYTES)
+#define __SVE_PREG_SIZE(vq)	((__u32)(vq) * (__SVE_VQ_BYTES / 8))
+#define __SVE_FFR_SIZE(vq)	__SVE_PREG_SIZE(vq)
+
+#define __SVE_ZREGS_OFFSET	0
+#define __SVE_ZREG_OFFSET(vq, n) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREG_SIZE(vq) * (n))
+#define __SVE_ZREGS_SIZE(vq) \
+	(__SVE_ZREG_OFFSET(vq, __SVE_NUM_ZREGS) - __SVE_ZREGS_OFFSET)
+
+#define __SVE_PREGS_OFFSET(vq) \
+	(__SVE_ZREGS_OFFSET + __SVE_ZREGS_SIZE(vq))
+#define __SVE_PREG_OFFSET(vq, n) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREG_SIZE(vq) * (n))
+#define __SVE_PREGS_SIZE(vq) \
+	(__SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - __SVE_PREGS_OFFSET(vq))
+
+#define __SVE_FFR_OFFSET(vq) \
+	(__SVE_PREGS_OFFSET(vq) + __SVE_PREGS_SIZE(vq))
+
+#endif /* ! _UAPI__ASM_SVE_CONTEXT_H */