12 files changed, 26 insertions, 316 deletions
diff --git a/toolchain/musl/common.mk b/toolchain/musl/common.mk
index 68098f5c6a..0f42a9eb60 100644
--- a/toolchain/musl/common.mk
+++ b/toolchain/musl/common.mk
@@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk
 include $(INCLUDE_DIR)/target.mk
 
 PKG_NAME:=musl
-PKG_VERSION:=1.1.24
-PKG_RELEASE:=3
+PKG_VERSION:=1.2.2
+PKG_RELEASE:=1
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://musl.libc.org/releases/
-PKG_HASH:=1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3
+PKG_HASH:=9b969322012d796dc23dda27a35866034fa67d8fb67e0e2c45c913c3d43219dd
 
 LIBC_SO_VERSION:=$(PKG_VERSION)
 PATCH_DIR:=$(PATH_PREFIX)/patches
diff --git a/toolchain/musl/include/features.h b/toolchain/musl/include/features.h
index edb8cc72d4..e801e2299a 100644
--- a/toolchain/musl/include/features.h
+++ b/toolchain/musl/include/features.h
@@ -1,10 +1,14 @@
 #ifndef _FEATURES_H
 #define _FEATURES_H
 
-#ifdef _ALL_SOURCE
+#if defined(_ALL_SOURCE) && !defined(_GNU_SOURCE)
 #define _GNU_SOURCE 1
 #endif
 
+#if defined(_DEFAULT_SOURCE) && !defined(_BSD_SOURCE)
+#define _BSD_SOURCE 1
+#endif
+
 #if !defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE) \
  && !defined(_XOPEN_SOURCE) && !defined(_GNU_SOURCE) \
  && !defined(_BSD_SOURCE) && !defined(__STRICT_ANSI__)
@@ -20,6 +24,8 @@
 
 #if __STDC_VERSION__ >= 199901L || defined(__cplusplus)
 #define __inline inline
+#elif !defined(__GNUC__)
+#define __inline
 #endif
 
 #if __STDC_VERSION__ >= 201112L
@@ -29,6 +35,8 @@
 #define _Noreturn
 #endif
 
+#define __REDIR(x,y) __typeof__(x) x __asm__(#y)
+
 /* Convenience macros to test the versions of glibc and gcc.
    Use them like this:
    #if __GNUC_PREREQ (2,8)
diff --git a/toolchain/musl/patches/110-read_timezone_from_fs.patch b/toolchain/musl/patches/110-read_timezone_from_fs.patch
index f92781f7ed..a1f54db551 100644
--- a/toolchain/musl/patches/110-read_timezone_from_fs.patch
+++ b/toolchain/musl/patches/110-read_timezone_from_fs.patch
@@ -1,6 +1,6 @@
 --- a/src/time/__tz.c
 +++ b/src/time/__tz.c
-@@ -25,6 +25,9 @@ static int r0[5], r1[5];
+@@ -31,6 +31,9 @@ static int r0[5], r1[5];
 
 static const unsigned char *zi, *trans, *index, *types, *abbrevs, *abbrevs_end;
 static size_t map_size;
@@ -10,7 +10,7 @@
 static char old_tz_buf[32];
 static char *old_tz = old_tz_buf;
 static size_t old_tz_size = sizeof old_tz_buf;
-@@ -125,6 +128,15 @@ static void do_tzset()
+@@ -132,6 +135,15 @@ static void do_tzset()
 	"/usr/share/zoneinfo/\0/share/zoneinfo/\0/etc/zoneinfo/\0";
 
 	s = getenv("TZ");
diff --git a/toolchain/musl/patches/200-add_libssp_nonshared.patch b/toolchain/musl/patches/200-add_libssp_nonshared.patch
index 05bd2fe54a..26a9bfebea 100644
--- a/toolchain/musl/patches/200-add_libssp_nonshared.patch
+++ b/toolchain/musl/patches/200-add_libssp_nonshared.patch
@@ -7,7 +7,7 @@ Signed-off-by: Steven Barth <steven@midlink.org>
 
 --- a/Makefile
 +++ b/Makefile
-@@ -66,7 +66,7 @@ CRT_LIBS = $(addprefix lib/,$(notdir $(C
+@@ -67,7 +67,7 @@ CRT_LIBS = $(addprefix lib/,$(notdir $(C
 STATIC_LIBS = lib/libc.a
 SHARED_LIBS = lib/libc.so
 TOOL_LIBS = lib/musl-gcc.specs
@@ -16,7 +16,7 @@ Signed-off-by: Steven Barth <steven@midlink.org>
 ALL_TOOLS = obj/musl-gcc
 
 WRAPCC_GCC = gcc
-@@ -86,7 +86,7 @@ else
+@@ -88,7 +88,7 @@ else
 
 all: $(ALL_LIBS) $(ALL_TOOLS)
 
@@ -25,7 +25,7 @@ Signed-off-by: Steven Barth <steven@midlink.org>
 $(ALL_LIBS) $(ALL_TOOLS) $(ALL_OBJS) $(ALL_OBJS:%.o=%.lo) $(GENH) $(GENH_INT): | $(OBJ_DIRS)
 
-@@ -113,6 +113,8 @@ obj/crt/rcrt1.o: $(srcdir)/ldso/dlstart.
+@@ -115,6 +115,8 @@ obj/crt/rcrt1.o: $(srcdir)/ldso/dlstart.
 
 obj/crt/Scrt1.o obj/crt/rcrt1.o: CFLAGS_ALL += -fPIC
 
@@ -34,7 +34,7 @@ Signed-off-by: Steven Barth <steven@midlink.org>
 OPTIMIZE_SRCS = $(wildcard $(OPTIMIZE_GLOBS:%=$(srcdir)/src/%))
 $(OPTIMIZE_SRCS:$(srcdir)/%.c=obj/%.o) $(OPTIMIZE_SRCS:$(srcdir)/%.c=obj/%.lo): CFLAGS += -O3
-@@ -165,6 +167,11 @@ lib/libc.a: $(AOBJS)
+@@ -167,6 +169,11 @@ lib/libc.a: $(AOBJS)
 	$(AR) rc $@ $(AOBJS)
 	$(RANLIB) $@
diff --git a/toolchain/musl/patches/300-relative.patch b/toolchain/musl/patches/300-relative.patch
index e34e60a09d..7e1eb7d6bc 100644
--- a/toolchain/musl/patches/300-relative.patch
+++ b/toolchain/musl/patches/300-relative.patch
@@ -1,6 +1,6 @@
 --- a/Makefile
 +++ b/Makefile
-@@ -215,7 +215,7 @@ $(DESTDIR)$(includedir)/%: $(srcdir)/inc
+@@ -217,7 +217,7 @@ $(DESTDIR)$(includedir)/%: $(srcdir)/inc
 	$(INSTALL) -D -m 644 $< $@
 
 $(DESTDIR)$(LDSO_PATHNAME): $(DESTDIR)$(libdir)/libc.so
diff --git a/toolchain/musl/patches/500-0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch b/toolchain/musl/patches/500-0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
deleted file mode 100644
index d47f2f4108..0000000000
--- a/toolchain/musl/patches/500-0001-reorder-thread-list-unlink-in-pthread_exit-after-all.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 4d5aa20a94a2d3fae3e69289dc23ecafbd0c16c4 Mon Sep 17 00:00:00 2001
-From: Rich Felker <dalias@aerifal.cx>
-Date: Fri, 22 May 2020 17:35:14 -0400
-Subject: [PATCH 1/4] reorder thread list unlink in pthread_exit after all
- locks
-
-since the backend for LOCK() skips locking if single-threaded, it's
-unsafe to make the process appear single-threaded before the last use
-of lock.
-
-this fixes potential unsynchronized access to a linked list via
-__dl_thread_cleanup.
----
- src/thread/pthread_create.c | 19 +++++++++++--------
- 1 file changed, 11 insertions(+), 8 deletions(-)
-
---- a/src/thread/pthread_create.c
-+++ b/src/thread/pthread_create.c
-@@ -90,14 +90,7 @@ _Noreturn void __pthread_exit(void *resu
- exit(0);
- }
-
-- /* At this point we are committed to thread termination. Unlink
-- * the thread from the list. This change will not be visible
-- * until the lock is released, which only happens after SYS_exit
-- * has been called, via the exit futex address pointing at the lock. */
-- libc.threads_minus_1--;
-- self->next->prev = self->prev;
-- self->prev->next = self->next;
-- self->prev = self->next = self;
-+ /* At this point we are committed to thread termination. */
-
- /* Process robust list in userspace to handle non-pshared mutexes
- * and the detached thread case where the robust list head will
-@@ -121,6 +114,16 @@ _Noreturn void __pthread_exit(void *resu
- __do_orphaned_stdio_locks();
- __dl_thread_cleanup();
-
-+ /* Last, unlink thread from the list. This change will not be visible
-+ * until the lock is released, which only happens after SYS_exit
-+ * has been called, via the exit futex address pointing at the lock.
-+ * This needs to happen after any possible calls to LOCK() that might
-+ * skip locking if libc.threads_minus_1 is zero. */
-+ libc.threads_minus_1--;
-+ self->next->prev = self->prev;
-+ self->prev->next = self->next;
-+ self->prev = self->next = self;
-+
- /* This atomic potentially competes with a concurrent pthread_detach
- * call; the loser is responsible for freeing thread resources. */
- int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);
diff --git a/toolchain/musl/patches/500-0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch b/toolchain/musl/patches/500-0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
deleted file mode 100644
index 4ca51b0be0..0000000000
--- a/toolchain/musl/patches/500-0002-don-t-use-libc.threads_minus_1-as-relaxed-atomic-for.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From e01b5939b38aea5ecbe41670643199825874b26c Mon Sep 17 00:00:00 2001
-From: Rich Felker <dalias@aerifal.cx>
-Date: Thu, 21 May 2020 23:32:45 -0400
-Subject: [PATCH 2/4] don't use libc.threads_minus_1 as relaxed atomic for
- skipping locks
-
-after all but the last thread exits, the next thread to observe
-libc.threads_minus_1==0 and conclude that it can skip locking fails to
-synchronize with any changes to memory that were made by the
-last-exiting thread. this can produce data races.
-
-on some archs, at least x86, memory synchronization is unlikely to be
-a problem; however, with the inline locks in malloc, skipping the lock
-also eliminated the compiler barrier, and caused code that needed to
-re-check chunk in-use bits after obtaining the lock to reuse a stale
-value, possibly from before the process became single-threaded. this
-in turn produced corruption of the heap state.
-
-some uses of libc.threads_minus_1 remain, especially for allocation of
-new TLS in the dynamic linker; otherwise, it could be removed
-entirely. it's made non-volatile to reflect that the remaining
-accesses are only made under lock on the thread list.
-
-instead of libc.threads_minus_1, libc.threaded is now used for
-skipping locks. the difference is that libc.threaded is permanently
-true once an additional thread has been created. this will produce
-some performance regression in processes that are mostly
-single-threaded but occasionally creating threads. in the future it
-may be possible to bring back the full lock-skipping, but more care
-needs to be taken to produce a safe design.
----
- src/internal/libc.h | 2 +-
- src/malloc/malloc.c | 2 +-
- src/thread/__lock.c | 2 +-
- 3 files changed, 3 insertions(+), 3 deletions(-)
-
---- a/src/internal/libc.h
-+++ b/src/internal/libc.h
-@@ -21,7 +21,7 @@ struct __libc {
- int can_do_threads;
- int threaded;
- int secure;
-- volatile int threads_minus_1;
-+ int threads_minus_1;
- size_t *auxv;
- struct tls_module *tls_head;
- size_t tls_size, tls_align, tls_cnt;
---- a/src/malloc/malloc.c
-+++ b/src/malloc/malloc.c
-@@ -26,7 +26,7 @@ int __malloc_replaced;
-
- static inline void lock(volatile int *lk)
- {
-- if (libc.threads_minus_1)
-+ if (libc.threaded)
- while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
- }
-
---- a/src/thread/__lock.c
-+++ b/src/thread/__lock.c
-@@ -18,7 +18,7 @@
-
- void __lock(volatile int *l)
- {
-- if (!libc.threads_minus_1) return;
-+ if (!libc.threaded) return;
- /* fast path: INT_MIN for the lock, +1 for the congestion */
- int current = a_cas(l, 0, INT_MIN + 1);
- if (!current) return;
diff --git a/toolchain/musl/patches/500-0003-cut-down-size-of-some-libc-struct-members.patch b/toolchain/musl/patches/500-0003-cut-down-size-of-some-libc-struct-members.patch
deleted file mode 100644
index 6650434397..0000000000
--- a/toolchain/musl/patches/500-0003-cut-down-size-of-some-libc-struct-members.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From f12888e9eb9eed60cc266b899dcafecb4752964a Mon Sep 17 00:00:00 2001
-From: Rich Felker <dalias@aerifal.cx>
-Date: Fri, 22 May 2020 17:25:38 -0400
-Subject: [PATCH 3/4] cut down size of some libc struct members
-
-these are all flags that can be single-byte values.
----
- src/internal/libc.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/src/internal/libc.h
-+++ b/src/internal/libc.h
-@@ -18,9 +18,9 @@ struct tls_module {
- };
-
- struct __libc {
-- int can_do_threads;
-- int threaded;
-- int secure;
-+ char can_do_threads;
-+ char threaded;
-+ char secure;
- int threads_minus_1;
- size_t *auxv;
- struct tls_module *tls_head;
diff --git a/toolchain/musl/patches/500-0004-restore-lock-skipping-for-processes-that-return-to-s.patch b/toolchain/musl/patches/500-0004-restore-lock-skipping-for-processes-that-return-to-s.patch
deleted file mode 100644
index 83a6d0247a..0000000000
--- a/toolchain/musl/patches/500-0004-restore-lock-skipping-for-processes-that-return-to-s.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From 8d81ba8c0bc6fe31136cb15c9c82ef4c24965040 Mon Sep 17 00:00:00 2001
-From: Rich Felker <dalias@aerifal.cx>
-Date: Fri, 22 May 2020 17:45:47 -0400
-Subject: [PATCH 4/4] restore lock-skipping for processes that return to
- single-threaded state
-
-the design used here relies on the barrier provided by the first lock
-operation after the process returns to single-threaded state to
-synchronize with actions by the last thread that exited. by storing
-the intent to change modes in the same object used to detect whether
-locking is needed, it's possible to avoid an extra (possibly costly)
-memory load after the lock is taken.
----
- src/internal/libc.h | 1 +
- src/malloc/malloc.c | 5 ++++-
- src/thread/__lock.c | 4 +++-
- src/thread/pthread_create.c | 8 ++++----
- 4 files changed, 12 insertions(+), 6 deletions(-)
-
---- a/src/internal/libc.h
-+++ b/src/internal/libc.h
-@@ -21,6 +21,7 @@ struct __libc {
- char can_do_threads;
- char threaded;
- char secure;
-+ volatile signed char need_locks;
- int threads_minus_1;
- size_t *auxv;
- struct tls_module *tls_head;
---- a/src/malloc/malloc.c
-+++ b/src/malloc/malloc.c
-@@ -26,8 +26,11 @@ int __malloc_replaced;
-
- static inline void lock(volatile int *lk)
- {
-- if (libc.threaded)
-+ int need_locks = libc.need_locks;
-+ if (need_locks) {
- while(a_swap(lk, 1)) __wait(lk, lk+1, 1, 1);
-+ if (need_locks < 0) libc.need_locks = 0;
-+ }
- }
-
- static inline void unlock(volatile int *lk)
---- a/src/thread/__lock.c
-+++ b/src/thread/__lock.c
-@@ -18,9 +18,11 @@
-
- void __lock(volatile int *l)
- {
-- if (!libc.threaded) return;
-+ int need_locks = libc.need_locks;
-+ if (!need_locks) return;
- /* fast path: INT_MIN for the lock, +1 for the congestion */
- int current = a_cas(l, 0, INT_MIN + 1);
-+ if (need_locks < 0) libc.need_locks = 0;
- if (!current) return;
- /* A first spin loop, for medium congestion. */
- for (unsigned i = 0; i < 10; ++i) {
---- a/src/thread/pthread_create.c
-+++ b/src/thread/pthread_create.c
-@@ -118,8 +118,8 @@ _Noreturn void __pthread_exit(void *resu
- * until the lock is released, which only happens after SYS_exit
- * has been called, via the exit futex address pointing at the lock.
- * This needs to happen after any possible calls to LOCK() that might
-- * skip locking if libc.threads_minus_1 is zero. */
-- libc.threads_minus_1--;
-+ * skip locking if process appears single-threaded. */
-+ if (!--libc.threads_minus_1) libc.need_locks = -1;
- self->next->prev = self->prev;
- self->prev->next = self->next;
- self->prev = self->next = self;
-@@ -339,7 +339,7 @@ int __pthread_create(pthread_t *restrict
- ~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
-
- __tl_lock();
-- libc.threads_minus_1++;
-+ if (!libc.threads_minus_1++) libc.need_locks = 1;
- ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);
-
- /* All clone failures translate to EAGAIN. If explicit scheduling
-@@ -363,7 +363,7 @@ int __pthread_create(pthread_t *restrict
- new->next->prev = new;
- new->prev->next = new;
- } else {
-- libc.threads_minus_1--;
-+ if (!--libc.threads_minus_1) libc.need_locks = 0;
- }
- __tl_unlock();
- __restore_sigs(&set);
diff --git a/toolchain/musl/patches/600-nftw-support-common-gnu-extension.patch b/toolchain/musl/patches/600-nftw-support-common-gnu-extension.patch
index 81c96ad76c..2a7436cf84 100644
--- a/toolchain/musl/patches/600-nftw-support-common-gnu-extension.patch
+++ b/toolchain/musl/patches/600-nftw-support-common-gnu-extension.patch
@@ -32,9 +32,9 @@ Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
 +#define _GNU_SOURCE
 #include <ftw.h>
 #include <dirent.h>
- #include <sys/stat.h>
-@@ -63,8 +64,20 @@ static int do_nftw(char *path, int (*fn)
- 	lev.base = k;
+ #include <fcntl.h>
+@@ -72,8 +73,20 @@ static int do_nftw(char *path, int (*fn)
+ 	if (!fd_limit) close(dfd);
 }
 
 - if (!(flags & FTW_DEPTH) && (r=fn(path, &st, type, &lev)))
@@ -56,7 +56,7 @@ Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
 
 for (; h; h = h->chain)
 if (h->dev == st.st_dev && h->ino == st.st_ino)
-@@ -88,7 +101,10 @@ static int do_nftw(char *path, int (*fn)
+@@ -101,7 +114,10 @@ static int do_nftw(char *path, int (*fn)
 strcpy(path+j+1, de->d_name);
 if ((r=do_nftw(path, fn, fd_limit-1, flags, &new))) {
 closedir(d);
@@ -68,7 +68,7 @@ Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
 }
 }
 closedir(d);
-@@ -98,8 +114,16 @@ static int do_nftw(char *path, int (*fn)
+@@ -112,8 +128,16 @@ static int do_nftw(char *path, int (*fn)
 }
 
 path[l] = 0;
@@ -87,7 +87,7 @@ Signed-off-by: Tony Ambardar <Tony.Ambardar@gmail.com>
 
 return 0;
 }
-@@ -125,4 +149,5 @@ int nftw(const char *path, int (*fn)(con
+@@ -139,4 +163,5 @@ int nftw(const char *path, int (*fn)(con
 	return r;
 }
diff --git a/toolchain/musl/patches/700-wcsnrtombs-cve-2020-28928.diff b/toolchain/musl/patches/700-wcsnrtombs-cve-2020-28928.diff
deleted file mode 100644
index 5840dc1aac..0000000000
--- a/toolchain/musl/patches/700-wcsnrtombs-cve-2020-28928.diff
+++ /dev/null
@@ -1,63 +0,0 @@
---- a/src/multibyte/wcsnrtombs.c
-+++ b/src/multibyte/wcsnrtombs.c
-@@ -1,41 +1,33 @@
- #include <wchar.h>
-+#include <limits.h>
-+#include <string.h>
-
- size_t wcsnrtombs(char *restrict dst, const wchar_t **restrict wcs, size_t wn, size_t n, mbstate_t *restrict st)
- {
-- size_t l, cnt=0, n2;
-- char *s, buf[256];
- const wchar_t *ws = *wcs;
-- const wchar_t *tmp_ws;
--
-- if (!dst) s = buf, n = sizeof buf;
-- else s = dst;
--
-- while ( ws && n && ( (n2=wn)>=n || n2>32 ) ) {
-- if (n2>=n) n2=n;
-- tmp_ws = ws;
-- l = wcsrtombs(s, &ws, n2, 0);
-- if (!(l+1)) {
-- cnt = l;
-- n = 0;
-+ size_t cnt = 0;
-+ if (!dst) n=0;
-+ while (ws && wn) {
-+ char tmp[MB_LEN_MAX];
-+ size_t l = wcrtomb(n<MB_LEN_MAX ? tmp : dst, *ws, 0);
-+ if (l==-1) {
-+ cnt = -1;
- break;
- }
-- if (s != buf) {
-- s += l;
-+ if (dst) {
-+ if (n<MB_LEN_MAX) {
-+ if (l>n) break;
-+ memcpy(dst, tmp, l);
-+ }
-+ dst += l;
- n -= l;
- }
-- wn = ws ? wn - (ws - tmp_ws) : 0;
-- cnt += l;
-- }
-- if (ws) while (n && wn) {
-- l = wcrtomb(s, *ws, 0);
-- if ((l+1)<=1) {
-- if (!l) ws = 0;
-- else cnt = l;
-+ if (!*ws) {
-+ ws = 0;
- break;
- }
-- ws++; wn--;
-- /* safe - this loop runs fewer than sizeof(buf) times */
-- s+=l; n-=l;
-+ ws++;
-+ wn--;
- cnt += l;
- }
- if (dst) *wcs = ws;
diff --git a/toolchain/musl/patches/901-crypt_size_hack.patch b/toolchain/musl/patches/901-crypt_size_hack.patch
index 75f196abca..667894a24f 100644
--- a/toolchain/musl/patches/901-crypt_size_hack.patch
+++ b/toolchain/musl/patches/901-crypt_size_hack.patch
@@ -43,7 +43,7 @@
 
 typedef uint32_t BF_word;
 typedef int32_t BF_word_signed;
-@@ -796,3 +807,4 @@ char *__crypt_blowfish(const char *key,
+@@ -804,3 +815,4 @@ char *__crypt_blowfish(const char *key,
 	return "*";
 }
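The removed 500-series patches are upstream musl commits by Rich Felker and ship in musl releases since 1.2.1, which is why the 1.2.2 update can drop them. The sketch below is an illustration only, not musl source: it restates the tri-state need_locks idea from the 500-0004 patch description, with a plain C11 spin lock standing in for musl's a_swap()/__wait() futex path, and all names (demo_lock, on_thread_create, etc.) invented for the demo. need_locks is 0 while the process is single-threaded (locking is skipped), 1 while extra threads exist, and -1 after the last extra thread exits, so the next lock operation supplies the barrier that synchronizes with that thread's writes and then re-enables skipping.

/* Illustrative sketch, not musl code. */
#include <stdatomic.h>
#include <stdio.h>

static volatile signed char need_locks; /* 0: skip, 1: lock, -1: lock once more, then skip again */
static int threads_minus_1;             /* threads beyond the first */
static atomic_int lk;                   /* demo lock word, 0 = free, 1 = held */

static void demo_lock(void)
{
	signed char nl = need_locks;
	if (!nl) return;                 /* single-threaded: skip the atomic entirely */
	while (atomic_exchange(&lk, 1))  /* simple spin; musl waits on a futex here */
		;
	if (nl < 0) need_locks = 0;      /* first lock after the last thread exited: its
	                                    barrier synchronized with that thread's writes,
	                                    so later calls may skip locking again */
}

static void demo_unlock(void)
{
	if (atomic_load(&lk)) atomic_store(&lk, 0); /* only touch lk if demo_lock really took it */
}

/* Bookkeeping on the transitions into and out of multi-threaded operation,
 * mirroring the removed 500-0004 patch. */
static void on_thread_create(void) { if (!threads_minus_1++) need_locks = 1; }
static void on_thread_exit(void)   { if (!--threads_minus_1) need_locks = -1; }

int main(void)
{
	demo_lock(); demo_unlock();   /* skipped: process is single-threaded */
	on_thread_create();           /* a second thread would start here */
	demo_lock(); demo_unlock();   /* real lock/unlock while threaded */
	on_thread_exit();             /* last extra thread gone: need_locks becomes -1 */
	demo_lock(); demo_unlock();   /* locks once more, then resets need_locks to 0 */
	printf("need_locks = %d\n", need_locks); /* prints 0 */
	return 0;
}

Compared with the intermediate 500-0002 state, which kept locking forever once libc.threaded was set, this scheme restores the single-threaded fast path after the process drops back to one thread, at the cost of one extra locked round trip on that transition.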