diff options
author | Konstantin Belousov <kib@FreeBSD.org> | 2022-08-30 15:46:30 +0300 |
---|---|---|
committer | Konstantin Belousov <kib@FreeBSD.org> | 2022-09-04 07:28:02 +0300 |
commit | a687683b997c5805ecd6d8278798b7ef00d9908f (patch) | |
tree | 6ff543e7d67329a274361142d4b80b72dd0778cf /libexec | |
parent | a486fbbd7876bed81d738a32274953c89906edb5 (diff) |
rtld: mask signals for default read locks
Rtld locks from libthr defer signal delivery, which means that binding
is not possible while a signal handler is being executed.
Binding might upgrade the read-locked rtld_bind_lock to a write lock if
symbol resolution requires loading filters. If a signal is delivered
while rtld is in a read-locked section, and the signal handler needs binding
that upgrades the lock, then for a non-threaded image that uses the default
rtld locks, the rtld data structures get modified under the top-level
active rtld frame.
To correct the problem, mask signals for read-locking of default locks
in addition to the write-locking. It is very cheap now with
sigfastblock(2).
Note that the global state is used to track the pre-locked state of either
sigfastblock(2) or the signal mask (if sigfastblock(2) is administratively
disabled). This is fine for non-threaded images since there are no other
threads. But I believe that it is fine for threaded images using libc_r
as well, since masking signals disables preemption (I have not tested
it).
NetBSD PR: https://gnats.netbsd.org/56979
Reported by: tmunro
Reviewed by: markj
Sponsored by: The FreeBSD Foundation
MFC after: 2 weeks
Differential revision: https://reviews.freebsd.org/D36396
Diffstat (limited to 'libexec')
-rw-r--r-- | libexec/rtld-elf/rtld_lock.c | 60 |
1 file changed, 36 insertions, 24 deletions
diff --git a/libexec/rtld-elf/rtld_lock.c b/libexec/rtld-elf/rtld_lock.c
index e501c03f0722..8b9a6a51e061 100644
--- a/libexec/rtld-elf/rtld_lock.c
+++ b/libexec/rtld-elf/rtld_lock.c
@@ -125,16 +125,6 @@ def_lock_destroy(void *lock)
 }
 
 static void
-def_rlock_acquire(void *lock)
-{
-	Lock *l = (Lock *)lock;
-
-	atomic_add_acq_int(&l->lock, RC_INCR);
-	while (l->lock & WAFLAG)
-		;		/* Spin */
-}
-
-static void
 sig_fastunblock(void)
 {
 	uint32_t oldval;
@@ -145,24 +135,37 @@ sig_fastunblock(void)
 	__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
 }
 
+static bool
+def_lock_acquire_set(Lock *l, bool wlock)
+{
+	if (wlock) {
+		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+			return (true);
+	} else {
+		atomic_add_acq_int(&l->lock, RC_INCR);
+		if ((l->lock & WAFLAG) == 0)
+			return (true);
+		atomic_add_int(&l->lock, -RC_INCR);
+	}
+	return (false);
+}
+
 static void
-def_wlock_acquire(void *lock)
+def_lock_acquire(Lock *l, bool wlock)
 {
-	Lock *l;
 	sigset_t tmp_oldsigmask;
 
-	l = (Lock *)lock;
 	if (ld_fast_sigblock) {
 		for (;;) {
 			atomic_add_32(&fsigblock, SIGFASTBLOCK_INC);
-			if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+			if (def_lock_acquire_set(l, wlock))
 				break;
 			sig_fastunblock();
 		}
 	} else {
 		for (;;) {
 			sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
-			if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
+			if (def_lock_acquire_set(l, wlock))
 				break;
 			sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
 		}
@@ -172,20 +175,29 @@ def_wlock_acquire(void *lock)
 }
 
 static void
+def_rlock_acquire(void *lock)
+{
+	def_lock_acquire(lock, false);
+}
+
+static void
+def_wlock_acquire(void *lock)
+{
+	def_lock_acquire(lock, true);
+}
+
+static void
 def_lock_release(void *lock)
 {
 	Lock *l;
 
 	l = (Lock *)lock;
-	if ((l->lock & WAFLAG) == 0)
-		atomic_add_rel_int(&l->lock, -RC_INCR);
-	else {
-		atomic_add_rel_int(&l->lock, -WAFLAG);
-		if (ld_fast_sigblock)
-			sig_fastunblock();
-		else if (atomic_fetchadd_int(&wnested, -1) == 1)
-			sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
-	}
+	atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ?
+	    RC_INCR : WAFLAG));
+	if (ld_fast_sigblock)
+		sig_fastunblock();
+	else if (atomic_fetchadd_int(&wnested, -1) == 1)
+		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
 }
 
 static int