author    Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-03-02 02:10:32 +0000
committer Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-03-02 02:10:32 +0000
commit    116c75a12e3632fcc22023fd19ebc5845e00fab9 (patch)
tree      1cf815962aa35f1780272d629873f63af25167f4
parent    70a6680e4f5dd8ccb642948c9889d4f516766602 (diff)
parent    6ca68eb96d6bdea3f7c565168e5b93ea1a234a33 (diff)
Snap for 9671667 from 6ca68eb96d6bdea3f7c565168e5b93ea1a234a33 to mainline-networking-release
Tags: aml_net_331812010, aml_net_331710000, android13-mainline-networking-release
Change-Id: Ib7f4c5fbfbc6958911550a9cf5e68b545356f6cd
 libartbase/base/metrics/metrics.h     |  48
 runtime/gc/collector/mark_compact.cc  |  18
 runtime/gc/heap_test.cc               | 145
 sigchainlib/sigchain.cc               |  92
 4 files changed, 279 insertions(+), 24 deletions(-)
diff --git a/libartbase/base/metrics/metrics.h b/libartbase/base/metrics/metrics.h
index 9d92ed904c..fd0ae54dac 100644
--- a/libartbase/base/metrics/metrics.h
+++ b/libartbase/base/metrics/metrics.h
@@ -29,6 +29,7 @@
#include "android-base/logging.h"
#include "base/bit_utils.h"
+#include "base/macros.h"
#include "base/time_utils.h"
#include "tinyxml2.h"
@@ -106,6 +107,17 @@ class Runtime;
struct RuntimeArgumentMap;
namespace metrics {
+template <typename value_t>
+class MetricsBase;
+} // namespace metrics
+
+namespace gc {
+class HeapTest_GCMetrics_Test;
+template <typename T>
+bool AnyIsNonNull(const metrics::MetricsBase<T>* x, const metrics::MetricsBase<T>* y);
+} // namespace gc
+
+namespace metrics {
/**
* An enumeration of all ART counters and histograms.
@@ -285,6 +297,15 @@ class MetricsBase {
public:
virtual void Add(value_t value) = 0;
virtual ~MetricsBase() { }
+
+ private:
+ // Is the metric "null", i.e. never updated or freshly reset?
+ // Used for testing purposes only.
+ virtual bool IsNull() const = 0;
+
+ ART_FRIEND_TEST(gc::HeapTest, GCMetrics);
+ template <typename T>
+ friend bool gc::AnyIsNonNull(const MetricsBase<T>* x, const MetricsBase<T>* y);
};
template <DatumId counter_type, typename T = uint64_t>
@@ -300,7 +321,9 @@ class MetricsCounter : public MetricsBase<T> {
}
void AddOne() { Add(1u); }
- void Add(value_t value) { value_.fetch_add(value, std::memory_order::memory_order_relaxed); }
+ void Add(value_t value) override {
+ value_.fetch_add(value, std::memory_order::memory_order_relaxed);
+ }
void Report(const std::vector<MetricsBackend*>& backends) const {
for (MetricsBackend* backend : backends) {
@@ -313,6 +336,8 @@ class MetricsCounter : public MetricsBase<T> {
value_t Value() const { return value_.load(std::memory_order::memory_order_relaxed); }
private:
+ bool IsNull() const override { return Value() == 0; }
+
std::atomic<value_t> value_;
static_assert(std::atomic<value_t>::is_always_lock_free);
@@ -341,7 +366,7 @@ class MetricsAverage final : public MetricsCounter<datum_id, T> {
// 1. The metric eventually becomes consistent.
// 2. For sufficiently large count_, a few data points which are off shouldn't
// make a huge difference to the reporter.
- void Add(value_t value) {
+ void Add(value_t value) override {
MetricsCounter<datum_id, value_t>::Add(value);
count_.fetch_add(1, std::memory_order::memory_order_release);
}
@@ -363,6 +388,10 @@ class MetricsAverage final : public MetricsCounter<datum_id, T> {
}
private:
+ count_t Count() const { return count_.load(std::memory_order::memory_order_relaxed); }
+
+ bool IsNull() const override { return Count() == 0; }
+
std::atomic<count_t> count_;
static_assert(std::atomic<count_t>::is_always_lock_free);
@@ -397,6 +426,10 @@ class MetricsDeltaCounter : public MetricsBase<T> {
void Reset() { value_ = 0; }
private:
+ value_t Value() const { return value_.load(std::memory_order::memory_order_relaxed); }
+
+ bool IsNull() const override { return Value() == 0; }
+
std::atomic<value_t> value_;
static_assert(std::atomic<value_t>::is_always_lock_free);
@@ -422,7 +455,7 @@ class MetricsHistogram final : public MetricsBase<int64_t> {
== RoundUp(sizeof(intptr_t) + sizeof(value_t) * num_buckets_, sizeof(uint64_t)));
}
- void Add(int64_t value) {
+ void Add(int64_t value) override {
const size_t i = FindBucketId(value);
buckets_[i].fetch_add(1u, std::memory_order::memory_order_relaxed);
}
@@ -462,6 +495,11 @@ class MetricsHistogram final : public MetricsBase<int64_t> {
return std::vector<value_t>{buckets_.begin(), buckets_.end()};
}
+ bool IsNull() const override {
+ std::vector<value_t> buckets = GetBuckets();
+ return std::all_of(buckets.cbegin(), buckets.cend(), [](value_t i) { return i == 0; });
+ }
+
std::array<std::atomic<value_t>, num_buckets_> buckets_;
static_assert(std::atomic<value_t>::is_always_lock_free);
@@ -479,7 +517,7 @@ class MetricsAccumulator final : MetricsBase<T> {
RoundUp(sizeof(intptr_t) + sizeof(T), sizeof(uint64_t)));
}
- void Add(T value) {
+ void Add(T value) override {
T current = value_.load(std::memory_order::memory_order_relaxed);
T new_value;
do {
@@ -505,6 +543,8 @@ class MetricsAccumulator final : MetricsBase<T> {
private:
T Value() const { return value_.load(std::memory_order::memory_order_relaxed); }
+ bool IsNull() const override { return Value() == 0; }
+
std::atomic<T> value_;
friend class ArtMetrics;
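
Note on the metrics.h change above: it adds a private virtual IsNull() hook plus friend declarations (ART_FRIEND_TEST and gc::AnyIsNonNull) so the heap test can inspect metric state without widening the public API. Below is a minimal, self-contained sketch of that pattern; class and function names are illustrative only, not the ART header itself.

    // Sketch: a private virtual test hook exposed only to a friend predicate.
    #include <atomic>
    #include <cstdint>

    template <typename T>
    class MetricsBase;

    // Free predicate, granted friendship so it can call the private IsNull().
    template <typename T>
    bool AnyIsNonNull(const MetricsBase<T>* x, const MetricsBase<T>* y);

    template <typename T>
    class MetricsBase {
     public:
      virtual void Add(T value) = 0;
      virtual ~MetricsBase() {}

     private:
      // "Null" means never updated or freshly reset; used only by tests.
      virtual bool IsNull() const = 0;

      template <typename U>
      friend bool AnyIsNonNull(const MetricsBase<U>* x, const MetricsBase<U>* y);
    };

    template <typename T>
    class Counter final : public MetricsBase<T> {
     public:
      void Add(T value) override { value_.fetch_add(value, std::memory_order_relaxed); }

     private:
      bool IsNull() const override { return value_.load(std::memory_order_relaxed) == 0; }
      std::atomic<T> value_{0};
    };

    template <typename T>
    bool AnyIsNonNull(const MetricsBase<T>* x, const MetricsBase<T>* y) {
      return !x->IsNull() || !y->IsNull();
    }

    // Usage: Counter<uint64_t> c; c.Add(1); AnyIsNonNull<uint64_t>(&c, &c) is now true.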
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index f262b66f7a..8aba47fb3f 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -353,6 +353,24 @@ MarkCompact::MarkCompact(Heap* heap)
CHECK_EQ(*conc_compaction_termination_page_, 0);
// In most of the cases, we don't expect more than one LinearAlloc space.
linear_alloc_spaces_data_.reserve(1);
+
+ // Initialize GC metrics.
+ metrics::ArtMetrics* metrics = GetMetrics();
+ // The mark-compact collector supports only full-heap collections at the moment.
+ gc_time_histogram_ = metrics->FullGcCollectionTime();
+ metrics_gc_count_ = metrics->FullGcCount();
+ metrics_gc_count_delta_ = metrics->FullGcCountDelta();
+ gc_throughput_histogram_ = metrics->FullGcThroughput();
+ gc_tracing_throughput_hist_ = metrics->FullGcTracingThroughput();
+ gc_throughput_avg_ = metrics->FullGcThroughputAvg();
+ gc_tracing_throughput_avg_ = metrics->FullGcTracingThroughputAvg();
+ gc_scanned_bytes_ = metrics->FullGcScannedBytes();
+ gc_scanned_bytes_delta_ = metrics->FullGcScannedBytesDelta();
+ gc_freed_bytes_ = metrics->FullGcFreedBytes();
+ gc_freed_bytes_delta_ = metrics->FullGcFreedBytesDelta();
+ gc_duration_ = metrics->FullGcDuration();
+ gc_duration_delta_ = metrics->FullGcDurationDelta();
+ are_metrics_initialized_ = true;
}
void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
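
For context on the hunk above: the constructor caches raw pointers to the full-heap metric objects in the collector's fields and sets are_metrics_initialized_ so later phases can report through them. A rough sketch of that caching pattern follows, using stand-in types and a hypothetical ReportCycle() hook rather than ART's real GarbageCollector interface.

    #include <cstdint>

    template <typename T>
    class MetricsBase {  // stand-in for art::metrics::MetricsBase
     public:
      virtual void Add(T value) = 0;
      virtual ~MetricsBase() {}
    };

    class CollectorMetricsSketch {
     public:
      // Wired once, mirroring the constructor hunk above.
      void Initialize(MetricsBase<int64_t>* collection_time,
                      MetricsBase<uint64_t>* gc_count) {
        gc_time_histogram_ = collection_time;  // e.g. metrics->FullGcCollectionTime()
        metrics_gc_count_ = gc_count;          // e.g. metrics->FullGcCount()
        are_metrics_initialized_ = true;
      }

      // Hypothetical end-of-cycle hook.
      void ReportCycle(int64_t duration_ms) {
        if (!are_metrics_initialized_) {
          return;  // Collectors that never wire metrics simply skip reporting.
        }
        gc_time_histogram_->Add(duration_ms);
        metrics_gc_count_->Add(1u);
      }

     private:
      MetricsBase<int64_t>* gc_time_histogram_ = nullptr;
      MetricsBase<uint64_t>* metrics_gc_count_ = nullptr;
      bool are_metrics_initialized_ = false;
    };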
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 5e8c1e368a..e8a0f711fa 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <algorithm>
+
+#include "base/metrics/metrics.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "gc/accounting/card_table-inl.h"
@@ -99,6 +102,148 @@ TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
Runtime::Current()->SetDumpGCPerformanceOnShutdown(true);
}
+template <typename T>
+bool AnyIsNonNull(const metrics::MetricsBase<T>* x, const metrics::MetricsBase<T>* y) {
+ return !x->IsNull() || !y->IsNull();
+}
+
+TEST_F(HeapTest, GCMetrics) {
+ // Allocate a few string objects (to be collected), then trigger garbage
+ // collection, and check that GC metrics are updated (where applicable).
+ {
+ constexpr const size_t kNumObj = 128;
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<kNumObj> hs(soa.Self());
+ for (size_t i = 0u; i < kNumObj; ++i) {
+ Handle<mirror::String> string [[maybe_unused]] (
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "test")));
+ }
+ }
+ Heap* heap = Runtime::Current()->GetHeap();
+ heap->CollectGarbage(/* clear_soft_references= */ false);
+
+ // ART Metrics.
+ metrics::ArtMetrics* metrics = Runtime::Current()->GetMetrics();
+ // ART full-heap GC metrics.
+ metrics::MetricsBase<int64_t>* full_gc_collection_time = metrics->FullGcCollectionTime();
+ metrics::MetricsBase<uint64_t>* full_gc_count = metrics->FullGcCount();
+ metrics::MetricsBase<uint64_t>* full_gc_count_delta = metrics->FullGcCountDelta();
+ metrics::MetricsBase<int64_t>* full_gc_throughput = metrics->FullGcThroughput();
+ metrics::MetricsBase<int64_t>* full_gc_tracing_throughput = metrics->FullGcTracingThroughput();
+ metrics::MetricsBase<uint64_t>* full_gc_throughput_avg = metrics->FullGcThroughputAvg();
+ metrics::MetricsBase<uint64_t>* full_gc_tracing_throughput_avg =
+ metrics->FullGcTracingThroughputAvg();
+ metrics::MetricsBase<uint64_t>* full_gc_scanned_bytes = metrics->FullGcScannedBytes();
+ metrics::MetricsBase<uint64_t>* full_gc_scanned_bytes_delta = metrics->FullGcScannedBytesDelta();
+ metrics::MetricsBase<uint64_t>* full_gc_freed_bytes = metrics->FullGcFreedBytes();
+ metrics::MetricsBase<uint64_t>* full_gc_freed_bytes_delta = metrics->FullGcFreedBytesDelta();
+ metrics::MetricsBase<uint64_t>* full_gc_duration = metrics->FullGcDuration();
+ metrics::MetricsBase<uint64_t>* full_gc_duration_delta = metrics->FullGcDurationDelta();
+ // ART young-generation GC metrics.
+ metrics::MetricsBase<int64_t>* young_gc_collection_time = metrics->YoungGcCollectionTime();
+ metrics::MetricsBase<uint64_t>* young_gc_count = metrics->YoungGcCount();
+ metrics::MetricsBase<uint64_t>* young_gc_count_delta = metrics->YoungGcCountDelta();
+ metrics::MetricsBase<int64_t>* young_gc_throughput = metrics->YoungGcThroughput();
+ metrics::MetricsBase<int64_t>* young_gc_tracing_throughput = metrics->YoungGcTracingThroughput();
+ metrics::MetricsBase<uint64_t>* young_gc_throughput_avg = metrics->YoungGcThroughputAvg();
+ metrics::MetricsBase<uint64_t>* young_gc_tracing_throughput_avg =
+ metrics->YoungGcTracingThroughputAvg();
+ metrics::MetricsBase<uint64_t>* young_gc_scanned_bytes = metrics->YoungGcScannedBytes();
+ metrics::MetricsBase<uint64_t>* young_gc_scanned_bytes_delta =
+ metrics->YoungGcScannedBytesDelta();
+ metrics::MetricsBase<uint64_t>* young_gc_freed_bytes = metrics->YoungGcFreedBytes();
+ metrics::MetricsBase<uint64_t>* young_gc_freed_bytes_delta = metrics->YoungGcFreedBytesDelta();
+ metrics::MetricsBase<uint64_t>* young_gc_duration = metrics->YoungGcDuration();
+ metrics::MetricsBase<uint64_t>* young_gc_duration_delta = metrics->YoungGcDurationDelta();
+
+ CollectorType fg_collector_type = heap->GetForegroundCollectorType();
+ if (fg_collector_type == kCollectorTypeCC || fg_collector_type == kCollectorTypeCMC) {
+ // Only the Concurrent Copying and Concurrent Mark-Compact collectors enable
+ // GC metrics at the moment.
+ if (heap->GetUseGenerationalCC()) {
+ // Check that full-heap and/or young-generation GC metrics are non-null
+ // after triggering the collection.
+ EXPECT_PRED2(AnyIsNonNull<int64_t>, full_gc_collection_time, young_gc_collection_time);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_count, young_gc_count);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_count_delta, young_gc_count_delta);
+ EXPECT_PRED2(AnyIsNonNull<int64_t>, full_gc_throughput, young_gc_throughput);
+ EXPECT_PRED2(AnyIsNonNull<int64_t>, full_gc_tracing_throughput, young_gc_tracing_throughput);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_throughput_avg, young_gc_throughput_avg);
+ EXPECT_PRED2(
+ AnyIsNonNull<uint64_t>, full_gc_tracing_throughput_avg, young_gc_tracing_throughput_avg);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_scanned_bytes, young_gc_scanned_bytes);
+ EXPECT_PRED2(
+ AnyIsNonNull<uint64_t>, full_gc_scanned_bytes_delta, young_gc_scanned_bytes_delta);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_freed_bytes, young_gc_freed_bytes);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_freed_bytes_delta, young_gc_freed_bytes_delta);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_duration, young_gc_duration);
+ EXPECT_PRED2(AnyIsNonNull<uint64_t>, full_gc_duration_delta, young_gc_duration_delta);
+ } else {
+ // Check that only full-heap GC metrics are non-null after triggering the collection.
+ EXPECT_FALSE(full_gc_collection_time->IsNull());
+ EXPECT_FALSE(full_gc_count->IsNull());
+ EXPECT_FALSE(full_gc_count_delta->IsNull());
+ EXPECT_FALSE(full_gc_throughput->IsNull());
+ EXPECT_FALSE(full_gc_tracing_throughput->IsNull());
+ EXPECT_FALSE(full_gc_throughput_avg->IsNull());
+ EXPECT_FALSE(full_gc_tracing_throughput_avg->IsNull());
+ if (fg_collector_type != kCollectorTypeCMC) {
+ // TODO(b/270957146): For some reason, these metrics are still null
+ // after running the Concurrent Mark-Compact collector; investigate why.
+ EXPECT_FALSE(full_gc_scanned_bytes->IsNull());
+ EXPECT_FALSE(full_gc_scanned_bytes_delta->IsNull());
+ }
+ EXPECT_FALSE(full_gc_freed_bytes->IsNull());
+ EXPECT_FALSE(full_gc_freed_bytes_delta->IsNull());
+ EXPECT_FALSE(full_gc_duration->IsNull());
+ EXPECT_FALSE(full_gc_duration_delta->IsNull());
+
+ EXPECT_TRUE(young_gc_collection_time->IsNull());
+ EXPECT_TRUE(young_gc_count->IsNull());
+ EXPECT_TRUE(young_gc_count_delta->IsNull());
+ EXPECT_TRUE(young_gc_throughput->IsNull());
+ EXPECT_TRUE(young_gc_tracing_throughput->IsNull());
+ EXPECT_TRUE(young_gc_throughput_avg->IsNull());
+ EXPECT_TRUE(young_gc_tracing_throughput_avg->IsNull());
+ EXPECT_TRUE(young_gc_scanned_bytes->IsNull());
+ EXPECT_TRUE(young_gc_scanned_bytes_delta->IsNull());
+ EXPECT_TRUE(young_gc_freed_bytes->IsNull());
+ EXPECT_TRUE(young_gc_freed_bytes_delta->IsNull());
+ EXPECT_TRUE(young_gc_duration->IsNull());
+ EXPECT_TRUE(young_gc_duration_delta->IsNull());
+ }
+ } else {
+ // Check that all metrics are null after triggering the collection.
+ EXPECT_TRUE(full_gc_collection_time->IsNull());
+ EXPECT_TRUE(full_gc_count->IsNull());
+ EXPECT_TRUE(full_gc_count_delta->IsNull());
+ EXPECT_TRUE(full_gc_throughput->IsNull());
+ EXPECT_TRUE(full_gc_tracing_throughput->IsNull());
+ EXPECT_TRUE(full_gc_throughput_avg->IsNull());
+ EXPECT_TRUE(full_gc_tracing_throughput_avg->IsNull());
+ EXPECT_TRUE(full_gc_scanned_bytes->IsNull());
+ EXPECT_TRUE(full_gc_scanned_bytes_delta->IsNull());
+ EXPECT_TRUE(full_gc_freed_bytes->IsNull());
+ EXPECT_TRUE(full_gc_freed_bytes_delta->IsNull());
+ EXPECT_TRUE(full_gc_duration->IsNull());
+ EXPECT_TRUE(full_gc_duration_delta->IsNull());
+
+ EXPECT_TRUE(young_gc_collection_time->IsNull());
+ EXPECT_TRUE(young_gc_count->IsNull());
+ EXPECT_TRUE(young_gc_count_delta->IsNull());
+ EXPECT_TRUE(young_gc_throughput->IsNull());
+ EXPECT_TRUE(young_gc_tracing_throughput->IsNull());
+ EXPECT_TRUE(young_gc_throughput_avg->IsNull());
+ EXPECT_TRUE(young_gc_tracing_throughput_avg->IsNull());
+ EXPECT_TRUE(young_gc_scanned_bytes->IsNull());
+ EXPECT_TRUE(young_gc_scanned_bytes_delta->IsNull());
+ EXPECT_TRUE(young_gc_freed_bytes->IsNull());
+ EXPECT_TRUE(young_gc_freed_bytes_delta->IsNull());
+ EXPECT_TRUE(young_gc_duration->IsNull());
+ EXPECT_TRUE(young_gc_duration_delta->IsNull());
+ }
+}
+
class ZygoteHeapTest : public CommonRuntimeTest {
void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonRuntimeTest::SetUpRuntimeOptions(options);
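
The new test checks metrics through GoogleTest's EXPECT_PRED2 with the AnyIsNonNull predicate, so a failure prints both argument values instead of a bare boolean. A standalone sketch of that idiom (plain GoogleTest, illustrative names; build and link against gtest_main):

    #include <gtest/gtest.h>

    static bool AnyIsNonZero(int x, int y) { return x != 0 || y != 0; }

    TEST(PredicateSketch, AnyIsNonZero) {
      // On failure, EXPECT_PRED2 reports the predicate name and both argument
      // values, which is why the heap test prefers it over a bare EXPECT_TRUE.
      EXPECT_PRED2(AnyIsNonZero, 0, 42);
    }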
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 5bad8568f2..24101c3be1 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -27,6 +27,7 @@
#endif
#include <algorithm>
+#include <atomic>
#include <initializer_list>
#include <mutex>
#include <type_traits>
@@ -151,38 +152,92 @@ __attribute__((constructor)) static void InitializeSignalChain() {
});
}
-static pthread_key_t GetHandlingSignalKey() {
- static pthread_key_t key;
+template <typename T>
+static constexpr bool IsPowerOfTwo(T x) {
+ static_assert(std::is_integral_v<T>, "T must be integral");
+ static_assert(std::is_unsigned_v<T>, "T must be unsigned");
+ return (x & (x - 1)) == 0;
+}
+
+template <typename T>
+static constexpr T RoundUp(T x, T n) {
+ return (x + n - 1) & -n;
+}
+// Use a bitmap to indicate which signal is being handled so that other
+// non-blocked signals are allowed to be handled, if raised.
+static constexpr size_t kSignalSetLength = _NSIG - 1;
+static constexpr size_t kNumSignalsPerKey = std::numeric_limits<uintptr_t>::digits;
+static_assert(IsPowerOfTwo(kNumSignalsPerKey));
+static constexpr size_t kHandlingSignalKeyCount =
+ RoundUp(kSignalSetLength, kNumSignalsPerKey) / kNumSignalsPerKey;
+
+// We rely on bionic's implementation of pthread_(get/set)specific being
+// async-signal safe.
+static pthread_key_t GetHandlingSignalKey(size_t idx) {
+ static pthread_key_t key[kHandlingSignalKeyCount];
static std::once_flag once;
std::call_once(once, []() {
- int rc = pthread_key_create(&key, nullptr);
- if (rc != 0) {
- fatal("failed to create sigchain pthread key: %s", strerror(rc));
+ for (size_t i = 0; i < kHandlingSignalKeyCount; i++) {
+ int rc = pthread_key_create(&key[i], nullptr);
+ if (rc != 0) {
+ fatal("failed to create sigchain pthread key: %s", strerror(rc));
+ }
}
});
- return key;
+ return key[idx];
}
static bool GetHandlingSignal() {
- void* result = pthread_getspecific(GetHandlingSignalKey());
- return reinterpret_cast<uintptr_t>(result);
+ for (size_t i = 0; i < kHandlingSignalKeyCount; i++) {
+ void* result = pthread_getspecific(GetHandlingSignalKey(i));
+ if (reinterpret_cast<uintptr_t>(result) != 0) {
+ return true;
+ }
+ }
+ return false;
}
-static void SetHandlingSignal(bool value) {
- pthread_setspecific(GetHandlingSignalKey(),
- reinterpret_cast<void*>(static_cast<uintptr_t>(value)));
+static bool GetHandlingSignal(int signo) {
+ size_t bit_idx = signo - 1;
+ size_t key_idx = bit_idx / kNumSignalsPerKey;
+ uintptr_t bit_mask = static_cast<uintptr_t>(1) << (bit_idx % kNumSignalsPerKey);
+ uintptr_t result =
+ reinterpret_cast<uintptr_t>(pthread_getspecific(GetHandlingSignalKey(key_idx)));
+ return result & bit_mask;
+}
+
+static bool SetHandlingSignal(int signo, bool value) {
+ // Use signal-fence to ensure that the compiler doesn't reorder generated code
+ // across signal handlers.
+ size_t bit_idx = signo - 1;
+ size_t key_idx = bit_idx / kNumSignalsPerKey;
+ uintptr_t bit_mask = static_cast<uintptr_t>(1) << (bit_idx % kNumSignalsPerKey);
+ pthread_key_t key = GetHandlingSignalKey(key_idx);
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ uintptr_t bitmap = reinterpret_cast<uintptr_t>(pthread_getspecific(key));
+ bool ret = bitmap & bit_mask;
+ if (value) {
+ bitmap |= bit_mask;
+ } else {
+ bitmap &= ~bit_mask;
+ }
+ pthread_setspecific(key, reinterpret_cast<void*>(bitmap));
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+ return ret;
}
class ScopedHandlingSignal {
public:
- ScopedHandlingSignal() : original_value_(GetHandlingSignal()) {
- }
+ ScopedHandlingSignal(int signo, bool set)
+ : signo_(signo),
+ original_value_(set ? SetHandlingSignal(signo, true) : GetHandlingSignal(signo)) {}
~ScopedHandlingSignal() {
- SetHandlingSignal(original_value_);
+ SetHandlingSignal(signo_, original_value_);
}
private:
+ int signo_;
bool original_value_;
};
@@ -336,14 +391,14 @@ class SignalChain {
// _NSIG is 1 greater than the highest valued signal, but signals start from 1.
// Leave an empty element at index 0 for convenience.
-static SignalChain chains[_NSIG + 1];
+static SignalChain chains[_NSIG];
static bool is_signal_hook_debuggable = false;
void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
// Try the special handlers first.
// If one of them crashes, we'll reenter this handler and pass that crash onto the user handler.
- if (!GetHandlingSignal()) {
+ if (!GetHandlingSignal(signo)) {
for (const auto& handler : chains[signo].special_handlers_) {
if (handler.sc_sigaction == nullptr) {
break;
@@ -356,10 +411,7 @@ void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
sigset_t previous_mask;
linked_sigprocmask(SIG_SETMASK, &handler.sc_mask, &previous_mask);
- ScopedHandlingSignal restorer;
- if (!handler_noreturn) {
- SetHandlingSignal(true);
- }
+ ScopedHandlingSignal restorer(signo, !handler_noreturn);
if (handler.sc_sigaction(signo, siginfo, ucontext_raw)) {
return;
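
To make the new per-signal bookkeeping concrete: each pthread key stores one uintptr_t worth of flag bits, and a signal number is mapped to a (key index, bit mask) pair exactly as in SetHandlingSignal() above. The following self-contained sketch models only that arithmetic (assuming a Linux/bionic target where _NSIG is defined), not the pthread_getspecific storage.

    #include <csignal>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <limits>

    static constexpr std::size_t kSignalSetLength = _NSIG - 1;
    static constexpr std::size_t kNumSignalsPerKey = std::numeric_limits<uintptr_t>::digits;
    static constexpr std::size_t kHandlingSignalKeyCount =
        (kSignalSetLength + kNumSignalsPerKey - 1) / kNumSignalsPerKey;  // RoundUp, then divide

    int main() {
      // e.g. on Linux, SIGSEGV (11) maps to bit 10 of word 0 on a 64-bit target.
      for (int signo : {SIGSEGV, SIGPROF}) {
        std::size_t bit_idx = signo - 1;  // signals start at 1
        std::size_t key_idx = bit_idx / kNumSignalsPerKey;
        uintptr_t bit_mask = static_cast<uintptr_t>(1) << (bit_idx % kNumSignalsPerKey);
        std::printf("signo=%d -> key %zu, mask 0x%zx\n", signo, key_idx,
                    static_cast<std::size_t>(bit_mask));
      }
      std::printf("keys needed: %zu\n", kHandlingSignalKeyCount);
      return 0;
    }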