author     Lokesh Gidra <lokeshgidra@google.com>  2023-05-25 23:43:59 +0000
committer  Cherrypicker Worker <android-build-cherrypicker-worker@google.com>  2023-05-30 04:32:05 +0000
commit     1733aa10e29501c32a84d98ea32ddd1642a33025 (patch)
tree       51a354a775d59c1148ceff323c30f8da7ca351b8
parent     86309e745c1222101ec162401f8192f479fa15fe (diff)
Delay copy-buffer allotment for uffd compaction until actual use
Currently, we allot a copy-buffer to a mutator thread as soon as a userfault for the moving space is received. This can lead to a scenario wherein many mutator threads access the same page simultaneously and hence get userfaulted; in that case we would uselessly allot buffers to all of those threads. This CL defers the allotment until it is confirmed that the mutator thread actually needs the buffer for compaction.

Bug: 284289925
Test: art/test/testrunner/testrunner.py

(cherry picked from commit 33e2f0e1caecab9c7b681e841e500e87caaa540f)
(cherry picked from https://googleplex-android-review.googlesource.com/q/commit:684b6c42ab68ed1c10616ea076c23fb22a5ff354)
Merged-In: I9201716227e9e7245f20b016ff86558e0842fc53
Change-Id: I9201716227e9e7245f20b016ff86558e0842fc53
-rw-r--r--  runtime/gc/collector/mark_compact.cc  23
1 file changed, 12 insertions(+), 11 deletions(-)
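For illustration only, and not part of the commit: below is a minimal, self-contained C++ sketch of the deferred-allotment pattern described above. BufferPool, kBufferCount, GetOrClaimBuffer, and tls_buffer are hypothetical stand-ins for the ART internals touched by this patch (compaction_buffer_counter_, compaction_buffers_map_, kMutatorCompactionBufferCount, Thread::SetThreadLocalGcBuffer); the point is only that a buffer slot is claimed on first real use, not as soon as a fault is observed.

// Hypothetical sketch of lazy per-thread buffer allotment; not ART code.
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::size_t kPageSize = 4096;
constexpr uint16_t kBufferCount = 256;  // stand-in for kMutatorCompactionBufferCount

class BufferPool {
 public:
  // Slot 0 is reserved for the GC thread, mirroring the "+1" sizing of the
  // real buffer map; the counter therefore starts at 1.
  BufferPool() : storage_((kBufferCount + 1) * kPageSize), counter_(1) {}

  // Called only once it is certain the caller must copy a page itself.
  uint8_t* ClaimBuffer() {
    uint16_t idx = counter_.fetch_add(1, std::memory_order_relaxed);
    assert(idx <= kBufferCount);  // analogous to the CHECK_LE in the CL
    return storage_.data() + idx * kPageSize;
  }

 private:
  std::vector<uint8_t> storage_;
  std::atomic<uint16_t> counter_;
};

// Per-thread cache: allot lazily on first real use, then reuse on later faults.
thread_local uint8_t* tls_buffer = nullptr;

uint8_t* GetOrClaimBuffer(BufferPool& pool) {
  if (tls_buffer == nullptr) {  // deferred: nothing is claimed until a copy is needed
    tls_buffer = pool.ClaimBuffer();
  }
  return tls_buffer;
}

int main() {
  BufferPool pool;
  uint8_t* buf = GetOrClaimBuffer(pool);    // first use: claims slot 1
  uint8_t* again = GetOrClaimBuffer(pool);  // reused; no new slot is claimed
  assert(buf == again);
  return 0;
}

The relaxed fetch_add in the sketch mirrors the ordering used on the real counter: slot assignment only needs to hand out unique indices, not to synchronize other memory.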
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 336b143771..993e0dbb17 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -581,7 +581,7 @@ void MarkCompact::InitializePhase() {
bytes_scanned_ = 0;
freed_objects_ = 0;
// The first buffer is used by gc-thread.
- compaction_buffer_counter_ = 1;
+ compaction_buffer_counter_.store(1, std::memory_order_relaxed);
from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
black_allocations_begin_ = bump_pointer_space_->Limit();
walk_super_class_cache_ = nullptr;
@@ -2791,6 +2791,7 @@ void MarkCompact::CompactionPause() {
RecordFree(ObjectBytePair(freed_objects_, freed_bytes));
} else {
DCHECK_EQ(compaction_in_progress_count_.load(std::memory_order_relaxed), 0u);
+ DCHECK_EQ(compaction_buffer_counter_.load(std::memory_order_relaxed), 1);
if (!use_uffd_sigbus_) {
// We must start worker threads before resuming mutators to avoid deadlocks.
heap_->GetThreadPool()->StartWorkers(thread_running_gc_);
@@ -3036,16 +3037,8 @@ bool MarkCompact::SigbusHandler(siginfo_t* info) {
ConcurrentlyProcessMovingPage<kMinorFaultMode>(
fault_page, nullptr, nr_moving_space_used_pages);
} else {
- uint8_t* buf = self->GetThreadLocalGcBuffer();
- if (buf == nullptr) {
- uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
- // The buffer-map is one page bigger as the first buffer is used by GC-thread.
- CHECK_LE(idx, kMutatorCompactionBufferCount);
- buf = compaction_buffers_map_.Begin() + idx * kPageSize;
- DCHECK(compaction_buffers_map_.HasAddress(buf));
- self->SetThreadLocalGcBuffer(buf);
- }
- ConcurrentlyProcessMovingPage<kCopyMode>(fault_page, buf, nr_moving_space_used_pages);
+ ConcurrentlyProcessMovingPage<kCopyMode>(
+ fault_page, self->GetThreadLocalGcBuffer(), nr_moving_space_used_pages);
}
return true;
} else {
@@ -3154,6 +3147,14 @@ void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
if (kMode == kMinorFaultMode) {
DCHECK_EQ(buf, nullptr);
buf = shadow_to_space_map_.Begin() + page_idx * kPageSize;
+ } else if (UNLIKELY(buf == nullptr)) {
+ DCHECK_EQ(kMode, kCopyMode);
+ uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
+ // The buffer-map is one page bigger as the first buffer is used by GC-thread.
+ CHECK_LE(idx, kMutatorCompactionBufferCount);
+ buf = compaction_buffers_map_.Begin() + idx * kPageSize;
+ DCHECK(compaction_buffers_map_.HasAddress(buf));
+ Thread::Current()->SetThreadLocalGcBuffer(buf);
}
if (fault_page < post_compact_end_) {