author     Nicolas Geoffray <ngeoffray@google.com>  2024-05-02 17:49:12 +0000
committer  Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com>  2024-05-03 09:57:14 +0000
commit     b3d88b3f4b47b7635cbdab54dfdcbf866b9813ff (patch)
tree       a77d0d9ca5d0026b2fc309e5230665304ae64193
parent     5a4566f39c3e22ecca1e88008af772c1a808ac9b (diff)
Revert "Reland^2 "Revamp JIT GC.""
This reverts commit eac7cd76d9a39a61c7fecbd3c3eb4f2932a3d55c.

Reason for revert: Bot failure

Change-Id: Ia4632c06ec88cff218d64a8087f762337cabc79e
-rw-r--r--   openjdkjvmti/ti_redefine.cc                  |   3
-rw-r--r--   runtime/art_method.cc                        |  26
-rw-r--r--   runtime/art_method.h                         |   8
-rw-r--r--   runtime/jit/jit_code_cache.cc                | 339
-rw-r--r--   runtime/jit/jit_code_cache.h                 |  50
-rw-r--r--   runtime/runtime_image.cc                     |   4
-rw-r--r--   test/667-jit-jni-stub/jit_jni_stub_test.cc   |   8
-rw-r--r--   test/667-jit-jni-stub/src/Main.java          |   7
8 files changed, 196 insertions, 249 deletions
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index d0ce6a1f49..60e8193f5d 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -2550,9 +2550,6 @@ void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class>
const art::DexFile& old_dex_file = mclass->GetDexFile();
// Update methods.
for (art::ArtMethod& method : mclass->GetDeclaredMethods(image_pointer_size)) {
- // Reinitialize the method by calling `CopyFrom`. This ensures for example
- // the entrypoint and the hotness are reset.
- method.CopyFrom(&method, image_pointer_size);
const art::dex::StringId* new_name_id = dex_file_->FindStringId(method.GetName());
art::dex::TypeIndex method_return_idx =
dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(method.GetReturnTypeDescriptor()));
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 0b6b883bab..c8b16990b2 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -795,18 +795,16 @@ void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
const void* entry_point = GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size);
if (runtime->UseJitCompilation()) {
if (runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point)) {
- SetNativePointer(EntryPointFromQuickCompiledCodeOffset(image_pointer_size),
- src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
- image_pointer_size);
+ SetEntryPointFromQuickCompiledCodePtrSize(
+ src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
+ image_pointer_size);
}
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (interpreter::IsNterpSupported() && class_linker->IsNterpEntryPoint(entry_point)) {
// If the entrypoint is nterp, it's too early to check if the new method
// will support it. So for simplicity, use the interpreter bridge.
- SetNativePointer(EntryPointFromQuickCompiledCodeOffset(image_pointer_size),
- GetQuickToInterpreterBridge(),
- image_pointer_size);
+ SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
}
// Clear the data pointer, it will be set if needed by the caller.
@@ -924,20 +922,4 @@ ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
method->GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
}
-void ArtMethod::SetEntryPointFromQuickCompiledCodePtrSize(
- const void* entry_point_from_quick_compiled_code, PointerSize pointer_size) {
- const void* current_entry_point = GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
- if (current_entry_point == entry_point_from_quick_compiled_code) {
- return;
- }
- jit::Jit* jit = Runtime::Current()->GetJit();
- SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
- entry_point_from_quick_compiled_code,
- pointer_size);
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsPc(current_entry_point)) {
- jit->GetCodeCache()->AddZombieCode(this, current_entry_point);
- }
-}
-
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 97e1e6fa25..a612c81b75 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -766,7 +766,11 @@ class EXPORT ArtMethod final {
}
ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
const void* entry_point_from_quick_compiled_code, PointerSize pointer_size)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
+ entry_point_from_quick_compiled_code,
+ pointer_size);
+ }
static constexpr MemberOffset DataOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -1186,8 +1190,6 @@ class EXPORT ArtMethod final {
// Used by GetName and GetNameView to share common code.
const char* GetRuntimeMethodName() REQUIRES_SHARED(Locks::mutator_lock_);
- friend class RuntimeImageHelper; // For SetNativePointer.
-
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 378e6cba97..7ea9efb0f9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -342,8 +342,6 @@ const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* metho
auto it = saved_compiled_methods_map_.find(method);
if (it != saved_compiled_methods_map_.end()) {
code_ptr = it->second;
- // Now that we're using the saved entrypoint, remove it from the saved map.
- saved_compiled_methods_map_.erase(it);
}
}
if (code_ptr != nullptr) {
@@ -491,40 +489,33 @@ void JitCodeCache::FreeAllMethodHeaders(
->RemoveDependentsWithMethodHeaders(method_headers);
}
- {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
- ScopedCodeCacheWrite scc(private_region_);
- for (const OatQuickMethodHeader* method_header : method_headers) {
- FreeCodeAndData(method_header->GetCode());
- }
-
- // We have potentially removed a lot of debug info. Do maintenance pass to save space.
- RepackNativeDebugInfoForJit();
+ ScopedCodeCacheWrite scc(private_region_);
+ for (const OatQuickMethodHeader* method_header : method_headers) {
+ FreeCodeAndData(method_header->GetCode());
}
+ // We have potentially removed a lot of debug info. Do maintenance pass to save space.
+ RepackNativeDebugInfoForJit();
+
// Check that the set of compiled methods exactly matches native debug information.
// Does not check zygote methods since they can change concurrently.
if (kIsDebugBuild && !Runtime::Current()->IsZygote()) {
std::map<const void*, ArtMethod*> compiled_methods;
+ VisitAllMethods([&](const void* addr, ArtMethod* method) {
+ if (!IsInZygoteExecSpace(addr)) {
+ CHECK(addr != nullptr && method != nullptr);
+ compiled_methods.emplace(addr, method);
+ }
+ });
std::set<const void*> debug_info;
- {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
- VisitAllMethods([&](const void* addr, ArtMethod* method) {
- if (!IsInZygoteExecSpace(addr)) {
- CHECK(addr != nullptr && method != nullptr);
- compiled_methods.emplace(addr, method);
- }
- });
- ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
- addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA)); // Thumb-bit.
- bool res = debug_info.emplace(addr).second;
- CHECK(res) << "Duplicate debug info: " << addr << " " << name;
- CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
- });
- }
+ ForEachNativeDebugSymbol([&](const void* addr, size_t, const char* name) {
+ addr = AlignDown(addr, GetInstructionSetInstructionAlignment(kRuntimeISA)); // Thumb-bit.
+ CHECK(debug_info.emplace(addr).second) << "Duplicate debug info: " << addr << " " << name;
+ CHECK_EQ(compiled_methods.count(addr), 1u) << "Extra debug info: " << addr << " " << name;
+ });
if (!debug_info.empty()) { // If debug-info generation is enabled.
for (auto it : compiled_methods) {
- CHECK_EQ(debug_info.count(it.first), 1u) << "Missing debug info";
+ CHECK_EQ(debug_info.count(it.first), 1u) << "No debug info: " << it.second->PrettyMethod();
}
CHECK_EQ(compiled_methods.size(), debug_info.size());
}
@@ -555,34 +546,17 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
- for (auto it = zombie_jni_code_.begin(); it != zombie_jni_code_.end();) {
- if (alloc.ContainsUnsafe(*it)) {
- it = zombie_jni_code_.erase(it);
- } else {
- ++it;
- }
- }
- for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
- if (alloc.ContainsUnsafe(*it)) {
- it = processed_zombie_jni_code_.erase(it);
- } else {
- ++it;
- }
- }
for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
if (alloc.ContainsUnsafe(it->second)) {
method_headers.insert(OatQuickMethodHeader::FromCodePointer(it->first));
VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
it = method_code_map_.erase(it);
- zombie_code_.erase(it->first);
- processed_zombie_code_.erase(it->first);
} else {
++it;
}
}
}
for (auto it = osr_code_map_.begin(); it != osr_code_map_.end();) {
- DCHECK(!ContainsElement(zombie_code_, it->second));
if (alloc.ContainsUnsafe(it->first)) {
// Note that the code has already been pushed to method_headers in the loop
// above and is going to be removed in FreeCode() below.
@@ -600,8 +574,8 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
++it;
}
}
+ FreeAllMethodHeaders(method_headers);
}
- FreeAllMethodHeaders(method_headers);
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
@@ -667,6 +641,18 @@ static void ClearMethodCounter(ArtMethod* method, bool was_warm)
method->UpdateCounter(/* new_samples= */ 1);
}
+void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
+ while (collection_in_progress_) {
+ Locks::jit_lock_->Unlock(self);
+ {
+ ScopedThreadSuspension sts(self, ThreadState::kSuspended);
+ MutexLock mu(self, *Locks::jit_lock_);
+ WaitForPotentialCollectionToComplete(self);
+ }
+ Locks::jit_lock_->Lock(self);
+ }
+}
+
bool JitCodeCache::Commit(Thread* self,
JitMemoryRegion* region,
ArtMethod* method,
@@ -694,6 +680,9 @@ bool JitCodeCache::Commit(Thread* self,
OatQuickMethodHeader* method_header = nullptr;
{
MutexLock mu(self, *Locks::jit_lock_);
+ // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
+ // finish.
+ WaitForPotentialCollectionToCompleteRunnable(self);
const uint8_t* code_ptr = region->CommitCode(reserved_code, code, stack_map_data);
if (code_ptr == nullptr) {
return false;
@@ -793,6 +782,11 @@ bool JitCodeCache::Commit(Thread* self,
method, method_header->GetEntryPoint());
}
}
+ if (collection_in_progress_) {
+ // We need to update the live bitmap if there is a GC to ensure it sees this new
+ // code.
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ }
VLOG(jit)
<< "JIT added (kind=" << compilation_kind << ") "
<< ArtMethod::PrettyMethod(method) << "@" << method
@@ -861,7 +855,6 @@ bool JitCodeCache::RemoveMethodLocked(ArtMethod* method, bool release_memory) {
FreeCodeAndData(it->second.GetCode());
}
jni_stubs_map_.erase(it);
- zombie_jni_code_.erase(method);
} else {
it->first.UpdateShorty(it->second.GetMethods().front());
}
@@ -992,6 +985,7 @@ bool JitCodeCache::Reserve(Thread* self,
{
ScopedThreadSuspension sts(self, ThreadState::kSuspended);
MutexLock mu(self, *Locks::jit_lock_);
+ WaitForPotentialCollectionToComplete(self);
ScopedCodeCacheWrite ccw(*region);
code = region->AllocateCode(code_size);
data = region->AllocateData(data_size);
@@ -1008,8 +1002,8 @@ bool JitCodeCache::Reserve(Thread* self,
<< PrettySize(data_size);
return false;
}
- // Increase the capacity and try again.
- IncreaseCodeCacheCapacity(self);
+ // Run a code cache collection and try again.
+ GarbageCollectCache(self);
}
*reserved_code = ArrayRef<const uint8_t>(code, code_size);
@@ -1087,6 +1081,11 @@ class MarkCodeClosure final : public Closure {
Barrier* const barrier_;
};
+void JitCodeCache::NotifyCollectionDone(Thread* self) {
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
+}
+
void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
Barrier barrier(0);
size_t threads_running_checkpoint = 0;
@@ -1104,114 +1103,86 @@ bool JitCodeCache::IsAtMaxCapacity() const {
return private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity();
}
-void JitCodeCache::IncreaseCodeCacheCapacity(Thread* self) {
- MutexLock mu(self, *Locks::jit_lock_);
- private_region_.IncreaseCodeCacheCapacity();
+void JitCodeCache::GarbageCollectCache(Thread* self) {
+ ScopedTrace trace(__FUNCTION__);
+ // Wait for an existing collection, or let everyone know we are starting one.
+ {
+ ScopedThreadSuspension sts(self, ThreadState::kSuspended);
+ MutexLock mu(self, *Locks::jit_lock_);
+ if (!garbage_collect_code_) {
+ private_region_.IncreaseCodeCacheCapacity();
+ return;
+ } else if (WaitForPotentialCollectionToComplete(self)) {
+ return;
+ } else {
+ number_of_collections_++;
+ live_bitmap_.reset(CodeCacheBitmap::Create(
+ "code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+ reinterpret_cast<uintptr_t>(
+ private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
+ collection_in_progress_ = true;
+ }
+ }
+
+ TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
+ {
+ TimingLogger::ScopedTiming st("Code cache collection", &logger);
+
+ VLOG(jit) << "Do code cache collection, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+
+ DoCollection(self);
+
+ VLOG(jit) << "After code cache collection, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+
+ {
+ MutexLock mu(self, *Locks::jit_lock_);
+ private_region_.IncreaseCodeCacheCapacity();
+ live_bitmap_.reset(nullptr);
+ NotifyCollectionDone(self);
+ }
+ }
+ Runtime::Current()->GetJit()->AddTimingLogger(logger);
}
void JitCodeCache::RemoveUnmarkedCode(Thread* self) {
ScopedTrace trace(__FUNCTION__);
- std::unordered_set<OatQuickMethodHeader*> method_headers;
ScopedDebugDisallowReadBarriers sddrb(self);
+ std::unordered_set<OatQuickMethodHeader*> method_headers;
{
MutexLock mu(self, *Locks::jit_lock_);
- // Iterate over all zombie code and remove entries that are not marked.
- for (auto it = processed_zombie_code_.begin(); it != processed_zombie_code_.end();) {
- const void* code_ptr = *it;
- uintptr_t allocation = FromCodeToAllocation(code_ptr);
- DCHECK(!IsInZygoteExecSpace(code_ptr));
- if (GetLiveBitmap()->Test(allocation)) {
+ // Iterate over all compiled code and remove entries that are not marked.
+ for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
+ JniStubData* data = &it->second;
+ if (IsInZygoteExecSpace(data->GetCode()) ||
+ !data->IsCompiled() ||
+ GetLiveBitmap()->Test(FromCodeToAllocation(data->GetCode()))) {
++it;
} else {
- OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
- method_headers.insert(header);
- method_code_map_.erase(header->GetCode());
- VLOG(jit) << "JIT removed " << *it;
- it = processed_zombie_code_.erase(it);
+ method_headers.insert(OatQuickMethodHeader::FromCodePointer(data->GetCode()));
+ for (ArtMethod* method : data->GetMethods()) {
+ VLOG(jit) << "JIT removed (JNI) " << method->PrettyMethod() << ": " << data->GetCode();
+ }
+ it = jni_stubs_map_.erase(it);
}
}
- for (auto it = processed_zombie_jni_code_.begin(); it != processed_zombie_jni_code_.end();) {
- ArtMethod* method = *it;
- auto stub = jni_stubs_map_.find(JniStubKey(method));
- if (stub == jni_stubs_map_.end()) {
- it = processed_zombie_jni_code_.erase(it);
- continue;
- }
- JniStubData& data = stub->second;
- if (!data.IsCompiled() || !ContainsElement(data.GetMethods(), method)) {
- it = processed_zombie_jni_code_.erase(it);
- } else if (method->GetEntryPointFromQuickCompiledCode() ==
- OatQuickMethodHeader::FromCodePointer(data.GetCode())->GetEntryPoint()) {
- // The stub got reused for this method, remove ourselves from the zombie
- // list.
- it = processed_zombie_jni_code_.erase(it);
- } else if (!GetLiveBitmap()->Test(FromCodeToAllocation(data.GetCode()))) {
- data.RemoveMethod(method);
- if (data.GetMethods().empty()) {
- OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(data.GetCode());
- method_headers.insert(header);
- CHECK(ContainsPc(header));
- VLOG(jit) << "JIT removed native code of" << method->PrettyMethod();
- jni_stubs_map_.erase(stub);
- } else {
- stub->first.UpdateShorty(stub->second.GetMethods().front());
- }
- it = processed_zombie_jni_code_.erase(it);
- } else {
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ if (IsInZygoteExecSpace(code_ptr) || GetLiveBitmap()->Test(allocation)) {
++it;
+ } else {
+ OatQuickMethodHeader* header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ method_headers.insert(header);
+ VLOG(jit) << "JIT removed " << it->second->PrettyMethod() << ": " << it->first;
+ it = method_code_map_.erase(it);
}
}
- }
-
- FreeAllMethodHeaders(method_headers);
-}
-
-void JitCodeCache::AddZombieCode(ArtMethod* method, const void* entry_point) {
- CHECK(ContainsPc(entry_point));
- CHECK(method->IsNative() || (method->GetEntryPointFromQuickCompiledCode() != entry_point));
- const void* code_ptr = OatQuickMethodHeader::FromEntryPoint(entry_point)->GetCode();
- if (!IsInZygoteExecSpace(code_ptr)) {
- Thread* self = Thread::Current();
- if (Locks::jit_lock_->IsExclusiveHeld(self)) {
- AddZombieCodeInternal(method, code_ptr);
- } else {
- MutexLock mu(Thread::Current(), *Locks::jit_lock_);
- AddZombieCodeInternal(method, code_ptr);
- }
- }
-}
-
-
-class JitGcTask final : public Task {
- public:
- JitGcTask() {}
-
- void Run(Thread* self) override {
- Runtime::Current()->GetJit()->GetCodeCache()->DoCollection(self);
- }
-
- void Finalize() override {
- delete this;
- }
-};
-
-void JitCodeCache::AddZombieCodeInternal(ArtMethod* method, const void* code_ptr) {
- if (method->IsNative()) {
- CHECK(jni_stubs_map_.find(JniStubKey(method)) != jni_stubs_map_.end());
- zombie_jni_code_.insert(method);
- } else {
- zombie_code_.insert(code_ptr);
- }
- // Arbitrary threshold of number of zombie code before doing a GC.
- static constexpr size_t kNumberOfZombieCodeThreshold = kIsDebugBuild ? 1 : 1000;
- size_t number_of_code_to_delete =
- zombie_code_.size() + zombie_jni_code_.size() + osr_code_map_.size();
- if (number_of_code_to_delete >= kNumberOfZombieCodeThreshold) {
- JitThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
- if (pool != nullptr && !gc_task_scheduled_) {
- gc_task_scheduled_ = true;
- pool->AddTask(Thread::Current(), new JitGcTask());
- }
+ FreeAllMethodHeaders(method_headers);
}
}
@@ -1263,58 +1234,51 @@ void JitCodeCache::ResetHotnessCounter(ArtMethod* method, Thread* self) {
void JitCodeCache::DoCollection(Thread* self) {
ScopedTrace trace(__FUNCTION__);
-
{
ScopedDebugDisallowReadBarriers sddrb(self);
MutexLock mu(self, *Locks::jit_lock_);
- if (!garbage_collect_code_) {
- return;
- } else if (WaitForPotentialCollectionToComplete(self)) {
- return;
+ // Mark compiled code that are entrypoints of ArtMethods. Compiled code that is not
+ // an entry point is either:
+ // - an osr compiled code, that will be removed if not in a thread call stack.
+ // - discarded compiled code, that will be removed if not in a thread call stack.
+ for (const auto& entry : jni_stubs_map_) {
+ const JniStubData& data = entry.second;
+ const void* code_ptr = data.GetCode();
+ if (IsInZygoteExecSpace(code_ptr)) {
+ continue;
+ }
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ for (ArtMethod* method : data.GetMethods()) {
+ if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ break;
+ }
+ }
}
- collection_in_progress_ = true;
- number_of_collections_++;
- live_bitmap_.reset(CodeCacheBitmap::Create(
- "code-cache-bitmap",
- reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
- reinterpret_cast<uintptr_t>(
- private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
- processed_zombie_code_.insert(zombie_code_.begin(), zombie_code_.end());
- zombie_code_.clear();
- processed_zombie_jni_code_.insert(zombie_jni_code_.begin(), zombie_jni_code_.end());
- zombie_jni_code_.clear();
+ for (const auto& it : method_code_map_) {
+ ArtMethod* method = it.second;
+ const void* code_ptr = it.first;
+ if (IsInZygoteExecSpace(code_ptr)) {
+ continue;
+ }
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+ }
+ }
+
// Empty osr method map, as osr compiled code will be deleted (except the ones
// on thread stacks).
- for (auto it = osr_code_map_.begin(); it != osr_code_map_.end(); ++it) {
- processed_zombie_code_.insert(it->second);
- }
osr_code_map_.clear();
}
- TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit));
- {
- TimingLogger::ScopedTiming st("Code cache collection", &logger);
-
- {
- ScopedObjectAccess soa(self);
- // Run a checkpoint on all threads to mark the JIT compiled code they are running.
- MarkCompiledCodeOnThreadStacks(self);
- // Remove zombie code which hasn't been marked.
- RemoveUnmarkedCode(self);
- }
-
- MutexLock mu(self, *Locks::jit_lock_);
- live_bitmap_.reset(nullptr);
- NotifyCollectionDone(self);
- }
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ MarkCompiledCodeOnThreadStacks(self);
- Runtime::Current()->GetJit()->AddTimingLogger(logger);
-}
-
-void JitCodeCache::NotifyCollectionDone(Thread* self) {
- collection_in_progress_ = false;
- gc_task_scheduled_ = false;
- lock_cond_.Broadcast(self);
+ // At this point, mutator threads are still running, and entrypoints of methods can
+ // change. We do know they cannot change to a code cache entry that is not marked,
+ // therefore we can safely remove those entries.
+ RemoveUnmarkedCode(self);
}
OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
@@ -1418,7 +1382,7 @@ ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self,
}
if (info == nullptr) {
- IncreaseCodeCacheCapacity(self);
+ GarbageCollectCache(self);
MutexLock mu(self, *Locks::jit_lock_);
info = AddProfilingInfoInternal(self, method, inline_cache_entries, branch_cache_entries);
}
@@ -1646,6 +1610,11 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
// as well change them back as this stub shall not be collected anyway and this
// can avoid a few expensive GenericJNI calls.
data->UpdateEntryPoints(entrypoint);
+ if (collection_in_progress_) {
+ if (!IsInZygoteExecSpace(data->GetCode())) {
+ GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
+ }
+ }
}
return new_compilation;
} else {
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 5202c2c37e..96fc7e2706 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -285,9 +285,11 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
void FreeLocked(JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::jit_lock_);
- void IncreaseCodeCacheCapacity(Thread* self)
+ // Perform a collection on the code cache.
+ EXPORT void GarbageCollectCache(Thread* self)
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -421,20 +423,9 @@ class JitCodeCache {
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_);
- // NO_THREAD_SAFETY_ANALYSIS because we may be called with the JIT lock held
- // or not. The implementation of this method handles the two cases.
- void AddZombieCode(ArtMethod* method, const void* code_ptr) NO_THREAD_SAFETY_ANALYSIS;
-
- EXPORT void DoCollection(Thread* self)
- REQUIRES(!Locks::jit_lock_);
-
private:
JitCodeCache();
- void AddZombieCodeInternal(ArtMethod* method, const void* code_ptr)
- REQUIRES(Locks::jit_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& inline_cache_entries,
@@ -442,16 +433,21 @@ class JitCodeCache {
REQUIRES(Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
+ // The non-mutator lock version should be used if possible. This method will release then
+ // re-acquire the mutator lock.
+ void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
+ REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
bool WaitForPotentialCollectionToComplete(Thread* self)
- REQUIRES(Locks::jit_lock_) REQUIRES_SHARED(!Locks::mutator_lock_);
-
- // Notify all waiting threads that a collection is done.
- void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
+ REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
// Remove CHA dependents and underlying allocations for entries in `method_headers`.
void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::jit_lock_)
REQUIRES(!Locks::cha_lock_);
// Removes method from the cache. The caller must ensure that all threads
@@ -466,7 +462,8 @@ class JitCodeCache {
// Free code and data allocations for `code_ptr`.
void FreeCodeAndData(const void* code_ptr)
- REQUIRES(Locks::jit_lock_);
+ REQUIRES(Locks::jit_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
@@ -480,9 +477,16 @@ class JitCodeCache {
// Number of bytes allocated in the data cache.
size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
+ // Notify all waiting threads that a collection is done.
+ void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
+
// Return whether the code cache's capacity is at its maximum.
bool IsAtMaxCapacity() const REQUIRES(Locks::jit_lock_);
+ void DoCollection(Thread* self)
+ REQUIRES(!Locks::jit_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void RemoveUnmarkedCode(Thread* self)
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -559,28 +563,18 @@ class JitCodeCache {
// ProfilingInfo objects we have allocated.
SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
- // Zombie code and JNI methods to consider for collection.
- std::set<const void*> zombie_code_ GUARDED_BY(Locks::jit_lock_);
- std::set<ArtMethod*> zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
-
- std::set<const void*> processed_zombie_code_ GUARDED_BY(Locks::jit_lock_);
- std::set<ArtMethod*> processed_zombie_jni_code_ GUARDED_BY(Locks::jit_lock_);
-
// Methods that the zygote has compiled and can be shared across processes
// forked from the zygote.
ZygoteMap zygote_map_;
// -------------- JIT GC related data structures ----------------------- //
- // Condition to wait on during collection and for accessing weak references in inline caches.
+ // Condition to wait on during collection.
ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
// Whether there is a code cache collection in progress.
bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
- // Whether a GC task is already scheduled.
- bool gc_task_scheduled_ GUARDED_BY(Locks::jit_lock_);
-
// Bitmap for collecting code and data.
std::unique_ptr<CodeCacheBitmap> live_bitmap_;
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index b134c79286..997ea2fde6 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -964,9 +964,7 @@ class RuntimeImageHelper {
entrypoint = boot_jni_stub;
}
}
- copy->SetNativePointer(ArtMethod::EntryPointFromQuickCompiledCodeOffset(kRuntimePointerSize),
- entrypoint,
- kRuntimePointerSize);
+ copy->SetEntryPointFromQuickCompiledCode(entrypoint);
if (method->IsNative()) {
StubType stub_type = method->IsCriticalNative()
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
index b7946f39ed..2b8e14c03d 100644
--- a/test/667-jit-jni-stub/jit_jni_stub_test.cc
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -39,11 +39,9 @@ extern "C" JNIEXPORT
void Java_Main_jitGc(JNIEnv*, jclass) {
CHECK(Runtime::Current()->GetJit() != nullptr);
jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
- {
- ScopedObjectAccess soa(Thread::Current());
- cache->InvalidateAllCompiledCode();
- }
- cache->DoCollection(Thread::Current());
+ ScopedObjectAccess soa(Thread::Current());
+ cache->InvalidateAllCompiledCode();
+ cache->GarbageCollectCache(Thread::Current());
}
extern "C" JNIEXPORT
diff --git a/test/667-jit-jni-stub/src/Main.java b/test/667-jit-jni-stub/src/Main.java
index 05039670d1..c0829b5100 100644
--- a/test/667-jit-jni-stub/src/Main.java
+++ b/test/667-jit-jni-stub/src/Main.java
@@ -67,6 +67,9 @@ public class Main {
assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
assertFalse(hasJitCompiledCode(Main.class, "callThrough"));
callThrough(Main.class, "testMixedFramesOnStackStage2");
+ // We have just returned through the JIT-compiled JNI stub, so it must still
+ // be compiled (though not necessarily with the entrypoint pointing to it).
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
// Though the callThrough() is on the stack, that frame is using the GenericJNI
// and does not prevent the collection of the JNI stub.
testStubCanBeCollected();
@@ -91,6 +94,10 @@ public class Main {
}
public static void testStubCanBeCollected() {
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
+ doJitGcsUntilFullJitGcIsScheduled();
+ assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
+ assertTrue(hasJitCompiledCode(Main.class, "callThrough"));
jitGc(); // JIT GC without callThrough() on the stack should collect the callThrough() stub.
assertFalse(hasJitCompiledEntrypoint(Main.class, "callThrough"));
assertFalse(hasJitCompiledCode(Main.class, "callThrough"));