summary | refs | log | tree | commit | diff
path: root/simpleperf/RecordReadThread.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'simpleperf/RecordReadThread.cpp')
-rw-r--r--  simpleperf/RecordReadThread.cpp | 139
1 file changed, 35 insertions(+), 104 deletions(-)
diff --git a/simpleperf/RecordReadThread.cpp b/simpleperf/RecordReadThread.cpp
index b89066b9..876f4041 100644
--- a/simpleperf/RecordReadThread.cpp
+++ b/simpleperf/RecordReadThread.cpp
@@ -23,9 +23,7 @@
#include <unordered_map>
#include "environment.h"
-#include "event_type.h"
#include "record.h"
-#include "utils.h"
namespace simpleperf {
@@ -206,22 +204,14 @@ bool KernelRecordReader::MoveToNextRecord(const RecordParser& parser) {
}
RecordReadThread::RecordReadThread(size_t record_buffer_size, const perf_event_attr& attr,
- size_t min_mmap_pages, size_t max_mmap_pages,
- size_t aux_buffer_size, bool allow_cutting_samples)
- : record_buffer_(record_buffer_size),
- record_parser_(attr),
- attr_(attr),
- min_mmap_pages_(min_mmap_pages),
- max_mmap_pages_(max_mmap_pages),
- aux_buffer_size_(aux_buffer_size) {
+ size_t min_mmap_pages, size_t max_mmap_pages)
+ : record_buffer_(record_buffer_size), record_parser_(attr), attr_(attr),
+ min_mmap_pages_(min_mmap_pages), max_mmap_pages_(max_mmap_pages) {
if (attr.sample_type & PERF_SAMPLE_STACK_USER) {
stack_size_in_sample_record_ = attr.sample_stack_user;
}
record_buffer_low_level_ = std::min(record_buffer_size / 4, kDefaultLowBufferLevel);
record_buffer_critical_level_ = std::min(record_buffer_size / 6, kDefaultCriticalBufferLevel);
- if (!allow_cutting_samples) {
- record_buffer_low_level_ = record_buffer_critical_level_;
- }
}
RecordReadThread::~RecordReadThread() {
@@ -293,13 +283,7 @@ std::unique_ptr<Record> RecordReadThread::GetRecord() {
record_buffer_.MoveToNextRecord();
char* p = record_buffer_.GetCurrentRecord();
if (p != nullptr) {
- std::unique_ptr<Record> r = ReadRecordFromBuffer(attr_, p);
- if (r->type() == PERF_RECORD_AUXTRACE) {
- auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
- record_buffer_.AddCurrentRecordSize(auxtrace->data->aux_size);
- auxtrace->location.addr = r->Binary() + r->size();
- }
- return r;
+ return ReadRecordFromBuffer(attr_, p);
}
if (has_data_notification_) {
char dummy;
@@ -367,21 +351,13 @@ bool RecordReadThread::HandleAddEventFds(IOEventLoop& loop,
std::unordered_map<int, EventFd*> cpu_map;
for (size_t pages = max_mmap_pages_; pages >= min_mmap_pages_; pages >>= 1) {
bool success = true;
- bool report_error = pages == min_mmap_pages_;
for (EventFd* fd : event_fds) {
auto it = cpu_map.find(fd->Cpu());
if (it == cpu_map.end()) {
- if (!fd->CreateMappedBuffer(pages, report_error)) {
+ if (!fd->CreateMappedBuffer(pages, pages == min_mmap_pages_)) {
success = false;
break;
}
- if (IsEtmEventType(fd->attr().type)) {
- if (!fd->CreateAuxBuffer(aux_buffer_size_, report_error)) {
- fd->DestroyMappedBuffer();
- success = false;
- break;
- }
- }
cpu_map[fd->Cpu()] = fd;
} else {
if (!fd->ShareMappedBuffer(*(it->second), pages == min_mmap_pages_)) {
@@ -396,7 +372,6 @@ bool RecordReadThread::HandleAddEventFds(IOEventLoop& loop,
}
for (auto& pair : cpu_map) {
pair.second->DestroyMappedBuffer();
- pair.second->DestroyAuxBuffer();
}
cpu_map.clear();
}
@@ -423,7 +398,6 @@ bool RecordReadThread::HandleRemoveEventFds(const std::vector<EventFd*>& event_f
kernel_record_readers_.erase(it);
event_fd->StopPolling();
event_fd->DestroyMappedBuffer();
- event_fd->DestroyAuxBuffer();
}
}
}
@@ -445,39 +419,34 @@ bool RecordReadThread::ReadRecordsFromKernelBuffer() {
readers.push_back(&reader);
}
}
- bool has_data = false;
- if (!readers.empty()) {
- has_data = true;
- if (readers.size() == 1u) {
- // Only one buffer has data, process it directly.
- while (readers[0]->MoveToNextRecord(record_parser_)) {
- PushRecordToRecordBuffer(readers[0]);
- }
- } else {
- // Use a binary heap to merge records from different buffers. As records from the same
- // buffer are already ordered by time, we only need to merge the first record from all
- // buffers. And each time a record is popped from the heap, we put the next record from its
- // buffer into the heap.
- for (auto& reader : readers) {
- reader->MoveToNextRecord(record_parser_);
- }
- std::make_heap(readers.begin(), readers.end(), CompareRecordTime);
- size_t size = readers.size();
- while (size > 0) {
- std::pop_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
- PushRecordToRecordBuffer(readers[size - 1]);
- if (readers[size - 1]->MoveToNextRecord(record_parser_)) {
- std::push_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
- } else {
- size--;
- }
+ if (readers.empty()) {
+ break;
+ }
+ if (readers.size() == 1u) {
+ // Only one buffer has data, process it directly.
+ while (readers[0]->MoveToNextRecord(record_parser_)) {
+ PushRecordToRecordBuffer(readers[0]);
+ }
+ } else {
+ // Use a binary heap to merge records from different buffers. As records from the same buffer
+ // are already ordered by time, we only need to merge the first record from all buffers. And
+ // each time a record is popped from the heap, we put the next record from its buffer into
+ // the heap.
+ for (auto& reader : readers) {
+ reader->MoveToNextRecord(record_parser_);
+ }
+ std::make_heap(readers.begin(), readers.end(), CompareRecordTime);
+ size_t size = readers.size();
+ while (size > 0) {
+ std::pop_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
+ PushRecordToRecordBuffer(readers[size - 1]);
+ if (readers[size - 1]->MoveToNextRecord(record_parser_)) {
+ std::push_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
+ } else {
+ size--;
}
}
}
- ReadAuxDataFromKernelBuffer(&has_data);
- if (!has_data) {
- break;
- }
if (!SendDataNotificationToMainThread()) {
return false;
}
@@ -493,7 +462,7 @@ void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_recor
if (free_size < record_buffer_critical_level_) {
// When the free size in record buffer is below critical level, drop sample records to save
// space for more important records (like mmap or fork records).
- stat_.lost_samples++;
+ lost_samples_++;
return;
}
size_t stack_size_limit = stack_size_in_sample_record_;
@@ -540,10 +509,10 @@ void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_recor
memcpy(p + pos + new_stack_size, &new_stack_size, sizeof(uint64_t));
record_buffer_.FinishWrite();
if (new_stack_size < dyn_stack_size) {
- stat_.cut_stack_samples++;
+ cut_stack_samples_++;
}
} else {
- stat_.lost_samples++;
+ lost_samples_++;
}
return;
}
@@ -555,47 +524,9 @@ void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_recor
record_buffer_.FinishWrite();
} else {
if (header.type == PERF_RECORD_SAMPLE) {
- stat_.lost_samples++;
+ lost_samples_++;
} else {
- stat_.lost_non_samples++;
- }
- }
-}
-
-void RecordReadThread::ReadAuxDataFromKernelBuffer(bool* has_data) {
- for (auto& reader : kernel_record_readers_) {
- EventFd* event_fd = reader.GetEventFd();
- if (event_fd->HasAuxBuffer()) {
- char* buf[2];
- size_t size[2];
- uint64_t offset = event_fd->GetAvailableAuxData(&buf[0], &size[0], &buf[1], &size[1]);
- size_t aux_size = size[0] + size[1];
- if (aux_size == 0) {
- continue;
- }
- *has_data = true;
- AuxTraceRecord auxtrace(Align(aux_size, 8), offset, event_fd->Cpu(), 0, event_fd->Cpu());
- size_t alloc_size = auxtrace.size() + auxtrace.data->aux_size;
- if (record_buffer_.GetFreeSize() < alloc_size + record_buffer_critical_level_) {
- stat_.lost_aux_data_size += aux_size;
- } else {
- char* p = record_buffer_.AllocWriteSpace(alloc_size);
- CHECK(p != nullptr);
- MoveToBinaryFormat(auxtrace.Binary(), auxtrace.size(), p);
- MoveToBinaryFormat(buf[0], size[0], p);
- if (size[1] != 0) {
- MoveToBinaryFormat(buf[1], size[1], p);
- }
- size_t pad_size = auxtrace.data->aux_size - aux_size;
- if (pad_size != 0) {
- uint64_t pad = 0;
- memcpy(p, &pad, pad_size);
- }
- record_buffer_.FinishWrite();
- stat_.aux_data_size += aux_size;
- LOG(DEBUG) << "record aux data " << aux_size << " bytes";
- }
- event_fd->DiscardAuxData(aux_size);
+ lost_non_samples_++;
}
}
}