author     Andreas Gampe <agampe@google.com>                          2018-05-11 21:02:56 +0000
committer  Android (Google) Code Review <android-gerrit@google.com>  2018-05-11 21:02:56 +0000
commit     0db2d7b9dc463e4db33456b4944bb62d7281ba93 (patch)
tree       aeddb69a1d9aec92dfd278245675c9fc8b2afb68
parent     34b5ed247cf41ff9889a9e9c3a413e017e690f58 (diff)
parent     18e09166a52b4377009e8b199823963df1936dcc (diff)
download   extras-0db2d7b9dc463e4db33456b4944bb62d7281ba93.tar.gz
Merge changes I8fd41da0,I9a7ad445,I4e15cd4f,I896cfe8c,I5d80887a, ... into pi-dev
* changes:
Perfprofd: Minor fixes
Perfprofd: Add string-based binder interface
Perfprofd: Add ScopeGuard for file deletion
Perfprofd: Fix binder interface
Perfprofd: Use the right parameter
Perfprofd: Expose more parameters in the simple binder interface
Perfprofd: Add process to configreader
Perfprofd: More scripts
Perfprofd: Separate symbol offset in samples
Perfprofd: Add parallel processing to stack script
Perfprofd: Update perf_proto_stack
-rw-r--r-- | perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl |  14
-rw-r--r-- | perfprofd/binder_interface/perfprofd_binder.cc             |  73
-rw-r--r-- | perfprofd/configreader.cc                                  |   9
-rw-r--r-- | perfprofd/perfprofdcore.cc                                 |  18
-rw-r--r-- | perfprofd/scripts/perf_proto_json2sqlite.py                | 166
-rw-r--r-- | perfprofd/scripts/perf_proto_stack.py                      | 606
-rw-r--r-- | perfprofd/scripts/perf_proto_stack_sqlite_flame.py         | 234
-rw-r--r-- | perfprofd/scripts/sorted_collection.py                     | 146
-rw-r--r-- | perfprofd/tests/perfprofd_test.cc                          |   8
9 files changed, 1144 insertions, 130 deletions
diff --git a/perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl b/perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl
index f022dbf4..e2628c71 100644
--- a/perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl
+++ b/perfprofd/binder_interface/aidl/android/os/IPerfProfd.aidl
@@ -21,8 +21,18 @@ interface IPerfProfd {
     /**
      * Start continuous profiling with the given parameters.
      */
-    void startProfiling(int profilingDuration, int profilingInterval,
-                        int iterations);
+    void startProfiling(int collectionInterval, int iterations,
+                        int process, int samplingPeriod, int samplingFrequency,
+                        int sampleDuration, boolean stackProfile,
+                        boolean useElfSymbolizer, boolean sendToDropbox);
+
+    /**
+     * Start continuous profiling with the given encoded parameters.
+     * Parameters should be encoded in the ConfigReader syntax,
+     * separated by colons.
+     */
+    void startProfilingString(String config);
 
     /**
      * Start profiling with the parameters in the given protobuf.
      */
diff --git a/perfprofd/binder_interface/perfprofd_binder.cc b/perfprofd/binder_interface/perfprofd_binder.cc
index 53394400..e4672c34 100644
--- a/perfprofd/binder_interface/perfprofd_binder.cc
+++ b/perfprofd/binder_interface/perfprofd_binder.cc
@@ -30,6 +30,7 @@
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 #include <binder/BinderService.h>
 #include <binder/IResultReceiver.h>
 #include <binder/Status.h>
@@ -66,9 +67,16 @@ class PerfProfdNativeService : public BinderService<PerfProfdNativeService>,
   status_t dump(int fd, const Vector<String16> &args) override;
 
-  Status startProfiling(int32_t profilingDuration,
-                        int32_t profilingInterval,
-                        int32_t iterations) override;
+  Status startProfiling(int32_t collectionInterval,
+                        int32_t iterations,
+                        int32_t process,
+                        int32_t samplingPeriod,
+                        int32_t samplingFrequency,
+                        int32_t sampleDuration,
+                        bool stackProfile,
+                        bool useElfSymbolizer,
+                        bool sendToDropbox) override;
+  Status startProfilingString(const String16& config) override;
   Status startProfilingProtobuf(const std::vector<uint8_t>& config_proto) override;
 
   Status stopProfiling() override;
@@ -105,15 +113,39 @@ status_t PerfProfdNativeService::dump(int fd, const Vector<String16> &args) {
   return NO_ERROR;
 }
 
-Status PerfProfdNativeService::startProfiling(int32_t profilingDuration,
-                                              int32_t profilingInterval,
-                                              int32_t iterations) {
+Status PerfProfdNativeService::startProfiling(int32_t collectionInterval,
+                                              int32_t iterations,
+                                              int32_t process,
+                                              int32_t samplingPeriod,
+                                              int32_t samplingFrequency,
+                                              int32_t sampleDuration,
+                                              bool stackProfile,
+                                              bool useElfSymbolizer,
+                                              bool sendToDropbox) {
   auto config_fn = [&](ThreadedConfig& config) {
     config = ThreadedConfig();  // Reset to a default config.
-    config.sample_duration_in_s = static_cast<uint32_t>(profilingDuration);
-    config.collection_interval_in_s = static_cast<uint32_t>(profilingInterval);
-    config.main_loop_iterations = static_cast<uint32_t>(iterations);
+    if (collectionInterval >= 0) {
+      config.collection_interval_in_s = collectionInterval;
+    }
+    if (iterations >= 0) {
+      config.main_loop_iterations = iterations;
+    }
+    if (process >= 0) {
+      config.process = process;
+    }
+    if (samplingPeriod > 0) {
+      config.sampling_period = samplingPeriod;
+    }
+    if (samplingFrequency > 0) {
+      config.sampling_frequency = samplingFrequency;
+    }
+    if (sampleDuration > 0) {
+      config.sample_duration_in_s = sampleDuration;
+    }
+    config.stack_profile = stackProfile;
+    config.use_elf_symbolizer = useElfSymbolizer;
+    config.send_to_dropbox = sendToDropbox;
   };
   std::string error_msg;
   if (!StartProfiling(config_fn, &error_msg)) {
@@ -121,6 +153,26 @@ Status PerfProfdNativeService::startProfiling(int32_t profilingDuration,
   }
   return Status::ok();
 }
+Status PerfProfdNativeService::startProfilingString(const String16& config) {
+  ConfigReader reader;
+  std::string error_msg;
+  // Split configuration along colon.
+  std::vector<std::string> args = base::Split(String8(config).string(), ":");
+  for (auto& arg : args) {
+    if (!reader.Read(arg, /* fail_on_error */ true)) {
+      error_msg = base::StringPrintf("Could not parse %s", arg.c_str());
+      return Status::fromExceptionCode(1, error_msg.c_str());
+    }
+  }
+  auto config_fn = [&](ThreadedConfig& config) {
+    config = ThreadedConfig();  // Reset to a default config.
+    reader.FillConfig(&config);
+  };
+  if (!StartProfiling(config_fn, &error_msg)) {
+    return Status::fromExceptionCode(1, error_msg.c_str());
+  }
+  return Status::ok();
+}
 Status PerfProfdNativeService::startProfilingProtobuf(const std::vector<uint8_t>& config_proto) {
   auto proto_loader_fn = [&config_proto](ProfilingConfig& proto_config) {
     return proto_config.ParseFromArray(config_proto.data(), config_proto.size());
@@ -307,7 +359,8 @@ status_t PerfProfdNativeService::onTransact(uint32_t _aidl_code,
     }
 
     default:
-      return BBinder::onTransact(_aidl_code, _aidl_data, _aidl_reply, _aidl_flags);
+      return ::android::os::BnPerfProfd::onTransact(
+          _aidl_code, _aidl_data, _aidl_reply, _aidl_flags);
   }
 }
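The new startProfilingString() entry point takes a single string of ConfigReader entries joined by colons. As a rough sketch of what a caller might assemble (the helper below is hypothetical; the key names are the ones touched in this change, see configreader.cc for the full set):

    # Build a config string for IPerfProfd.startProfilingString().
    # ConfigReader syntax is key=value; entries are joined with ':'.
    def build_config_string(**entries):
        return ':'.join('%s=%s' % (k, v) for k, v in sorted(entries.items()))

    print(build_config_string(process=1234, sampling_frequency=4000, stack_profile=1))
    # -> process=1234:sampling_frequency=4000:stack_profile=1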
+ addUnsignedEntry("process", static_cast<uint32_t>(-1), 0, UINT32_MAX); } void ConfigReader::addUnsignedEntry(const char *key, @@ -333,6 +337,7 @@ void ConfigReader::FillConfig(Config* config) { config->perf_path = getStringValue("perf_path"); config->sampling_period = getUnsignedValue("sampling_period"); + config->sampling_frequency = getUnsignedValue("sampling_frequency"); config->sample_duration_in_s = getUnsignedValue("sample_duration"); @@ -352,7 +357,7 @@ void ConfigReader::FillConfig(Config* config) { config->collect_booting = getBoolValue("collect_booting"); config->collect_camera_active = getBoolValue("collect_camera_active"); - config->process = -1; + config->process = static_cast<int32_t>(getUnsignedValue("process")); config->use_elf_symbolizer = getBoolValue("use_elf_symbolizer"); config->compress = getBoolValue("compress"); config->send_to_dropbox = getBoolValue("dropbox"); diff --git a/perfprofd/perfprofdcore.cc b/perfprofd/perfprofdcore.cc index d7b0e9b4..73332a3f 100644 --- a/perfprofd/perfprofdcore.cc +++ b/perfprofd/perfprofdcore.cc @@ -36,6 +36,7 @@ #include <android-base/file.h> #include <android-base/logging.h> #include <android-base/macros.h> +#include <android-base/scopeguard.h> #include <android-base/stringprintf.h> #ifdef __BIONIC__ @@ -456,7 +457,6 @@ PROFILE_RESULT encode_to_proto(const std::string &data_file_path, // static PROFILE_RESULT invoke_perf(Config& config, const std::string &perf_path, - unsigned sampling_period, const char *stack_profile_opt, unsigned duration, const std::string &data_file_path, @@ -481,7 +481,7 @@ static PROFILE_RESULT invoke_perf(Config& config, } // marshall arguments - constexpr unsigned max_args = 15; + constexpr unsigned max_args = 17; const char *argv[max_args]; unsigned slot = 0; argv[slot++] = perf_path.c_str(); @@ -495,17 +495,20 @@ static PROFILE_RESULT invoke_perf(Config& config, std::string p_str; if (config.sampling_frequency > 0) { argv[slot++] = "-f"; - p_str = android::base::StringPrintf("%u", sampling_period); + p_str = android::base::StringPrintf("%u", config.sampling_frequency); argv[slot++] = p_str.c_str(); } else if (config.sampling_period > 0) { argv[slot++] = "-c"; - p_str = android::base::StringPrintf("%u", sampling_period); + p_str = android::base::StringPrintf("%u", config.sampling_period); argv[slot++] = p_str.c_str(); } // -g if desired - if (stack_profile_opt) + if (stack_profile_opt) { argv[slot++] = stack_profile_opt; + argv[slot++] = "-m"; + argv[slot++] = "8192"; + } std::string pid_str; if (config.process < 0) { @@ -651,17 +654,18 @@ static ProtoUniquePtr collect_profile(Config& config) bool take_action = (hardwire && duration <= max_duration); HardwireCpuHelper helper(take_action); + auto scope_guard = android::base::make_scope_guard( + [&data_file_path]() { unlink(data_file_path.c_str()); }); + // // Invoke perf // const char *stack_profile_opt = (config.stack_profile ? 
"-g" : nullptr); const std::string& perf_path = config.perf_path; - uint32_t period = config.sampling_period; PROFILE_RESULT ret = invoke_perf(config, perf_path.c_str(), - period, stack_profile_opt, duration, data_file_path, diff --git a/perfprofd/scripts/perf_proto_json2sqlite.py b/perfprofd/scripts/perf_proto_json2sqlite.py new file mode 100644 index 00000000..5725424d --- /dev/null +++ b/perfprofd/scripts/perf_proto_json2sqlite.py @@ -0,0 +1,166 @@ +#!/usr/bin/python +# +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import itertools +import json +import sqlite3 + +class SqliteWriter(object): + def __init__(self): + self.sample_count = 0 + self.dso_map = {} + self.pid_map = {} + self.tid_map = {} + self.symbol_map = {} + + def open(self, out): + self._conn = sqlite3.connect(out) + self._c = self._conn.cursor() + # Ensure tables exist + # The sample replicates pid and tid. + try: + self._c.execute('''CREATE TABLE pids (id integer PRIMARY KEY AUTOINCREMENT, + name text)''') + self._c.execute('''CREATE TABLE tids (id integer PRIMARY KEY AUTOINCREMENT, + name text)''') + self._c.execute('''CREATE TABLE syms (id integer PRIMARY KEY AUTOINCREMENT, + name text)''') + self._c.execute('''CREATE TABLE dsos (id integer PRIMARY KEY AUTOINCREMENT, + name text)''') + self._c.execute('''CREATE TABLE samples (id integer PRIMARY KEY AUTOINCREMENT, + pid_id int not null, + tid_id int not null) + ''') + self._c.execute('''CREATE TABLE stacks (sample_id int not null, + depth int not null, + dso_id int not null, + sym_id int not null, + offset int not null, + primary key (sample_id, depth)) + ''') + except sqlite3.OperationalError: + pass # ignore + + def close(self): + self._conn.commit() + self._conn.close() + + def insert_into_tmp_or_get(self, name, table_dict, table_dict_tmp): + if name in table_dict: + return table_dict[name] + if name in table_dict_tmp: + return table_dict_tmp[name] + index = len(table_dict) + len(table_dict_tmp) + table_dict_tmp[name] = index + return index + + def prepare(self): + self.dso_tmp_map = {} + self.pid_tmp_map = {} + self.tid_tmp_map = {} + self.symbol_tmp_map = {} + self.samples_tmp_list = [] + self.stacks_tmp_list = [] + + def write_sqlite_index_table(self, table_dict, table_name): + for key, value in table_dict.iteritems(): + self._c.execute("insert into {tn} values (?,?)".format(tn=table_name), (value,key)) + + def flush(self): + self.write_sqlite_index_table(self.pid_tmp_map, 'pids') + self.write_sqlite_index_table(self.tid_tmp_map, 'tids') + self.write_sqlite_index_table(self.dso_tmp_map, 'dsos') + self.write_sqlite_index_table(self.symbol_tmp_map, 'syms') + + for sample in self.samples_tmp_list: + self._c.execute("insert into samples values (?,?,?)", sample) + for stack in self.stacks_tmp_list: + self._c.execute("insert into stacks values (?,?,?,?,?)", stack) + + self.pid_map.update(self.pid_tmp_map) + self.tid_map.update(self.tid_tmp_map) + self.dso_map.update(self.dso_tmp_map) + 
diff --git a/perfprofd/scripts/perf_proto_json2sqlite.py b/perfprofd/scripts/perf_proto_json2sqlite.py
new file mode 100644
index 00000000..5725424d
--- /dev/null
+++ b/perfprofd/scripts/perf_proto_json2sqlite.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import itertools
+import json
+import sqlite3
+
+class SqliteWriter(object):
+  def __init__(self):
+    self.sample_count = 0
+    self.dso_map = {}
+    self.pid_map = {}
+    self.tid_map = {}
+    self.symbol_map = {}
+
+  def open(self, out):
+    self._conn = sqlite3.connect(out)
+    self._c = self._conn.cursor()
+    # Ensure tables exist
+    # The sample replicates pid and tid.
+    try:
+      self._c.execute('''CREATE TABLE pids (id integer PRIMARY KEY AUTOINCREMENT,
+                                            name text)''')
+      self._c.execute('''CREATE TABLE tids (id integer PRIMARY KEY AUTOINCREMENT,
+                                            name text)''')
+      self._c.execute('''CREATE TABLE syms (id integer PRIMARY KEY AUTOINCREMENT,
+                                            name text)''')
+      self._c.execute('''CREATE TABLE dsos (id integer PRIMARY KEY AUTOINCREMENT,
+                                            name text)''')
+      self._c.execute('''CREATE TABLE samples (id integer PRIMARY KEY AUTOINCREMENT,
+                                               pid_id int not null,
+                                               tid_id int not null)
+                      ''')
+      self._c.execute('''CREATE TABLE stacks (sample_id int not null,
+                                              depth int not null,
+                                              dso_id int not null,
+                                              sym_id int not null,
+                                              offset int not null,
+                                              primary key (sample_id, depth))
+                      ''')
+    except sqlite3.OperationalError:
+      pass  # ignore
+
+  def close(self):
+    self._conn.commit()
+    self._conn.close()
+
+  def insert_into_tmp_or_get(self, name, table_dict, table_dict_tmp):
+    if name in table_dict:
+      return table_dict[name]
+    if name in table_dict_tmp:
+      return table_dict_tmp[name]
+    index = len(table_dict) + len(table_dict_tmp)
+    table_dict_tmp[name] = index
+    return index
+
+  def prepare(self):
+    self.dso_tmp_map = {}
+    self.pid_tmp_map = {}
+    self.tid_tmp_map = {}
+    self.symbol_tmp_map = {}
+    self.samples_tmp_list = []
+    self.stacks_tmp_list = []
+
+  def write_sqlite_index_table(self, table_dict, table_name):
+    for key, value in table_dict.iteritems():
+      self._c.execute("insert into {tn} values (?,?)".format(tn=table_name), (value, key))
+
+  def flush(self):
+    self.write_sqlite_index_table(self.pid_tmp_map, 'pids')
+    self.write_sqlite_index_table(self.tid_tmp_map, 'tids')
+    self.write_sqlite_index_table(self.dso_tmp_map, 'dsos')
+    self.write_sqlite_index_table(self.symbol_tmp_map, 'syms')
+
+    for sample in self.samples_tmp_list:
+      self._c.execute("insert into samples values (?,?,?)", sample)
+    for stack in self.stacks_tmp_list:
+      self._c.execute("insert into stacks values (?,?,?,?,?)", stack)
+
+    self.pid_map.update(self.pid_tmp_map)
+    self.tid_map.update(self.tid_tmp_map)
+    self.dso_map.update(self.dso_tmp_map)
+    self.symbol_map.update(self.symbol_tmp_map)
+
+    self.dso_tmp_map = {}
+    self.pid_tmp_map = {}
+    self.tid_tmp_map = {}
+    self.symbol_tmp_map = {}
+    self.samples_tmp_list = []
+    self.stacks_tmp_list = []
+
+  def add_sample(self, sample, tid_name_map):
+    sample_id = self.sample_count
+    self.sample_count = self.sample_count + 1
+
+    def get_name(pid, name_map):
+      if pid in name_map:
+        return name_map[pid]
+      pid_str = str(pid)
+      if pid_str in name_map:
+        return name_map[pid_str]
+      if pid == 0:
+        return "[kernel]"
+      return "[unknown]"
+
+    pid_name = get_name(sample[0], tid_name_map)
+    pid_id = self.insert_into_tmp_or_get(pid_name, self.pid_map, self.pid_tmp_map)
+    tid_name = get_name(sample[1], tid_name_map)
+    tid_id = self.insert_into_tmp_or_get(tid_name, self.tid_map, self.tid_tmp_map)
+
+    self.samples_tmp_list.append((sample_id, pid_id, tid_id))
+
+    stack_depth = 0
+    for entry in sample[2]:
+      sym_id = self.insert_into_tmp_or_get(entry[0], self.symbol_map, self.symbol_tmp_map)
+      dso = entry[2]
+      if dso is None:
+        dso = "None"
+      dso_id = self.insert_into_tmp_or_get(dso, self.dso_map, self.dso_tmp_map)
+
+      self.stacks_tmp_list.append((sample_id, stack_depth, dso_id, sym_id, entry[1]))
+
+      stack_depth = stack_depth + 1
+
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser(description='''Process a set of perfprofd JSON files produced
+      by perf_proto_stack.py into SQLite database''')
+
+  parser.add_argument('file', help='JSON files to parse and combine', metavar='file', nargs='+')
+
+  parser.add_argument('--sqlite-out', help='SQLite database output', type=str,
+                      default='sqlite.db')
+
+  args = parser.parse_args()
+  if args is not None:
+    sql_out = SqliteWriter()
+    sql_out.open(args.sqlite_out)
+    sql_out.prepare()
+
+    for f in args.file:
+      print 'Processing %s' % (f)
+      fp = open(f, 'r')
+      data = json.load(fp)
+      fp.close()
+
+      for sample in data['samples']:
+        sql_out.add_sample(sample, data['names'])
+
+      sql_out.flush()
+
+    sql_out.close()
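The intended flow is to fold one or more JSON files produced by perf_proto_stack.py into one database and then query it directly. A sketch with illustrative file names:

    # python perf_proto_json2sqlite.py --sqlite-out combined.db a.json b.json
    # Then, for example, the top-5 processes by sample count:
    import sqlite3
    conn = sqlite3.connect('combined.db')
    query = '''select p.name, count(*) from samples s
               join pids p on p.id = s.pid_id
               group by p.name order by 2 desc limit 5'''
    for name, count in conn.execute(query):
        print('%s: %d' % (name, count))
    conn.close()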
diff --git a/perfprofd/scripts/perf_proto_stack.py b/perfprofd/scripts/perf_proto_stack.py
index 03693589..eb93eb6e 100644
--- a/perfprofd/scripts/perf_proto_stack.py
+++ b/perfprofd/scripts/perf_proto_stack.py
@@ -17,121 +17,519 @@
 # Super simplistic printer of a perfprofd output proto. Illustrates
 # how to parse and traverse a perfprofd output proto in Python.
+# This relies on libunwindstack's unwind_symbol. Build with
+#   mmma system/core/libunwindstack
+
+import argparse
+from datetime import datetime
+import itertools
+import json
+
+import logging
+logging.basicConfig(format = "%(message)s")
+
+from multiprocessing.dummy import Pool as ThreadPool
+import os.path
+from sorted_collection import SortedCollection
+import subprocess
+from threading import Timer
+
+
 # Generate with:
-#  aprotoc -I=system/extras/perfprofd --python_out=system/extras/perfprofd/scripts \
-#      system/extras/perfprofd/perf_profile.proto
-import perf_profile_pb2
+#  aprotoc -I=external/perf_data_converter/src/quipper \
+#      --python_out=system/extras/perfprofd/scripts \
+#      external/perf_data_converter/src/quipper/perf_data.proto
+#  aprotoc -I=external/perf_data_converter/src/quipper -I=system/extras/perfprofd \
+#      --python_out=system/extras/perfprofd/scripts \
+#      system/extras/perfprofd/perfprofd_record.proto
+import perfprofd_record_pb2
 
 # Make sure that symbol is on the PYTHONPATH, e.g., run as
 # PYTHONPATH=$PYTHONPATH:$ANDROID_BUILD_TOP/development/scripts python ...
 import symbol
+from symbol import SymbolInformation
 
 # This is wrong. But then the symbol module is a bad quagmire.
+# TODO: Check build IDs.
 symbol.SetAbi(["ABI: 'arm64'"])
-print "Reading symbols from", symbol.SYMBOLS_DIR
-
-# TODO: accept argument for parsing.
-file = open('perf.data.encoded.0', 'rb')
-data = file.read()
-
-profile = perf_profile_pb2.AndroidPerfProfile()
-profile.ParseFromString(data)
-
-print "Total samples: ", profile.total_samples
-
-module_list = profile.load_modules
-
-counters = {}
-
-def indent(txt, stops = 1):
-  return '\n'.join('  ' * stops + line for line in txt.splitlines())
-
-
-def print_samples(module_list, programs, process_names, counters):
-  print 'Samples:'
-  for program in programs:
-    process_name = '?'
-    if program.HasField('process_name_id'):
-      process_name = process_names[program.process_name_id]
-    print indent('%s (%s)' % (program.name, process_name), 1)
-    for module in program.modules:
-      if module.HasField('load_module_id'):
-        module_descr = module_list[module.load_module_id]
-        print indent(module_descr.name, 2)
-        has_build_id = module_descr.HasField('build_id')
-        if has_build_id:
-          print indent('Build ID: %s' % (module_descr.build_id), 3)
-        for addr in module.address_samples:
-          # TODO: Stacks vs single samples.
-          addr_rel = addr.address[0]
-          addr_rel_hex = "%x" % addr_rel
-          print indent('%d %s' % (addr.count, addr_rel_hex), 3)
-          if module_descr.name != '[kernel.kallsyms]':
-            if has_build_id:
-              info = symbol.SymbolInformation(module_descr.name, addr_rel_hex)
-              # As-is, only info[0] (inner-most inlined function) is recognized.
-              (source_symbol, source_location, object_symbol_with_offset) = info[0]
-              if object_symbol_with_offset is not None:
-                print indent(object_symbol_with_offset, 4)
-              if source_symbol is not None:
-                for (sym_inlined, loc_inlined, _) in info:
-                  # TODO: Figure out what's going on here:
-                  if sym_inlined is not None:
-                    print indent(sym_inlined, 5)
-                  else:
-                    print indent('???', 5)
-                  if loc_inlined is not None:
-                    print ' %s' % (indent(loc_inlined, 5))
-            elif module_descr.symbol and (addr_rel & 0x8000000000000000 != 0):
-              index = 0xffffffffffffffff - addr_rel
-              source_symbol = module_descr.symbol[index]
-              print indent(source_symbol, 4)
-            counters_key = None
-            if source_symbol is not None:
-              counters_key = (module_descr.name, source_symbol)
-            else:
-              counters_key = (module_descr.name, addr_rel_hex)
-            if counters_key in counters:
-              counters[counters_key] = counters[counters_key] + addr.count
-            else:
-              counters[counters_key] = addr.count
+
+class MmapState(object):
+  def __init__(self):
+    self._list = SortedCollection((), lambda x : x[0])
+
+  def add_map(self, start, length, pgoff, name):
+    tuple = (start, length, pgoff, name)
+    self._list.insert(tuple)
+
+  def find(self, addr):
+    try:
+      tuple = self._list.find_le(addr)
+      if addr < tuple[0] + tuple[1]:
+        return tuple
+      return None
+    except ValueError:
+      return None
+
+  def copy(self):
+    ret = MmapState()
+    ret._list = self._list.copy()
+    return ret
+
+  def __str__(self):
+    return "MmapState: " + self._list.__str__()
+  def __repr__(self):
+    return self.__str__()
+
+class SymbolMap(object):
+  def __init__(self, min_v):
+    self._list = SortedCollection((), lambda x : x[0])
+    self._min_vaddr = min_v
+
+  def add_symbol(self, start, length, name):
+    tuple = (start, length, name)
+    self._list.insert(tuple)
+
+  def find(self, addr):
+    try:
+      tuple = self._list.find_le(addr)
+      if addr < tuple[0] + tuple[1]:
+        return tuple[2]
+      return None
+    except ValueError:
+      return None
+
+  def copy(self):
+    ret = SymbolMap()
+    ret._list = self._list.copy()
+    return ret
+
+  def __str__(self):
+    return "SymbolMap: " + self._list.__str__()
+  def __repr__(self):
+    return self.__str__()
+
+def intern_uni(u):
+  return intern(u.encode('ascii', 'replace'))
+
+def collect_tid_names(perf_data):
+  tid_name_map = {}
+  for event in perf_data.events:
+    if event.HasField('comm_event'):
+      tid_name_map[event.comm_event.tid] = intern_uni(event.comm_event.comm)
+  return tid_name_map
+
+def create_symbol_maps(profile):
+  symbol_maps = {}
+  for si in profile.symbol_info:
+    map = SymbolMap(si.min_vaddr)
+    symbol_maps[si.filename] = map
+    for sym in si.symbols:
+      map.add_symbol(sym.addr, sym.size, intern_uni(sym.name))
+  return symbol_maps
+
+def update_mmap_states(event, state_map):
+  if event.HasField('mmap_event'):
+    mmap_event = event.mmap_event
+    # Skip kernel stuff.
+    if mmap_event.tid == 0:
+      return
+    # Create new map, if necessary.
+    if not mmap_event.pid in state_map:
+      state_map[mmap_event.pid] = MmapState()
+    state_map[mmap_event.pid].add_map(mmap_event.start, mmap_event.len, mmap_event.pgoff,
+                                      intern_uni(mmap_event.filename))
+  elif event.HasField('fork_event'):
+    fork_event = event.fork_event
+    # Skip threads
+    if fork_event.pid == fork_event.ppid:
+      return
+    if fork_event.ppid not in state_map:
+      logging.warn("fork from %d without map", fork_event.ppid)
+      return
+    state_map[fork_event.pid] = state_map[fork_event.ppid].copy()
+
+skip_dso = set()
+vaddr = {}
+
+def find_vaddr(vaddr_map, filename):
+  if filename in vaddr_map:
+    return vaddr_map[filename]
+
+  path = "%s/%s" % (symbol.SYMBOLS_DIR, filename)
+  if not os.path.isfile(path):
+    logging.warn('Cannot find %s for min_vaddr', filename)
+    vaddr_map[filename] = 0
+    return 0
+
+  try:
+    # Use "-W" to have single-line format.
+    res = subprocess.check_output(['readelf', '-lW', path])
+    lines = res.split("\n")
+    reading_headers = False
+    min_vaddr = None
+    min_fn = lambda x, y: y if x is None else min(x, y)
+    # Using counting loop for access to next line.
+    for i in range(0, len(lines) - 1):
+      line = lines[i].strip()
+      if reading_headers:
+        if line == "":
+          # Block is done, won't find anything else.
+          break
+        if line.startswith("LOAD"):
+          # Look at the current line to distinguish 32-bit from 64-bit
+          line_split = line.split()
+          if len(line_split) >= 8:
+            if " R E " in line:
+              # Found something expected. So parse VirtAddr.
+              try:
+                min_vaddr = min_fn(min_vaddr, int(line_split[2], 0))
+              except ValueError:
+                pass
+          else:
+            logging.warn('Could not parse readelf line %s', line)
+      else:
+        if line.strip() == "Program Headers:":
+          reading_headers = True
+
+    if min_vaddr is None:
+      min_vaddr = 0
+    logging.debug("min_vaddr for %s is %d", filename, min_vaddr)
+    vaddr_map[filename] = min_vaddr
+  except subprocess.CalledProcessError:
+    logging.warn('Error finding min_vaddr for %s', filename)
+    vaddr_map[filename] = 0
+  return vaddr_map[filename]
+
+unwind_symbols_cache = {}
+unwind_symbols_warn_missing_cache = set()
+def run_unwind_symbols(filename, offset_hex):
+  path = "%s/%s" % (symbol.SYMBOLS_DIR, filename)
+  if not os.path.isfile(path):
+    if path not in unwind_symbols_warn_missing_cache:
+      logging.warn('Cannot find %s for unwind_symbols', filename)
+      unwind_symbols_warn_missing_cache.add(path)
+    return None
+
+  if (path, offset_hex) in unwind_symbols_cache:
+    pair = unwind_symbols_cache[(path, offset_hex)]
+    if pair is None:
+      return None
+    return [(pair[0], pair[1], filename)]
+
+  try:
+    res = subprocess.check_output(['unwind_symbols', path, offset_hex])
+    lines = res.split("\n")
+    for line in lines:
+      if line.startswith('<0x'):
+        parts = line.split(' ', 1)
+        if len(parts) == 2:
+          # Get offset, too.
+          offset = 0
+          plus_index = parts[0].find('>+')
+          if plus_index > 0:
+            offset_str = parts[0][plus_index + 2:-1]
+            try:
+              offset = int(offset_str)
+            except ValueError:
+              logging.warn('error parsing offset from %s', parts[0])
+
+          # TODO C++ demangling necessary.
+          logging.debug('unwind_symbols: %s %s -> %s +%d', filename, offset_hex, parts[1],
+                        offset)
+          sym = intern(parts[1])
+          unwind_symbols_cache[(path, offset_hex)] = (sym, offset)
+          return [(sym, offset, filename)]
+  except subprocess.CalledProcessError:
+    logging.warn('Failed running unwind_symbols for %s', filename)
+  unwind_symbols_cache[(path, offset_hex)] = None
+  return None
+
+
+def decode_with_symbol_lib(name, addr_rel_hex):
+  info = SymbolInformation(name, addr_rel_hex)
+  # As-is, only info[0] (inner-most inlined function) is recognized.
+  (source_symbol, source_location, object_symbol_with_offset) = info[0]
+
+  def parse_symbol_lib_output(s):
+    i = s.rfind('+')
+    if i > 0:
+      try:
+        off = int(s[i+1:])
+        return (s[0:i], off)
+      except ValueError:
+        pass
+    return (s, 0)
+
+  ret = []
+
+  if object_symbol_with_offset is not None:
+    pair = parse_symbol_lib_output(object_symbol_with_offset)
+    ret.append((intern(pair[0]), pair[1], name))
+  if source_symbol is not None:
+    iterinfo = iter(info)
+    next(iterinfo)
+    for (sym_inlined, loc_inlined, _) in iterinfo:
+      # TODO: Figure out what's going on here:
+      if sym_inlined is not None:
+        pair = parse_symbol_lib_output(sym_inlined)
+        ret.insert(0, (intern(pair[0]), pair[1], name))
+  if len(ret) > 0:
+    return ret
+  return None
+
+def decode_addr(addr, mmap_state, device_symbols):
+  """Try to decode the given address against the current mmap table and device symbols.
+
+  First, look up the address in the mmap state. If none is found, use a simple address
+  heuristic to guess kernel frames on 64-bit devices.
+
+  Next, check on-device symbolization for a hit.
+
+  Last, try to symbolize against host information. First try the symbol module. However,
+  as it is based on addr2line, it will not work for pure-gnu_debugdata DSOs (e.g., ART
+  preopt artifacts). For that case, use libunwindstack's unwind_symbols.
+  """
+
+  map = mmap_state.find(addr)
+  if map is None:
+    # If it looks large enough, assume it's from
+    # the kernel.
+    if addr > 18000000000000000000:
+      return [("[kernel]", 0, "[kernel]")]
+    return [("%d (no mapped segment)" % addr, 0, None)]
+  name = map[3]
+  logging.debug('%d is %s (%d +%d)', addr, name, map[0], map[1])
+
+  # Once relocation packer is off, it would be:
+  #   offset = addr - map.start + map.pgoff
+  # Right now it is
+  #   offset = addr - map.start (+ min_vaddr)
+  # Note that on-device symbolization doesn't include min_vaddr but
+  # does include pgoff.
+  offset = addr - map[0]
+
+  if name in device_symbols:
+    offset = offset + map[2]
+    symbol = device_symbols[name].find(offset)
+    if symbol is None:
+      return [("%s (missing on-device symbol)" % (name), offset, name)]
+    else:
+      # TODO: Should we change the format?
+      return [(symbol, 0, name)]
+  offset = offset + find_vaddr(vaddr, name)
+  if (name, offset) in skip_dso:
+    # We already failed, skip symbol finding.
+    return [(name, offset, name)]
+  else:
+    addr_rel_hex = intern("%x" % offset)
+    ret = decode_with_symbol_lib(name, addr_rel_hex)
+    if ret is not None and len(ret) != 0:
+      # Addr2line may report oatexec+xyz. Let unwind_symbols take care of that.
+      if len(ret) != 1 or ret[0][0] != 'oatexec':
+        logging.debug('Got result from symbol module: %s', str(ret))
+        return ret
+    # Try unwind_symbols
+    ret = run_unwind_symbols(name, addr_rel_hex)
+    if ret is not None and len(ret) != 0:
+      return ret
+    logging.warn("Failed to find symbol for %s +%d (%d)", name, offset, addr)
+    # Remember the fail.
+    skip_dso.add((name, offset))
+    return [(name, offset, name)]
+
+
+def print_sample(sample, tid_name_map):
+  if sample[0] in tid_name_map:
+    pid_name = "%s (%d)" % (tid_name_map[sample[0]], sample[0])
+  elif sample[0] == 0:
+    pid_name = "kernel (0)"
+  else:
+    pid_name = "unknown (%d)" % (sample[0])
+  if sample[1] in tid_name_map:
+    tid_name = "%s (%d)" % (tid_name_map[sample[1]], sample[1])
+  elif sample[1] == 0:
+    tid_name = "kernel (0)"
+  else:
+    tid_name = "unknown (%d)" % (sample[1])
+  print "  %s - %s:" % (pid_name, tid_name)
+  for sym in sample[2]:
+    print "    %s +%d (%s)" % (sym[0], sym[1], sym[2])
+
+def print_samples(samples, tid_name_map):
+  for sample in samples:
+    print_sample(sample, tid_name_map)
+
+def symbolize_events(perf_data, device_symbols, tid_name_map, printSamples = False,
+                     removeKernelTop = False):
+  samples = []
+  mmap_states = {}
+  for event in perf_data.events:
+    update_mmap_states(event, mmap_states)
+    if event.HasField('sample_event'):
+      sample_ev = event.sample_event
+      # Handle sample.
+      new_sample = None
+      if sample_ev.pid in mmap_states:
+        mmap_state = mmap_states[sample_ev.pid]
+        ip_sym = decode_addr(sample_ev.ip, mmap_state, device_symbols)
+        stack = ip_sym
+        for cc_ip in sample_ev.callchain:
+          cc_sym = decode_addr(cc_ip, mmap_state, device_symbols)
+          stack.extend(cc_sym)
+        if removeKernelTop:
+          while len(stack) > 1 and stack[0][0] == "[kernel]":
+            stack.pop(0)
+        new_sample = (sample_ev.pid, sample_ev.tid, stack)
       else:
-        print indent('<Missing module>', 2)
+        # Handle kernel symbols specially.
+        if sample_ev.pid == 0:
+          samples.append((0, sample_ev.tid, [("[kernel]", 0, "[kernel]")]))
+        elif sample_ev.pid in tid_name_map:
+          samples.append((sample_ev.pid, sample_ev.tid, [(tid_name_map[sample_ev.pid], 0,
+                                                          None)]))
+        else:
+          samples.append((sample_ev.pid, sample_ev.tid, [("[unknown]", 0, None)]))
+      if new_sample is not None:
+        samples.append(new_sample)
+        if printSamples:
+          print_sample(new_sample, tid_name_map)
+  return samples
 
-def print_histogram(counters, size):
+def count_key_reduce_function(x, y, key_fn):
+  key = key_fn(y)
+  if key not in x:
+    x[key] = 0
+  x[key] = x[key] + 1
+  return x
+
+def print_histogram(samples, reduce_key_fn, label_key_fn, size):
   # Create a sorted list of top samples.
-  counter_list = []
-  for key, value in counters.iteritems():
-    temp = (key,value)
-    counter_list.append(temp)
-  counter_list.sort(key=lambda counter: counter[1], reverse=True)
+  sorted_count_list = sorted(
+      reduce(lambda x, y: count_key_reduce_function(x, y, reduce_key_fn), samples, {}).
+          iteritems(),
+      cmp=lambda x,y: cmp(x[1], y[1]),
+      reverse=True)
+  sorted_count_topX = list(itertools.islice(sorted_count_list, size))
 
   # Print top-size samples.
   print 'Histogram top-%d:' % (size)
-  for i in xrange(0, min(len(counter_list), size)):
-    print indent('%d: %s' % (i+1, counter_list[i]), 1)
-
-def print_modules(module_list):
-  print 'Modules:'
-  for module in module_list:
-    print indent(module.name, 1)
-    if module.HasField('build_id'):
-      print indent('Build ID: %s' % (module.build_id), 2)
-    print indent('Symbols:', 2)
-    for symbol in module.symbol:
-      print indent(symbol, 3)
-
-def print_process_names(process_names):
-  print 'Processes:'
-  for proc in process_names:
-    print indent(proc, 1)
-
-if profile.HasField('process_names'):
-  process_names = profile.process_names.name
-else:
-  process_names = []
-
-print_samples(module_list, profile.programs, process_names, counters)
-print_modules(module_list)
-print_histogram(counters, 100)
-print_process_names(process_names)
+  for i in xrange(0, len(sorted_count_topX)):
+    print '  %d: %s (%s)' % (i+1, label_key_fn(sorted_count_topX[i][0]),
+                             sorted_count_topX[i][1])
+
+def get_name(pid):
+  if pid in tid_name_map:
+    return tid_name_map[pid]
+  if pid == 0:
+    return "[kernel]"
+  return "[unknown]"
+
+def create_cmd(args, f):
+  ret = ['python', '-u', 'system/extras/perfprofd/scripts/perf_proto_stack.py']
+  if args.syms is not None:
+    ret.extend(['--syms', args.syms[0]])
+  if args.print_samples is not None:
+    ret.append('--print-samples')
+  if args.skip_kernel_syms is not None:
+    ret.append('--skip-kernel-syms')
+  if args.print_pid_histogram is not None:
+    ret.append('--print-pid-histogram')
+  if args.print_sym_histogram is not None:
+    ret.append('--print-sym-histogram')
+  if args.print_dso_histogram is not None:
+    ret.append('--print-dso-histogram')
+  ret.extend(['--json-out', '%s.json' % (f)])
+  ret.append(f)
+  return ret
+
+def run_cmd(x):
+  args = x[0]
+  f = x[1]
+  cmd = create_cmd(args,f)
+  logging.warn('Running on %s', f)
+  success = False
+  logging.debug('%r', cmd)
+  err_out = open('%s.err' % (f), 'w')
+  kill = lambda process: process.kill()
+  start = datetime.now()
+  p = subprocess.Popen(cmd, stderr=err_out)
+  kill_timer = Timer(3600, kill, [p])
+  try:
+    kill_timer.start()
+    stdout, stderr = p.communicate()
+    success = True
+  finally:
+    kill_timer.cancel()
+  err_out.close()
+  end = datetime.now()
+  logging.warn('Ended %s (%s)', f, str(end-start))
+  return '%s: %r' % (f, success)
+
+def parallel_runner(args):
+  pool = ThreadPool(args.parallel)
+  map_args = map(lambda f: (args, f), args.file)
+  result = pool.map(run_cmd, map_args)
+  pool.close()
+  pool.join()
+  print result
+
+def run(args):
+  if args.syms is not None:
+    symbol.SYMBOLS_DIR = args.syms[0]
+  print_symbols = args.print_samples is not None
+  skip_kernel_syms = args.skip_kernel_syms is not None
+
+  # TODO: accept argument for parsing.
+  file = open(args.file[0], 'rb')
+  data = file.read()
+  file.close()
+
+  profile = perfprofd_record_pb2.PerfprofdRecord()
+  profile.ParseFromString(data)
+
+  perf_data = profile.perf_data
+
+  print "Stats: ", perf_data.stats
+
+  tid_name_map = collect_tid_names(perf_data)
+  symbol_maps = create_symbol_maps(profile)
+
+  samples = symbolize_events(perf_data, symbol_maps, tid_name_map, printSamples=print_symbols,
+                             removeKernelTop=skip_kernel_syms)
+
+  if args.print_pid_histogram is not None:
+    print_histogram(samples, lambda x: x[0], lambda x: get_name(x), 25)
+  if args.print_sym_histogram is not None:
+    print_histogram(samples, lambda x: x[2][0][0], lambda x: x, 100)
+  if args.print_dso_histogram is not None:
+    print_histogram(samples, lambda x: x[2][0][2], lambda x: x, 25)
+
+  if args.json_out is not None:
+    json_file = open(args.json_out[0], 'w')
+    json_data = { 'samples': samples, 'names': tid_name_map }
+    json.dump(json_data, json_file)
+    json_file.close()
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser(description='Process a perfprofd record.')
+
+  parser.add_argument('file', help='proto file to parse', metavar='file', nargs='+')
+  parser.add_argument('--syms', help='directory for symbols', nargs=1)
+  parser.add_argument('--json-out', help='output file for JSON', nargs=1)
+  parser.add_argument('--print-samples', help='print samples', action='store_const', const=True)
+  parser.add_argument('--skip-kernel-syms', help='skip kernel symbols at the top of stack',
+                      action='store_const', const=True)
+  parser.add_argument('--print-pid-histogram', help='print a top-25 histogram of processes',
+                      action='store_const', const=True)
+  parser.add_argument('--print-sym-histogram', help='print a top-100 histogram of symbols',
+                      action='store_const', const=True)
+  parser.add_argument('--print-dso-histogram', help='print a top-25 histogram of maps',
+                      action='store_const', const=True)
+  parser.add_argument('--parallel', help='run parallel jobs', type=int)
+
+  args = parser.parse_args()
+  if args is not None:
+    if args.parallel is not None:
+      parallel_runner(args)
+    else:
+      run(args)
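The offset arithmetic in decode_addr() is easy to misread, so here it is restated as a standalone sketch with made-up numbers: while the relocation packer is in use, host symbolization wants the DSO's min_vaddr added to the map-relative offset, whereas the on-device symbol tables want the map's pgoff instead:

    map_start, map_len, pgoff = 0x7000000000, 0x40000, 0x2000
    min_vaddr = 0x1000                          # parsed from 'readelf -lW' LOAD/"R E" lines
    addr = 0x7000012345
    host_offset = addr - map_start + min_vaddr  # handed to the symbol module / unwind_symbols
    device_offset = addr - map_start + pgoff    # looked up in the on-device SymbolMap
    print('%x %x' % (host_offset, device_offset))  # 13345 14345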
diff --git a/perfprofd/scripts/perf_proto_stack_sqlite_flame.py b/perfprofd/scripts/perf_proto_stack_sqlite_flame.py
new file mode 100644
index 00000000..756062f1
--- /dev/null
+++ b/perfprofd/scripts/perf_proto_stack_sqlite_flame.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make sure that simpleperf's inferno is on the PYTHONPATH, e.g., run as
+# PYTHONPATH=$PYTHONPATH:$ANDROID_BUILD_TOP/system/extras/simpleperf/scripts/inferno python ..
+
+import argparse
+import itertools
+import sqlite3
+
+class Callsite(object):
+  def __init__(self, dso_id, sym_id):
+    self.dso_id = dso_id
+    self.sym_id = sym_id
+    self.count = 0
+    self.child_map = {}
+    self.id = self._get_next_callsite_id()
+
+  def add(self, dso_id, sym_id):
+    if (dso_id, sym_id) in self.child_map:
+      return self.child_map[(dso_id, sym_id)]
+    new_callsite = Callsite(dso_id, sym_id)
+    self.child_map[(dso_id, sym_id)] = new_callsite
+    return new_callsite
+
+  def child_count_to_self(self):
+    self.count = reduce(lambda x, y: x + y[1].count, self.child_map.iteritems(), 0)
+
+  def trim(self, local_threshold_in_percent, global_threshold):
+    local_threshold = local_threshold_in_percent * 0.01 * self.count
+    threshold = max(local_threshold, global_threshold)
+    for k, v in self.child_map.items():
+      if v.count < threshold:
+        del self.child_map[k]
+    for _, v in self.child_map.iteritems():
+      v.trim(local_threshold_in_percent, global_threshold)
+
+  def _get_str(self, id, m):
+    if id in m:
+      return m[id]
+    return str(id)
+
+  def print_callsite_ascii(self, depth, indent, dsos, syms):
+
+    print ' ' * indent + "%s (%s) [%d]" % (self._get_str(self.sym_id, syms),
+                                           self._get_str(self.dso_id, dsos),
+                                           self.count)
+    if depth == 0:
+      return
+    for v in sorted(self.child_map.itervalues, key=lambda x: x.count, reverse=True):
+      v.print_callsite_ascii(depth - 1, indent + 1, dsos, syms)
+
+  # Functions for flamegraph compatibility.
+
+  callsite_counter = 0
+  @classmethod
+  def _get_next_callsite_id(cls):
+    cls.callsite_counter += 1
+    return cls.callsite_counter
+
+  def create_children_list(self):
+    self.children = sorted(self.child_map.itervalues(), key=lambda x: x.count, reverse=True)
+
+  def generate_offset(self, start_offset):
+    self.offset = start_offset
+    child_offset = start_offset
+    for child in self.children:
+      child_offset = child.generate_offset(child_offset)
+    return self.offset + self.count
+
+  def svgrenderer_compat(self, dsos, syms):
+    self.create_children_list()
+    self.method = self._get_str(self.sym_id, syms)
+    self.dso = self._get_str(self.dso_id, dsos)
+    self.offset = 0
+    for c in self.children:
+      c.svgrenderer_compat(dsos, syms)
+
+  def weight(self):
+    return float(self.count)
+
+  def get_max_depth(self):
+    if self.child_map:
+      return max([c.get_max_depth() for c in self.child_map.itervalues()]) + 1
+    return 1
+
+class SqliteReader(object):
+  def __init__(self):
+    self.root = Callsite("root", "root")
+    self.dsos = {}
+    self.syms = {}
+
+  def open(self, f):
+    self._conn = sqlite3.connect(f)
+    self._c = self._conn.cursor()
+
+  def close(self):
+    self._conn.close()
+
+  def read(self, local_threshold_in_percent, global_threshold_in_percent, limit):
+    # Read aux tables first, as we need to find the kernel symbols.
+    def read_table(name, dest_table):
+      self._c.execute('select id, name from %s' % (name))
+      while True:
+        rows = self._c.fetchmany(100)
+        if not rows:
+          break
+        for row in rows:
+          dest_table[row[0]] = row[1]
+
+    print 'Reading DSOs'
+    read_table('dsos', self.dsos)
+
+    print 'Reading symbol strings'
+    read_table('syms', self.syms)
+
+    kernel_sym_id = None
+    for i, v in self.syms.iteritems():
+      if v == '[kernel]':
+        kernel_sym_id = i
+        break
+
+    print 'Reading samples'
+    self._c.execute('''select sample_id, depth, dso_id, sym_id from stacks
+                       order by sample_id asc, depth desc''')
+
+    last_sample_id = None
+    chain = None
+    count = 0
+    while True:
+      rows = self._c.fetchmany(100)
+
+      if not rows:
+        break
+      for row in rows:
+        if row[3] == kernel_sym_id and row[1] == 0:
+          # Skip kernel.
+          continue
+        if row[0] != last_sample_id:
+          last_sample_id = row[0]
+          chain = self.root
+        chain = chain.add(row[2], row[3])
+        chain.count = chain.count + 1
+
+      count = count + len(rows)
+      if limit is not None and count >= limit:
+        print 'Breaking as limit is reached'
+        break
+
+    self.root.child_count_to_self()
+    global_threshold = global_threshold_in_percent * 0.01 * self.root.count
+    self.root.trim(local_threshold_in_percent, global_threshold)
+
+  def print_data_ascii(self, depth):
+    self.root.print_callsite_ascii(depth, 0, self.dsos, self.syms)
+
+  def print_svg(self, filename, depth):
+    from svg_renderer import renderSVG
+    self.root.svgrenderer_compat(self.dsos, self.syms)
+    self.root.generate_offset(0)
+    f = open(filename, 'w')
+    f.write('''
+<html>
+<body>
+<div id='flamegraph_id' style='font-family: Monospace;'>
+<style type="text/css"> .s { stroke:black; stroke-width:0.5; cursor:pointer;} </style>
+<style type="text/css"> .t:hover { cursor:pointer; } </style>
+''')
+
+    class FakeProcess:
+      def __init__(self):
+        self.props = { 'trace_offcpu': False }
+    fake_process = FakeProcess()
+    renderSVG(fake_process, self.root, f, 'hot')
+
+    f.write('''
+</div>
+''')
+
+    # Emit script.js, if we can find it.
+    import os.path
+    import sys
+    script_js_rel = "../../simpleperf/scripts/inferno/script.js"
+    script_js = os.path.join(os.path.dirname(__file__), script_js_rel)
+    if os.path.exists(script_js):
+      f.write('<script>\n')
+      with open(script_js, 'r') as script_f:
+        f.write(script_f.read())
+      f.write('''
+</script>
+<br/><br/>
+<div>Navigate with WASD, zoom in with SPACE, zoom out with BACKSPACE.</div>
+<script>document.addEventListener('DOMContentLoaded', flamegraphInit);</script>
+</body>
+</html>
+''')
+    f.close()
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser(description='''Translate a perfprofd database into a flame
+      representation''')
+
+  parser.add_argument('file', help='the sqlite database to use', metavar='file', type=str)
+
+  parser.add_argument('--html-out', help='output file for HTML flame graph', type=str)
+  parser.add_argument('--threshold', help='child threshold in percent', type=float, default=5)
+  parser.add_argument('--global-threshold', help='global threshold in percent', type=float,
+                      default=.1)
+  parser.add_argument('--depth', help='depth to print to', type=int, default=10)
+  parser.add_argument('--limit', help='limit to given number of stack trace entries', type=int)
+
+  args = parser.parse_args()
+  if args is not None:
+    sql_out = SqliteReader()
+    sql_out.open(args.file)
+    sql_out.read(args.threshold, args.global_threshold, args.limit)
+    if args.html_out is None:
+      sql_out.print_data_ascii(args.depth)
+    else:
+      sql_out.print_svg(args.html_out, args.depth)
+    sql_out.close()
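Callsite.trim() applies two bars at once: a child has to clear a percentage of its parent's count and a percentage of the root count. A small sketch of that decision with the script's default thresholds:

    # A child survives trim() only if it clears both thresholds.
    def survives(child_count, parent_count, root_count,
                 local_percent=5.0, global_percent=0.1):
        threshold = max(local_percent * 0.01 * parent_count,
                        global_percent * 0.01 * root_count)
        return child_count >= threshold

    print(survives(30, 1000, 100000))   # False: 30 < max(50.0, 100.0)
    print(survives(150, 1000, 100000))  # True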
diff --git a/perfprofd/scripts/sorted_collection.py b/perfprofd/scripts/sorted_collection.py
new file mode 100644
index 00000000..315f7c89
--- /dev/null
+++ b/perfprofd/scripts/sorted_collection.py
@@ -0,0 +1,146 @@
+# Note: Taken from https://code.activestate.com/recipes/577197-sortedcollection/.
+#
+# Copyright 2010 Raymond Hettinger
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in the
+# Software without restriction, including without limitation the rights to use, copy,
+# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so, subject to the
+# following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies
+# or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+# DEALINGS IN THE SOFTWARE.
+
+from bisect import bisect_left, bisect_right
+
+class SortedCollection(object):
+    def __init__(self, iterable=(), key=None):
+        self._given_key = key
+        key = (lambda x: x) if key is None else key
+        decorated = sorted((key(item), item) for item in iterable)
+        self._keys = [k for k, item in decorated]
+        self._items = [item for k, item in decorated]
+        self._key = key
+
+    def _getkey(self):
+        return self._key
+
+    def _setkey(self, key):
+        if key is not self._key:
+            self.__init__(self._items, key=key)
+
+    def _delkey(self):
+        self._setkey(None)
+
+    key = property(_getkey, _setkey, _delkey, 'key function')
+
+    def clear(self):
+        self.__init__([], self._key)
+
+    def copy(self):
+        return self.__class__(self, self._key)
+
+    def __len__(self):
+        return len(self._items)
+
+    def __getitem__(self, i):
+        return self._items[i]
+
+    def __iter__(self):
+        return iter(self._items)
+
+    def __reversed__(self):
+        return reversed(self._items)
+
+    def __repr__(self):
+        return '%s(%r, key=%s)' % (
+            self.__class__.__name__,
+            self._items,
+            getattr(self._given_key, '__name__', repr(self._given_key))
+        )
+
+    def __reduce__(self):
+        return self.__class__, (self._items, self._given_key)
+
+    def __contains__(self, item):
+        k = self._key(item)
+        i = bisect_left(self._keys, k)
+        j = bisect_right(self._keys, k)
+        return item in self._items[i:j]
+
+    def index(self, item):
+        'Find the position of an item.  Raise ValueError if not found.'
+        k = self._key(item)
+        i = bisect_left(self._keys, k)
+        j = bisect_right(self._keys, k)
+        return self._items[i:j].index(item) + i
+
+    def count(self, item):
+        'Return number of occurrences of item'
+        k = self._key(item)
+        i = bisect_left(self._keys, k)
+        j = bisect_right(self._keys, k)
+        return self._items[i:j].count(item)
+
+    def insert(self, item):
+        'Insert a new item.  If equal keys are found, add to the left'
+        k = self._key(item)
+        i = bisect_left(self._keys, k)
+        self._keys.insert(i, k)
+        self._items.insert(i, item)
+
+    def insert_right(self, item):
+        'Insert a new item.  If equal keys are found, add to the right'
+        k = self._key(item)
+        i = bisect_right(self._keys, k)
+        self._keys.insert(i, k)
+        self._items.insert(i, item)
+
+    def remove(self, item):
+        'Remove first occurence of item.  Raise ValueError if not found'
+        i = self.index(item)
+        del self._keys[i]
+        del self._items[i]
+
+    def find(self, k):
+        'Return first item with a key == k.  Raise ValueError if not found.'
+        i = bisect_left(self._keys, k)
+        if i != len(self) and self._keys[i] == k:
+            return self._items[i]
+        raise ValueError('No item found with key equal to: %r' % (k,))
+
+    def find_le(self, k):
+        'Return last item with a key <= k.  Raise ValueError if not found.'
+        i = bisect_right(self._keys, k)
+        if i:
+            return self._items[i-1]
+        raise ValueError('No item found with key at or below: %r' % (k,))
+
+    def find_lt(self, k):
+        'Return last item with a key < k.  Raise ValueError if not found.'
+        i = bisect_left(self._keys, k)
+        if i:
+            return self._items[i-1]
+        raise ValueError('No item found with key below: %r' % (k,))
+
+    def find_ge(self, k):
+        'Return first item with a key >= equal to k.  Raise ValueError if not found'
+        i = bisect_left(self._keys, k)
+        if i != len(self):
+            return self._items[i]
+        raise ValueError('No item found with key at or above: %r' % (k,))
+
+    def find_gt(self, k):
+        'Return first item with a key > k.  Raise ValueError if not found'
+        i = bisect_right(self._keys, k)
+        if i != len(self):
+            return self._items[i]
+        raise ValueError('No item found with key above: %r' % (k,))
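This is the collection backing MmapState and SymbolMap above: tuples keyed on their start address, with find_le() returning the closest entry at or below a lookup address. For example:

    from sorted_collection import SortedCollection

    maps = SortedCollection((), key=lambda x: x[0])
    maps.insert((0x1000, 0x200, 0, 'liba.so'))  # (start, length, pgoff, name)
    maps.insert((0x4000, 0x100, 0, 'libb.so'))
    m = maps.find_le(0x10f0)                    # last entry starting at or below the address
    assert m[3] == 'liba.so' and 0x10f0 < m[0] + m[1]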
diff --git a/perfprofd/tests/perfprofd_test.cc b/perfprofd/tests/perfprofd_test.cc
index 6c5c2783..a96fd710 100644
--- a/perfprofd/tests/perfprofd_test.cc
+++ b/perfprofd/tests/perfprofd_test.cc
@@ -613,7 +613,6 @@ TEST_F(PerfProfdTest, ConfigFileParsing)
   runner.addToConfig("destination_directory=/does/not/exist");
 
   // assorted bad syntax
-  runner.addToConfig("collection_interval=0");
   runner.addToConfig("collection_interval=-1");
   runner.addToConfig("nonexistent_key=something");
   runner.addToConfig("no_equals_stmt");
@@ -626,10 +625,9 @@ TEST_F(PerfProfdTest, ConfigFileParsing)
 
   // Verify log contents
   const std::string expected = RAW_RESULT(
-      W: line 6: specified value 0 for 'collection_interval' outside permitted range [1 4294967295] (ignored)
-      W: line 7: malformed unsigned value (ignored)
-      W: line 8: unknown option 'nonexistent_key' ignored
-      W: line 9: line malformed (no '=' found)
+      W: line 6: malformed unsigned value (ignored)
+      W: line 7: unknown option 'nonexistent_key' ignored
+      W: line 8: line malformed (no '=' found)
       );
 
   // check to make sure log excerpt matches