author    Andreas Gampe <agampe@google.com>  2018-04-11 11:23:10 -0700
committer Andreas Gampe <agampe@google.com>  2018-05-03 13:30:23 -0700
commit    e9e5710d43556989f48525202460971de3da772a (patch)
tree      39592d163ebebcbae8b74e842e485352f570972f
parent    d183243d0c16df7011226c6246318e65822b4df3 (diff)
download  extras-e9e5710d43556989f48525202460971de3da772a.tar.gz
Perfprofd: More scripts
Add conversion of JSON files to a SQLite database, and aggregation of data
in a SQLite database to a flamegraph.

(cherry picked from commit 597f40df90fae8b89ce7672da6ab4825eb234d10)

Bug: 73175642
Test: m
Merged-In: I70827c7395b6a60a59ed420503434ad4b84a105f
Change-Id: I70827c7395b6a60a59ed420503434ad4b84a105f
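For orientation, the new scripts form a pipeline: perf_proto_stack.py produces JSON, perf_proto_json2sqlite.py folds one or more JSON files into a SQLite database, and perf_proto_stack_sqlite_flame.py renders that database as a flame graph. A minimal invocation sketch (file names illustrative; the flags and the PYTHONPATH requirement are taken from the scripts below):

    python perf_proto_json2sqlite.py --sqlite-out sqlite.db perf1.json perf2.json
    PYTHONPATH=$PYTHONPATH:$ANDROID_BUILD_TOP/system/extras/simpleperf/scripts/inferno \
        python perf_proto_stack_sqlite_flame.py sqlite.db --html-out flame.html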
-rw-r--r--  perfprofd/scripts/perf_proto_json2sqlite.py        166
-rw-r--r--  perfprofd/scripts/perf_proto_stack.py                5
-rw-r--r--  perfprofd/scripts/perf_proto_stack_sqlite_flame.py  234
3 files changed, 404 insertions(+), 1 deletion(-)
diff --git a/perfprofd/scripts/perf_proto_json2sqlite.py b/perfprofd/scripts/perf_proto_json2sqlite.py
new file mode 100644
index 00000000..5725424d
--- /dev/null
+++ b/perfprofd/scripts/perf_proto_json2sqlite.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import itertools
+import json
+import sqlite3
+
+class SqliteWriter(object):
+ def __init__(self):
+ self.sample_count = 0
+ self.dso_map = {}
+ self.pid_map = {}
+ self.tid_map = {}
+ self.symbol_map = {}
+
+ def open(self, out):
+ self._conn = sqlite3.connect(out)
+ self._c = self._conn.cursor()
+ # Ensure tables exist
+ # The sample replicates pid and tid.
+ try:
+ self._c.execute('''CREATE TABLE pids (id integer PRIMARY KEY AUTOINCREMENT,
+ name text)''')
+ self._c.execute('''CREATE TABLE tids (id integer PRIMARY KEY AUTOINCREMENT,
+ name text)''')
+ self._c.execute('''CREATE TABLE syms (id integer PRIMARY KEY AUTOINCREMENT,
+ name text)''')
+ self._c.execute('''CREATE TABLE dsos (id integer PRIMARY KEY AUTOINCREMENT,
+ name text)''')
+ self._c.execute('''CREATE TABLE samples (id integer PRIMARY KEY AUTOINCREMENT,
+ pid_id int not null,
+ tid_id int not null)
+ ''')
+ self._c.execute('''CREATE TABLE stacks (sample_id int not null,
+ depth int not null,
+ dso_id int not null,
+ sym_id int not null,
+ offset int not null,
+ primary key (sample_id, depth))
+ ''')
+ except sqlite3.OperationalError:
+ pass # Tables already exist; reuse them.
+
+ def close(self):
+ self._conn.commit()
+ self._conn.close()
+
+ # Return the id for name, allocating the next id in table_dict_tmp if unseen.
+ # Tmp maps are merged into the persistent maps by flush().
+ def insert_into_tmp_or_get(self, name, table_dict, table_dict_tmp):
+ if name in table_dict:
+ return table_dict[name]
+ if name in table_dict_tmp:
+ return table_dict_tmp[name]
+ index = len(table_dict) + len(table_dict_tmp)
+ table_dict_tmp[name] = index
+ return index
+
+ def prepare(self):
+ self.dso_tmp_map = {}
+ self.pid_tmp_map = {}
+ self.tid_tmp_map = {}
+ self.symbol_tmp_map = {}
+ self.samples_tmp_list = []
+ self.stacks_tmp_list = []
+
+ def write_sqlite_index_table(self, table_dict, table_name):
+ for key, value in table_dict.iteritems():
+ self._c.execute("insert into {tn} values (?,?)".format(tn=table_name), (value,key))
+
+ def flush(self):
+ self.write_sqlite_index_table(self.pid_tmp_map, 'pids')
+ self.write_sqlite_index_table(self.tid_tmp_map, 'tids')
+ self.write_sqlite_index_table(self.dso_tmp_map, 'dsos')
+ self.write_sqlite_index_table(self.symbol_tmp_map, 'syms')
+
+ for sample in self.samples_tmp_list:
+ self._c.execute("insert into samples values (?,?,?)", sample)
+ for stack in self.stacks_tmp_list:
+ self._c.execute("insert into stacks values (?,?,?,?,?)", stack)
+
+ self.pid_map.update(self.pid_tmp_map)
+ self.tid_map.update(self.tid_tmp_map)
+ self.dso_map.update(self.dso_tmp_map)
+ self.symbol_map.update(self.symbol_tmp_map)
+
+ self.dso_tmp_map = {}
+ self.pid_tmp_map = {}
+ self.tid_tmp_map = {}
+ self.symbol_tmp_map = {}
+ self.samples_tmp_list = []
+ self.stacks_tmp_list = []
+
+ def add_sample(self, sample, tid_name_map):
+ sample_id = self.sample_count
+ self.sample_count = self.sample_count + 1
+
+ def get_name(pid, name_map):
+ if pid in name_map:
+ return name_map[pid]
+ pid_str = str(pid)
+ if pid_str in name_map:
+ return name_map[pid_str]
+ if pid == 0:
+ return "[kernel]"
+ return "[unknown]"
+
+ pid_name = get_name(sample[0], tid_name_map)
+ pid_id = self.insert_into_tmp_or_get(pid_name, self.pid_map, self.pid_tmp_map)
+ tid_name = get_name(sample[1], tid_name_map)
+ tid_id = self.insert_into_tmp_or_get(tid_name, self.tid_map, self.tid_tmp_map)
+
+ self.samples_tmp_list.append((sample_id, pid_id, tid_id))
+
+ stack_depth = 0
+ for entry in sample[2]:
+ sym_id = self.insert_into_tmp_or_get(entry[0], self.symbol_map, self.symbol_tmp_map)
+ dso = entry[2]
+ if dso is None:
+ dso = "None"
+ dso_id = self.insert_into_tmp_or_get(dso, self.dso_map, self.dso_tmp_map)
+
+ self.stacks_tmp_list.append((sample_id, stack_depth, dso_id, sym_id, entry[1]))
+
+ stack_depth = stack_depth + 1
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='''Process a set of perfprofd JSON files produced
+ by perf_proto_stack.py into a SQLite database''')
+
+ parser.add_argument('file', help='JSON files to parse and combine', metavar='file', nargs='+')
+
+ parser.add_argument('--sqlite-out', help='SQLite database output', type=str,
+ default='sqlite.db')
+
+ args = parser.parse_args()
+ if args is not None:
+ sql_out = SqliteWriter()
+ sql_out.open(args.sqlite_out)
+ sql_out.prepare()
+
+ for f in args.file:
+ print 'Processing %s' % (f)
+ fp = open(f, 'r')
+ data = json.load(fp)
+ fp.close()
+
+ for sample in data['samples']:
+ sql_out.add_sample(sample, data['names'])
+
+ sql_out.flush()
+
+ sql_out.close()
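As a quick sanity check on the schema this script creates, a small query sketch (assumes the default sqlite.db output; not part of this commit):

    import sqlite3

    conn = sqlite3.connect('sqlite.db')
    c = conn.cursor()
    # Hottest processes by sample count, joining the samples and pids tables above.
    c.execute('''select pids.name, count(*) as n
                 from samples join pids on pids.id = samples.pid_id
                 group by pids.name order by n desc limit 10''')
    for name, n in c.fetchall():
        print '%6d %s' % (n, name)
    conn.close()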
diff --git a/perfprofd/scripts/perf_proto_stack.py b/perfprofd/scripts/perf_proto_stack.py
index 1fdd938b..eb93eb6e 100644
--- a/perfprofd/scripts/perf_proto_stack.py
+++ b/perfprofd/scripts/perf_proto_stack.py
@@ -21,6 +21,7 @@
# mmma system/core/libunwindstack
import argparse
+from datetime import datetime
import itertools
import json
@@ -450,6 +451,7 @@ def run_cmd(x):
logging.debug('%r', cmd)
err_out = open('%s.err' % (f), 'w')
kill = lambda process: process.kill()
+ start = datetime.now()
p = subprocess.Popen(cmd, stderr=err_out)
kill_timer = Timer(3600, kill, [p])
try:
@@ -458,8 +460,9 @@ def run_cmd(x):
success = True
finally:
kill_timer.cancel()
- logging.warn('Ended %s', f)
err_out.close()
+ end = datetime.now()
+ logging.warn('Ended %s (%s)', f, str(end-start))
return '%s: %r' % (f, success)
def parallel_runner(args):
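The run_cmd change above brackets the child process with datetime timestamps so the completion log includes elapsed wall time. In isolation the pattern looks roughly like this (command and timeout are illustrative; the Timer mirrors the existing kill logic):

    import subprocess
    from datetime import datetime
    from threading import Timer

    def run_timed(cmd, timeout_secs):
        start = datetime.now()
        p = subprocess.Popen(cmd)
        # Kill the child if it outlives the timeout.
        kill_timer = Timer(timeout_secs, lambda proc: proc.kill(), [p])
        kill_timer.start()
        try:
            p.communicate()
        finally:
            kill_timer.cancel()  # Child finished; don't kill it.
        return datetime.now() - start

    print run_timed(['echo', 'hello'], 3600)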
diff --git a/perfprofd/scripts/perf_proto_stack_sqlite_flame.py b/perfprofd/scripts/perf_proto_stack_sqlite_flame.py
new file mode 100644
index 00000000..756062f1
--- /dev/null
+++ b/perfprofd/scripts/perf_proto_stack_sqlite_flame.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make sure that simpleperf's inferno is on the PYTHONPATH, e.g., run as
+# PYTHONPATH=$PYTHONPATH:$ANDROID_BUILD_TOP/system/extras/simpleperf/scripts/inferno python ..
+
+import argparse
+import itertools
+import sqlite3
+
+class Callsite(object):
+ def __init__(self, dso_id, sym_id):
+ self.dso_id = dso_id
+ self.sym_id = sym_id
+ self.count = 0
+ self.child_map = {}
+ self.id = self._get_next_callsite_id()
+
+ def add(self, dso_id, sym_id):
+ if (dso_id, sym_id) in self.child_map:
+ return self.child_map[(dso_id, sym_id)]
+ new_callsite = Callsite(dso_id, sym_id)
+ self.child_map[(dso_id, sym_id)] = new_callsite
+ return new_callsite
+
+ def child_count_to_self(self):
+ self.count = reduce(lambda x, y: x + y[1].count, self.child_map.iteritems(), 0)
+
+ def trim(self, local_threshold_in_percent, global_threshold):
+ local_threshold = local_threshold_in_percent * 0.01 * self.count
+ threshold = max(local_threshold, global_threshold)
+ for k, v in self.child_map.items():
+ if v.count < threshold:
+ del self.child_map[k]
+ for _, v in self.child_map.iteritems():
+ v.trim(local_threshold_in_percent, global_threshold)
+
+ def _get_str(self, id, m):
+ if id in m:
+ return m[id]
+ return str(id)
+
+ def print_callsite_ascii(self, depth, indent, dsos, syms):
+ print ' ' * indent + "%s (%s) [%d]" % (self._get_str(self.sym_id, syms),
+ self._get_str(self.dso_id, dsos),
+ self.count)
+ if depth == 0:
+ return
+ for v in sorted(self.child_map.itervalues(), key=lambda x: x.count, reverse=True):
+ v.print_callsite_ascii(depth - 1, indent + 1, dsos, syms)
+
+ # Functions for flamegraph compatibility.
+
+ callsite_counter = 0
+ @classmethod
+ def _get_next_callsite_id(cls):
+ cls.callsite_counter += 1
+ return cls.callsite_counter
+
+ def create_children_list(self):
+ self.children = sorted(self.child_map.itervalues(), key=lambda x: x.count, reverse=True)
+
+ def generate_offset(self, start_offset):
+ self.offset = start_offset
+ child_offset = start_offset
+ for child in self.children:
+ child_offset = child.generate_offset(child_offset)
+ return self.offset + self.count
+
+ def svgrenderer_compat(self, dsos, syms):
+ self.create_children_list()
+ self.method = self._get_str(self.sym_id, syms)
+ self.dso = self._get_str(self.dso_id, dsos)
+ self.offset = 0
+ for c in self.children:
+ c.svgrenderer_compat(dsos, syms)
+
+ def weight(self):
+ return float(self.count)
+
+ def get_max_depth(self):
+ if self.child_map:
+ return max([c.get_max_depth() for c in self.child_map.itervalues()]) + 1
+ return 1
+
+class SqliteReader(object):
+ def __init__(self):
+ self.root = Callsite("root", "root")
+ self.dsos = {}
+ self.syms = {}
+
+ def open(self, f):
+ self._conn = sqlite3.connect(f)
+ self._c = self._conn.cursor()
+
+ def close(self):
+ self._conn.close()
+
+ def read(self, local_threshold_in_percent, global_threshold_in_percent, limit):
+ # Read aux tables first, as we need to find the kernel symbols.
+ def read_table(name, dest_table):
+ self._c.execute('select id, name from %s' % (name))
+ while True:
+ rows = self._c.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ dest_table[row[0]] = row[1]
+
+ print 'Reading DSOs'
+ read_table('dsos', self.dsos)
+
+ print 'Reading symbol strings'
+ read_table('syms', self.syms)
+
+ kernel_sym_id = None
+ for i, v in self.syms.iteritems():
+ if v == '[kernel]':
+ kernel_sym_id = i
+ break
+
+ print 'Reading samples'
+ self._c.execute('''select sample_id, depth, dso_id, sym_id from stacks
+ order by sample_id asc, depth desc''')
+
+ last_sample_id = None
+ chain = None
+ count = 0
+ while True:
+ rows = self._c.fetchmany(100)
+
+ if not rows:
+ break
+ for row in rows:
+ if row[3] == kernel_sym_id and row[1] == 0:
+ # Skip kernel.
+ continue
+ if row[0] != last_sample_id:
+ last_sample_id = row[0]
+ chain = self.root
+ chain = chain.add(row[2], row[3])
+ chain.count = chain.count + 1
+
+ count = count + len(rows)
+ if limit is not None and count >= limit:
+ print 'Breaking as limit is reached'
+ break
+
+ self.root.child_count_to_self()
+ global_threshold = global_threshold_in_percent * 0.01 * self.root.count
+ self.root.trim(local_threshold_in_percent, global_threshold)
+
+ def print_data_ascii(self, depth):
+ self.root.print_callsite_ascii(depth, 0, self.dsos, self.syms)
+
+ def print_svg(self, filename, depth):
+ from svg_renderer import renderSVG
+ self.root.svgrenderer_compat(self.dsos, self.syms)
+ self.root.generate_offset(0)
+ f = open(filename, 'w')
+ f.write('''
+<html>
+<body>
+<div id='flamegraph_id' style='font-family: Monospace;'>
+<style type="text/css"> .s { stroke:black; stroke-width:0.5; cursor:pointer;} </style>
+<style type="text/css"> .t:hover { cursor:pointer; } </style>
+''')
+
+ class FakeProcess:
+ def __init__(self):
+ self.props = { 'trace_offcpu': False }
+ fake_process = FakeProcess()
+ renderSVG(fake_process, self.root, f, 'hot')
+
+ f.write('''
+</div>
+''')
+
+ # Emit script.js, if we can find it.
+ import os.path
+ import sys
+ script_js_rel = "../../simpleperf/scripts/inferno/script.js"
+ script_js = os.path.join(os.path.dirname(__file__), script_js_rel)
+ if os.path.exists(script_js):
+ f.write('<script>\n')
+ with open(script_js, 'r') as script_f:
+ f.write(script_f.read())
+ f.write('''
+</script>
+<br/><br/>
+<div>Navigate with WASD, zoom in with SPACE, zoom out with BACKSPACE.</div>
+<script>document.addEventListener('DOMContentLoaded', flamegraphInit);</script>
+</body>
+</html>
+''')
+ f.close()
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='''Translate a perfprofd SQLite database
+ into a flame graph representation''')
+
+ parser.add_argument('file', help='the sqlite database to use', metavar='file', type=str)
+
+ parser.add_argument('--html-out', help='output file for HTML flame graph', type=str)
+ parser.add_argument('--threshold', help='child threshold in percent', type=float, default=5)
+ parser.add_argument('--global-threshold', help='global threshold in percent', type=float,
+ default=.1)
+ parser.add_argument('--depth', help='depth to print to', type=int, default=10)
+ parser.add_argument('--limit', help='limit to given number of stack trace entries', type=int)
+
+ args = parser.parse_args()
+ if args is not None:
+ sql_out = SqliteReader()
+ sql_out.open(args.file)
+ sql_out.read(args.threshold, args.global_threshold, args.limit)
+ if args.html_out is None:
+ sql_out.print_data_ascii(args.depth)
+ else:
+ sql_out.print_svg(args.html_out, args.depth)
+ sql_out.close()
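Without --html-out, the reader falls back to print_data_ascii, which prints an indented "symbol (dso) [count]" tree trimmed by the per-parent --threshold and the --global-threshold percentages. Example invocations (database name illustrative):

    # ASCII call tree, 5 levels deep
    python perf_proto_stack_sqlite_flame.py sqlite.db --depth 5

    # HTML flame graph; inferno's svg_renderer must be on PYTHONPATH
    PYTHONPATH=$PYTHONPATH:$ANDROID_BUILD_TOP/system/extras/simpleperf/scripts/inferno \
        python perf_proto_stack_sqlite_flame.py sqlite.db --html-out flame.html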