summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorandroid-build-team Robot <android-build-team-robot@google.com>2018-04-01 07:24:42 +0000
committerandroid-build-team Robot <android-build-team-robot@google.com>2018-04-01 07:24:42 +0000
commitd262359acf951644b13b33af3aa1f7062613699a (patch)
treee85d1375977b5ae8d1b12aa7dd44af7c1870f2b4
parent72de33d4de33177a52578de49dd8a232c12716a7 (diff)
parent8c2705f568fffc69a6b22707d49771dac627f75c (diff)
downloadextras-d262359acf951644b13b33af3aa1f7062613699a.tar.gz
Snap for 4693621 from 8c2705f568fffc69a6b22707d49771dac627f75c to pi-release
Change-Id: Ibcf3790c83f8c4330912a71e98d0ef679ef9ae74
-rwxr-xr-xboottime_tools/io_analysis/check_file_read.py441
-rw-r--r--boottime_tools/io_analysis/check_io_trace.py193
-rw-r--r--boottime_tools/io_analysis/check_io_trace_all.py386
-rw-r--r--boottime_tools/io_analysis/check_verity.py139
-rw-r--r--ext4_utils/ext4_crypt.cpp25
-rw-r--r--perfprofd/Android.bp45
-rw-r--r--perfprofd/binder_interface/Android.bp3
-rw-r--r--perfprofd/binder_interface/perfprofd_binder.cc83
-rw-r--r--perfprofd/binder_interface/perfprofd_config.proto3
-rw-r--r--perfprofd/config.h3
-rw-r--r--perfprofd/configreader.cc4
-rw-r--r--perfprofd/dropbox/Android.bp55
-rw-r--r--perfprofd/dropbox/dropbox.cc129
-rw-r--r--perfprofd/dropbox/dropbox.h37
-rw-r--r--perfprofd/dropbox/dropbox_host.cc35
-rw-r--r--perfprofd/perfprofd_cmdline.cc255
-rw-r--r--perfprofd/perfprofd_cmdline.h39
-rw-r--r--perfprofd/perfprofdcore.cc244
-rw-r--r--perfprofd/perfprofdcore.h21
-rw-r--r--perfprofd/perfprofdmain.cc1
-rw-r--r--perfprofd/tests/Android.bp5
-rw-r--r--perfprofd/tests/perfprofd_test.cc1
-rw-r--r--simpleperf/Android.mk2
-rw-r--r--verity/Android.mk14
-rw-r--r--verity/BootSignature.java21
-rw-r--r--verity/verify_boot_signature.c463
26 files changed, 1836 insertions, 811 deletions
diff --git a/boottime_tools/io_analysis/check_file_read.py b/boottime_tools/io_analysis/check_file_read.py
new file mode 100755
index 00000000..5f629f3a
--- /dev/null
+++ b/boottime_tools/io_analysis/check_file_read.py
@@ -0,0 +1,441 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Analyze ext4 trace with custom open trace"""
+import collections
+import math
+import os
+import re
+import string
+import sys
+
+DBG = False
+DBG_ISSUE = False
+
+# hard coded maps to detect partition for given device or the other way around
+# this can be different per each device. This works for marlin.
+DEVICE_TO_PARTITION = { "253,0": "/system/", "253,1": "/vendor/", "259,19": "/data/" }
+PARTITION_TO_DEVICE = {}
+# Reverse lookup built at import time. NOTE: iteritems() makes this whole
+# script Python 2 only (as do the print statements below).
+for key, value in DEVICE_TO_PARTITION.iteritems():
+ PARTITION_TO_DEVICE[value] = key
+
+# Regexes below each parse one ftrace event line; the comment above each one
+# shows a sample line the pattern is written against.
+# init-1 [003] .... 2.703964: do_sys_open: init: open("/sys/fs/selinux/null", 131074, 0) fd = 0, inode = 22
+RE_DO_SYS_OPEN = r"""\s+\S+-([0-9]+).*\s+([0-9]+\.[0-9]+):\s+do_sys_open:\s+(\S+):\sopen..(\S+).,\s([0-9]+).\s+.+inode\s=\s([0-9]+)"""
+# init-1 [003] ...1 2.703991: ext4_ext_map_blocks_enter: dev 253,0 ino 2719 lblk 154 len 30 flags
+RE_EXT4_MA_BLOCKS_ENTER = r"""\s+(\S+)-([0-9]+).+\s+([0-9]+\.[0-9]+):\s+ext4_ext_map_blocks_enter:\s+dev\s+(\S+)\s+ino\s+([0-9]+)\s+lblk\s+([0-9]+)\s+len\s+([0-9]+)"""
+# init-1 [002] ...1 2.687205: ext4_ext_map_blocks_exit: dev 253,0 ino 8 flags lblk 0 pblk 196608 len 1 mflags M ret 1
+RE_EXT4_MA_BLOCKS_EXIT = r"""\s+(\S+)-([0-9]+).+\s+([0-9]+\.[0-9]+):\s+ext4_ext_map_blocks_exit:\s+dev\s+(\S+)\s+ino\s+([0-9]+)\sflags.*\slblk\s+([0-9]+)\spblk\s([0-9]+)\slen\s+([0-9]+).*mflags\s(\S*)\sret\s([0-9]+)"""
+# init-1 [002] ...1 2.887119: block_bio_remap: 8,0 R 10010384 + 8 <- (259,18) 3998944
+RE_BLOCK_BIO_REMAP = r""".+block_bio_remap:\s\d+,\d+\s\S+\s(\d+)\s\+\s\d+\s<-\s\([^\)]+\)\s(\d+)"""
+# kworker/u9:1-83 [003] d..2 2.682991: block_rq_issue: 8,0 RA 0 () 10140208 + 32 [kworker/u9:1]
+RE_BLOCK_RQ_ISSUE = r"""\s+\S+-([0-9]+).*\s+([0-9]+\.[0-9]+):\s+block_rq_issue:\s+([0-9]+)\,([0-9]+)\s+([RW]\S*)\s[0-9]+\s\([^\)]*\)\s([0-9]+)\s+\+\s+([0-9]+)\s+\[([^\]]+)\]"""
+
+# Unit conversion factor between ext4 blocks and block-device sectors.
+EXT4_SIZE_TO_BLOCK_SIZE = 8 # ext4: 4KB, block device block size: 512B
+
+# Aggregated access statistics for one direction (reads OR writes) of a single
+# file; FileEvent owns one FileAccess for reads and one for writes.
+class FileAccess:
+ def __init__(self, file):
+ self.file = file
+ self.accesses = [] # list of (time, offset, size, process_name)
+ self.total_access = 0 # total blocks accessed (ext4 block units)
+ self.ext4_access_size_histogram = {} #key: read size, value: occurrence
+ self.block_access_size_histogram = {}
+ self.ext4_single_block_accesses = {} # process name, occurrence
+ self.block_single_block_accesses = {} # process name, occurrence
+ self.blocks_histogram = {} # K: offset, V: read counter
+
+ # Record `offset` under `process_name` in `container` only when the access
+ # covered exactly one block; used to spot inefficient single-block I/O.
+ def add_if_single_block(self, container, size, offset, process_name):
+ if size != 1:
+ return
+ offsets = container.get(process_name)
+ if not offsets:
+ offsets = []
+ container[process_name] = offsets
+ offsets.append(offset)
+
+ # Record one completed ext4-level access plus the block-level sub-reads
+ # (read_sizes) it was actually serviced with.
+ def add_access(self, time, offset, size, process_name, read_sizes):
+ self.accesses.append((time, offset, size, process_name))
+ self.total_access += size
+ self.ext4_access_size_histogram[size] = self.ext4_access_size_histogram.get(size, 0) + 1
+ read_offset = offset
+ for s in read_sizes:
+ self.block_access_size_histogram[s] = self.block_access_size_histogram.get(s, 0) + 1
+ self.add_if_single_block(self.block_single_block_accesses, s, read_offset, process_name)
+ read_offset += s
+ for i in range(size):
+ self.blocks_histogram[offset + i] = self.blocks_histogram.get(offset + i, 0) + 1
+ self.add_if_single_block(self.ext4_single_block_accesses, size, offset, process_name)
+
+ # Record several ext4-level accesses that the block layer completed with a
+ # single merged request; parallel lists offsets/lens/process_names.
+ def add_merged_access(self, time, offsets, lens, process_names):
+ total_read_offsets = set() # each read can overwrap. So count only once for block level counting
+ for i in range(len(offsets)):
+ self.accesses.append((time, offsets[i], lens[i], process_names[i]))
+ self.ext4_access_size_histogram[lens[i]] = self.ext4_access_size_histogram.get(lens[i], 0) + 1
+ self.add_if_single_block(self.ext4_single_block_accesses, lens[i], offsets[i], process_names[i])
+ # NOTE(review): iterating range(len(lens)) adds the same number of
+ # offsets for every entry; presumably range(lens[i]) was intended so the
+ # set covers offsets[i]..offsets[i]+lens[i]-1 -- confirm.
+ for j in range(len(lens)):
+ total_read_offsets.add(offsets[i] + j)
+ total_lens = len(total_read_offsets)
+ start_offset = min(total_read_offsets)
+ self.total_access += total_lens
+ self.block_access_size_histogram[total_lens] = self.block_access_size_histogram.get(total_lens, 0) \
+ + 1
+ self.add_if_single_block(self.block_single_block_accesses, total_lens, start_offset, \
+ process_names[0])
+ for s in range(total_lens):
+ self.blocks_histogram[start_offset + s] = self.blocks_histogram.get(start_offset + s, 0) + 1
+
+
+ # Print histograms, single-block access lists, and repeatedly-read offsets.
+ def dump(self):
+ if len(self.ext4_access_size_histogram) > 1:
+ print " Ext4 access size histograms:", collections.OrderedDict( \
+ sorted(self.ext4_access_size_histogram.items(), key = lambda item: item[0]))
+ if len(self.ext4_single_block_accesses) > 0 and self.total_access > 1:
+ print " Ext4 single block accesses:", collections.OrderedDict( \
+ sorted(self.ext4_single_block_accesses.items(), key = lambda item: item[1], reverse = True))
+ if len(self.block_access_size_histogram) > 1:
+ print " Block access size histograms:", collections.OrderedDict( \
+ sorted(self.block_access_size_histogram.items(), key = lambda item: item[0]))
+ if len(self.block_single_block_accesses) > 0 and self.total_access > 1:
+ print " Block single block accesses:", collections.OrderedDict( \
+ sorted(self.block_single_block_accesses.items(), key = lambda item: item[1], reverse = True))
+ if self.total_access > 1:
+ sorted_blocks_histogram = sorted(self.blocks_histogram.items(), key = lambda item: item[1], \
+ reverse = True)
+ prints = []
+ repeating_reads_counter = 0
+ for entry in sorted_blocks_histogram:
+ offset = entry[0]
+ counter = entry[1]
+ # entries are sorted by count descending, so the first count of 1
+ # means no more repeated offsets remain.
+ if counter == 1:
+ break
+ prints.append(str(offset) + ":" + str(counter))
+ repeating_reads_counter += (counter - 1)
+ if len(prints) > 0:
+ print " repeating accesses", repeating_reads_counter, " offset:count ->", ','.join(prints)
+
+# Per-inode record: every open of the file plus its read and write statistics.
+class FileEvent:
+ def __init__(self, open_time, file_name, process_name, inode, flags):
+ self.file_name = file_name
+ self.inode = inode
+ self.total_open = 1
+ self.processes = [] # list of (open_time, process_name, flags)
+ self.processes.append((open_time, process_name, flags))
+ self.read = FileAccess(self)
+ self.write = FileAccess(self)
+
+
+ # Record an additional open of the same inode.
+ def add_open(self, open_time, process_name, flags):
+ self.processes.append((open_time, process_name, flags))
+ self.total_open += 1
+
+ # Route a completed access to the read or write FileAccess.
+ def add_access(self, is_read, time, offset, size, process_name, read_sizes):
+ if is_read:
+ self.read.add_access(time, offset, size, process_name, read_sizes)
+ else:
+ self.write.add_access(time, offset, size, process_name, read_sizes)
+
+ # Route a merged (multi-request) completion to the read or write side.
+ def add_merged_access(self, is_read, time, offsets, lens, process_names):
+ if is_read:
+ self.read.add_merged_access(time, offsets, lens, process_names)
+ else:
+ self.write.add_merged_access(time, offsets, lens, process_names)
+
+ # Full report: openers (name-pid, time, flags) plus read/write detail dumps.
+ def dump(self, name_to_pid_map):
+ print " ***filename %s, total reads %d, total writes %d, total open %d inode %s" \
+ % (self.file_name, self.read.total_access, self.write.total_access, self.total_open,\
+ self.inode)
+ process_names = []
+ for opener in self.processes:
+ process_names.append(opener[1] + "-" + name_to_pid_map.get(opener[1], '?') + " t:" + \
+ str(opener[0]) + " flags:" + opener[2])
+ print " Processes opened this file:", ','.join(process_names)
+ if self.read.total_access > 0:
+ print " ****Reads:"
+ self.read.dump()
+ if self.write.total_access > 0:
+ print " ****Writes:"
+ self.write.dump()
+
+ # One-line summary used for the top-10 reader/writer lists.
+ def dump_short(self):
+ print " filename %s, total reads %d, total writes %d" % (self.file_name,
+ self.read.total_access, self.write.total_access)
+
+# One ext4 map_blocks request waiting to be matched against block_rq_issue
+# events. Tracks which of its ext4 blocks have been covered so far.
+class PendingAccess:
+ # NOTE(review): parameter `len` shadows the builtin; harmless here since the
+ # builtin is only needed for len() calls made after it is re-bound as an int
+ # attribute, but worth renaming if this code is ever touched.
+ def __init__(self, process_name, pid, time, dev, inode, lblk, pblk, len, fevent):
+ self.process_name = process_name
+ self.pid = pid
+ self.time = time
+ self.dev = dev
+ self.inode = inode
+ self.lblk = lblk # logical block (ext4 units)
+ self.pblk = pblk # physical address, already converted to 512B sectors
+ self.blk_len = len * EXT4_SIZE_TO_BLOCK_SIZE # length in 512B sectors
+ self.len = len # length in ext4 blocks
+ self.fevent = fevent
+ self.pending_accesses = set() # ext4-block indices not yet seen at block layer
+ for i in range(len):
+ self.pending_accesses.add(i)
+ self.access_sizes = [] # valid read for this file in block dev level.
+ self.block_access_counter = 0
+
+ # Translate a block-layer (sector) offset/length into this request's
+ # ext4-block coordinates, clamped to the request. Returns (offset, len);
+ # len == 0 means no overlap. Python 2 integer division is relied on here.
+ def get_valid_access(self, block_offset, block_len):
+ ext4_offset = block_offset / EXT4_SIZE_TO_BLOCK_SIZE
+ # NOTE(review): `>` lets ext4_offset == self.len through with a zero
+ # remaining length (clamped below), so behavior is still correct -- but
+ # `>=` would express the intent more directly; confirm before changing.
+ if ext4_offset > self.len:
+ return 0, 0
+ ext4_len = block_len / EXT4_SIZE_TO_BLOCK_SIZE
+ if (ext4_offset + ext4_len) > self.len:
+ ext4_len = self.len - ext4_offset
+ return ext4_offset, ext4_len
+
+ # Mark [ext4_offset, ext4_offset+ext4_len) as serviced; records the number
+ # of newly-covered blocks as one block-level access size.
+ def queue_block_access(self, ext4_offset, ext4_len):
+ if ext4_len <= 0:
+ return
+ self.block_access_counter += 1
+ ext4_blocks_accessed = 0
+ for i in range(ext4_len):
+ ext4_block_i = i + ext4_offset
+ if ext4_block_i in self.pending_accesses:
+ ext4_blocks_accessed += 1
+ self.pending_accesses.remove(ext4_block_i)
+ if ext4_blocks_accessed > 0:
+ self.access_sizes.append(ext4_blocks_accessed)
+
+ # All blocks covered: report the whole request to the owning FileEvent.
+ def handle_req_complete(self, time, is_read):
+ self.fevent.add_access(is_read, self.time, self.lblk, self.len, self.process_name,\
+ self.access_sizes)
+
+ # Report this request as part of a merged block request (see Trace).
+ def handle_merged_req(self, time, offsets, lens, names, is_read):
+ self.fevent.add_merged_access(is_read, time, offsets, lens, names)
+
+ def is_req_complete(self):
+ return len(self.pending_accesses) == 0
+
+ # True once at least one block has been serviced.
+ # NOTE(review): `is not` is an identity comparison on ints; it happens to
+ # work in CPython for small ints, but `!=` is the correct operator here.
+ def is_req_started(self):
+ return self.len is not len(self.pending_accesses)
+
+# Drives the analysis: parses each trace line and correlates do_sys_open,
+# ext4_ext_map_blocks_exit, block_bio_remap and block_rq_issue events into
+# per-device, per-inode FileEvent statistics.
+class Trace:
+ def __init__(self):
+ self.files_per_device = {} # key: device, value: { key: inode, value; FileEvent }
+ self.re_open = re.compile(RE_DO_SYS_OPEN)
+ self.re_ext4_access = re.compile(RE_EXT4_MA_BLOCKS_EXIT)
+ self.re_bio_remap = re.compile(RE_BLOCK_BIO_REMAP)
+ self.re_block_issue = re.compile(RE_BLOCK_RQ_ISSUE)
+ # req from ext4 that has not gone down to block level yet, K:block address,
+ # V: list of PendingRead
+ self.pending_accesses = {}
+ self.remap = {} # K: remapped sector, V: original sector (block_bio_remap)
+ self.process_names = {} # K: PID, V : name
+
+ # Dispatch one raw trace line to the first handler whose regex matches.
+ def handle_line(self, line):
+ match = self.re_open.match(line)
+ if match:
+ self.handle_open(match)
+ return
+ match = self.re_ext4_access.match(line)
+ if match:
+ self.handle_ext4_block_exit(match)
+ return
+ match = self.re_bio_remap.match(line)
+ if match:
+ self.handle_bio_remap(match)
+ return
+ match = self.re_block_issue.match(line)
+ if match:
+ self.handle_block_issue(match)
+ return
+
+ # do_sys_open: remember the PID->name mapping and record the open against
+ # the FileEvent for its inode; files outside the known partitions are
+ # ignored (the partition is inferred from the path prefix).
+ def handle_open(self, match):
+ pid = int(match.group(1))
+ time = match.group(2)
+ process_name = match.group(3)
+ file_name = match.group(4)
+ flag = match.group(5)
+ inode = int(match.group(6))
+ dev_name = None
+ self.process_names[pid] = process_name
+ #print "open", pid, process_name, file_name, inode
+ for p in PARTITION_TO_DEVICE:
+ if file_name.startswith(p):
+ dev_name = PARTITION_TO_DEVICE[p]
+ if not dev_name:
+ if DBG:
+ print "Ignore open for file", file_name
+ return
+ files = self.files_per_device[dev_name]
+ fevent = files.get(inode)
+ if not fevent:
+ fevent = FileEvent(time, file_name, process_name, inode, flag)
+ files[inode] = fevent
+ else:
+ fevent.add_open(time, process_name, flag)
+
+ # ext4_ext_map_blocks_exit: create a PendingAccess keyed by physical block
+ # address so later block_rq_issue events can be matched back to the file.
+ def handle_ext4_block_exit(self, match):
+ process_name = match.group(1)
+ pid = int(match.group(2))
+ time = float(match.group(3))
+ dev = match.group(4)
+ inode = int(match.group(5))
+ lblk = int(match.group(6))
+ pblk = int(match.group(7)) * EXT4_SIZE_TO_BLOCK_SIZE # address in ext4 blocks, ...
+ l = int(match.group(8))
+ mflags = match.group(9)
+ ret = int(match.group(10))
+ if ret <= 0: # no block access
+ return
+ # prefer the name recorded at open time; '<...>' is ftrace's placeholder
+ # for a truncated/unknown comm.
+ process_name = self.process_names.get(pid, process_name)
+ if process_name == '<...>':
+ process_name = "pid-" + str(pid)
+ if DBG_ISSUE:
+ print "ext4", pblk, l, inode, process_name
+ files = self.files_per_device.get(dev)
+ if not files:
+ if DEVICE_TO_PARTITION.get(dev):
+ files = {}
+ self.files_per_device[dev] = files
+ else:
+ if DBG:
+ print "access ignored for device", dev
+ return
+ fevent = files.get(inode)
+ if not fevent:
+ if DBG:
+ print 'no open for device %s with inode %s' % (dev, inode)
+ fevent = FileEvent(time, "unknown", process_name, inode, "-")
+ files[inode] = fevent
+ pending_access = PendingAccess(process_name, pid, time, dev, inode, lblk, pblk, l,\
+ fevent)
+ access_list = self.pending_accesses.get(pblk, [])
+ access_list.append(pending_access)
+ self.pending_accesses[pblk] = access_list
+
+ # block_bio_remap: remember new->old sector mapping so issued requests can
+ # be translated back to the address space the ext4 events used.
+ def handle_bio_remap(self, match):
+ new_addr = int(match.group(1))
+ old_addr = int(match.group(2))
+ self.remap[new_addr] = old_addr
+ if DBG_ISSUE:
+ print "remap", new_addr, old_addr
+
+ # block_rq_issue: match the issued request against pending ext4 accesses,
+ # completing them individually or as one merged request.
+ def handle_block_issue(self, match):
+ pid = int(match.group(1))
+ time = float(match.group(2))
+ dev_major = match.group(3)
+ dev_minor = match.group(4)
+ access = match.group(5)
+ new_address = int(match.group(6))
+ l = int(match.group(7))
+ name = match.group(8)
+ name = self.process_names.get(pid, name)
+ if name == '<...>':
+ name = "pid-" + str(pid)
+ is_read = not 'W' in access
+ accesses_per_inodes = {} # K:inodes, V: list of two entries, 1st: offsets, 2nd: length
+ addrs_to_remove = []
+ completed_reqs = []
+ address = self.remap.get(new_address, new_address)
+ if DBG_ISSUE:
+ print "issue", address, l, is_read, access
+ # NOTE(review): when l > 0 the second clause is implied by the first,
+ # so only requests starting at or after access_addr are considered;
+ # per-request bounds are then enforced inside get_valid_access. Confirm
+ # whether an upper-bound check (address < access_addr + request length)
+ # was intended instead.
+ for access_addr, access_list in self.pending_accesses.iteritems():
+ if (address >= access_addr) and (address + l) > access_addr:
+ reqs_to_remove = []
+ for pending in access_list:
+ offset, valid_access_size = pending.get_valid_access(address - access_addr, l)
+ if valid_access_size > 0:
+ if pending.is_req_started(): # spread across multiple reads. complete alone
+ pending.queue_block_access(offset, valid_access_size)
+ if pending.is_req_complete():
+ pending.handle_req_complete(time, is_read)
+ reqs_to_remove.append(pending)
+ else: # possible multiple reads completed in this read. complete them together
+ pending.queue_block_access(offset, valid_access_size)
+ if pending.is_req_complete():
+ reads = accesses_per_inodes.get(pending.inode, [[], [], []])
+ reads[0].append(offset + pending.lblk)
+ reads[1].append(valid_access_size)
+ reads[2].append(pending.process_name)
+ accesses_per_inodes[pending.inode] = reads
+ completed_reqs.append(pending)
+ reqs_to_remove.append(pending)
+ for to_remove in reqs_to_remove:
+ access_list.remove(to_remove)
+ if len(access_list) == 0:
+ addrs_to_remove.append(access_addr)
+ for addr in addrs_to_remove:
+ del self.pending_accesses[addr]
+ for pending in completed_reqs: # these will be reported as batch
+ accesses = accesses_per_inodes.get(pending.inode)
+ if not accesses: # merged one already dispatched
+ continue
+ if len(accesses[0]) == 1:
+ pending.handle_req_complete(time, is_read)
+ else: #merged
+ pending.handle_merged_req(time, accesses[0], accesses[1], accesses[2], is_read)
+ del accesses_per_inodes[pending.inode]
+
+ # Print per-partition report; returns (total_reads, total_writes, #files).
+ def dump_partition(self, partition_name, files):
+ name_to_pid_map = {}
+ for pid, name in self.process_names.iteritems():
+ name_to_pid_map[name] = str(pid)
+ print "**Dump partition:", partition_name, "total number of files:", len(files)
+ total_reads = 0
+ total_writes = 0
+ files_sorted_by_read = files.values()
+ files_sorted_by_read.sort(key=lambda f : f.read.total_access, reverse = True)
+ files_sorted_by_write = files.values()
+ files_sorted_by_write.sort(key=lambda f : f.write.total_access, reverse = True)
+ print " Top 10 readers:"
+ for i in range(min(10, len(files_sorted_by_read))):
+ files_sorted_by_read[i].dump_short()
+ print " Top 10 writers:"
+ for i in range(min(10, len(files_sorted_by_write))):
+ files_sorted_by_write[i].dump_short()
+ for f in files_sorted_by_read:
+ f.dump(name_to_pid_map)
+ total_reads += f.read.total_access
+ total_writes += f.write.total_access
+ print " Total reads:", total_reads, " total writes:", total_writes
+ return total_reads, total_writes, len(files)
+
+
+ # Print the full report: each partition followed by an overall summary.
+ def dump(self):
+ print "*Dump R/W per each partition"
+ total_reads = 0
+ total_writes = 0
+ summaries = []
+ for d in self.files_per_device:
+ reads, writes, num_files = self.dump_partition(DEVICE_TO_PARTITION[d], \
+ self.files_per_device[d])
+ total_reads += reads
+ total_writes += writes
+ summaries.append((DEVICE_TO_PARTITION[d], reads, writes, num_files))
+ print "*Summary*"
+ print "Total blocks read", total_reads
+ print "Total blocks wrote", total_writes
+ print "Partition total_reads total_writes num_files"
+ for s in summaries:
+ print s[0], s[1], s[2], s[3]
+
+# Entry point: argv[1] is the ftrace output file to analyze.
+def main(argv):
+ if (len(argv) < 2):
+ print "check_file_read.py filename"
+ return
+ filename = argv[1]
+ trace = Trace()
+ with open(filename) as f:
+ for l in f:
+ trace.handle_line(l)
+ trace.dump()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/boottime_tools/io_analysis/check_io_trace.py b/boottime_tools/io_analysis/check_io_trace.py
new file mode 100644
index 00000000..bc26c467
--- /dev/null
+++ b/boottime_tools/io_analysis/check_io_trace.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Analyze block trace"""
+
+import collections
+import os
+import re
+import string
+import sys
+
+# Coarse filter: matches any block_* tracepoint and captures the event name.
+RE_BLOCK = r'.+\s+(block[a-z_]+):\s+'
+# block_bio_queue line: captures time, major, minor, op, sector, size, comm.
+RE_BLOCK_BIO_QUEUE = r'.+\s+([0-9]+\.[0-9]+):\s+block_bio_queue:\s+([0-9]+)\,([0-9]+)\s+([RW]\S*)\s+([0-9]+)\s+\+\s+([0-9]+)\s+\[([^\]]+)'
+
+# dev_num = major * MULTIPLIER + minor
+DEV_MAJOR_MULTIPLIER = 1000
+
+# dm access is remapped to disk access. So account differently
+DM_MAJOR = 253
+
+# One queued read or write: sector, queue time (us) and size; latency is
+# filled in later when the matching completion event is seen.
+class RwEvent:
+ def __init__(self, block_num, start_time, size):
+ self.block_num = block_num
+ self.start_time = start_time
+ self.size = size
+ self.latency = 0
+ def set_latency(self, latency):
+ self.latency = latency
+
+# Return the index of string_to_find in strings, or -1 when absent
+# (list.index would raise instead).
+def get_string_pos(strings, string_to_find):
+ for i, s in enumerate(strings):
+ if s == string_to_find:
+ return i
+ return -1
+
+
+# Per-process block I/O analysis: tracks queue->complete latency for reads of
+# one process (or all processes sharing a name prefix when the argument ends
+# with '*').
+class Trace:
+ def __init__(self, process):
+ self.process = process
+ self.reads = [] #(start time, RwEvent)
+ self.writes = [] #(start time, RwEvent)
+ self.recent_reads = {} # K: block_num, V: RwEvent awaiting completion
+ self.total_latency = 0
+ self.total_reads = 0
+ self.total_writes = 0
+ self.total_dm_reads = {} #K: devnum, V: blocks
+ self.total_dm_writes = {}
+ self.re_block_queue = re.compile(RE_BLOCK_BIO_QUEUE)
+ self.processes = set()
+ if process[-1] == '*':
+ print "Process name starts with", process[:-1]
+ self.process_name_is_prefix = True
+ else:
+ print "Process name", process
+ self.process_name_is_prefix = False
+
+ # block_bio_queue: record the event if it belongs to the tracked process;
+ # device-mapper traffic is only totalled per device, not latency-tracked.
+ def parse_bio_queue(self, l):
+ match = self.re_block_queue.match(l)
+ if not match:
+ return
+ start_time = int(float(match.group(1))*1000000) #us
+ major = int(match.group(2))
+ minor = int(match.group(3))
+ operation = match.group(4)
+ block_num = int(match.group(5))
+ size = int(match.group(6))
+ process = match.group(7)
+ if self.process_name_is_prefix:
+ if not process.startswith(self.process[:-1]):
+ return
+ self.processes.add(process)
+ else:
+ if process != self.process:
+ return
+ if major == DM_MAJOR:
+ devNum = major * DEV_MAJOR_MULTIPLIER + minor;
+ if operation[0] == 'R':
+ if devNum not in self.total_dm_reads:
+ self.total_dm_reads[devNum] = 0
+ self.total_dm_reads[devNum] += size
+ elif operation[0] == 'W':
+ if devNum not in self.total_dm_writes:
+ self.total_dm_writes[devNum] = 0
+ self.total_dm_writes[devNum] += size
+ return
+ event = RwEvent(block_num, start_time, size)
+ if operation[0] == 'R':
+ self.reads.append((start_time, event))
+ self.recent_reads[block_num] = event
+ self.total_reads += size
+ elif operation[0] == 'W':
+ self.writes.append((start_time, event))
+ self.total_writes += size
+
+ # block_rq_complete / block_bio_complete: match against a pending read by
+ # sector and compute queue-to-complete latency in microseconds.
+ def parse_rq_complete(self, l):
+ words = string.split(l)
+ cmd_pos = get_string_pos(words, "block_rq_complete:")
+ if cmd_pos == -1:
+ cmd_pos = get_string_pos(words, "block_bio_complete:")
+ block_num = int(words[-4])
+ event = self.recent_reads.get(block_num)
+ if not event:
+ return
+ operation = words[cmd_pos + 2]
+ if not operation.startswith("R"):
+ return
+ # timestamp token ends with ':'; strip it before converting to us.
+ end_time = int(float(words[cmd_pos - 1][:-1])*1000000) #us
+ latency = end_time - event.start_time
+ if latency > 20000:
+ print "very bad latency:", latency, l
+ print "start time,", event.start_time
+ event.set_latency(latency)
+ del self.recent_reads[block_num]
+ self.total_latency += latency
+
+ # Dispatch a pre-matched block_* line to the right parser.
+ def parse_block_trace(self, l, match):
+ try:
+ cmd = match.group(1)
+ if cmd == "block_bio_queue":
+ self.parse_bio_queue(l)
+ elif cmd == "block_rq_complete" or cmd == "block_bio_complete":
+ self.parse_rq_complete(l)
+ except ValueError:
+ print "cannot parse:", l
+ raise
+
+ # Print totals, dm totals, read-size histogram, and avg latency per size.
+ def dump(self):
+ if self.process_name_is_prefix:
+ print "Processes:", self.processes
+ print "total read blocks,", self.total_reads
+ print "total write blocks,", self.total_writes
+ if len(self.reads) > 0:
+ total_read_time = self.reads[-1][0] + self.reads[-1][1].latency - self.reads[0][0]
+ else:
+ total_read_time = 0
+ print "Total DM R"
+ for dev,size in self.total_dm_reads.items():
+ print dev, size
+ print "Total DM W"
+ for dev,size in self.total_dm_writes.items():
+ print dev, size
+ print "total read time,",total_read_time
+ read_size_histogram = {}
+ latency_per_read_size = {}
+ for (time, event) in self.reads:
+ if not read_size_histogram.get(event.size):
+ read_size_histogram[event.size] = 0
+ if not latency_per_read_size.get(event.size):
+ latency_per_read_size[event.size] = [ 0, 0] # num events, total latency
+ read_size_histogram[event.size] = read_size_histogram[event.size] + 1
+ latency_sum = latency_per_read_size[event.size]
+ latency_sum[0] += 1
+ latency_sum[1] += event.latency
+ read_size_histogram = collections.OrderedDict(sorted(read_size_histogram.items()))
+ print "read histogram"
+ for k,v in read_size_histogram.iteritems():
+ print k, ',', v
+ print "latency per read size"
+ latency_per_read_size = collections.OrderedDict(sorted(latency_per_read_size.items()))
+ for k,v in latency_per_read_size.iteritems():
+ if v[0] != 0:
+ print k, ',', v[1] / v[0], v[0], v[1]
+
+# Entry point: argv[1] is the process name (optionally ending in '*' for a
+# prefix match), argv[2] the ftrace output file.
+def main(argv):
+ if (len(argv) < 3):
+ print "check_io_trace.py processname filename"
+ return
+ keyword = argv[1]
+ filename = argv[2]
+ trace = Trace(keyword)
+ prog = re.compile(RE_BLOCK)
+ with open(filename) as f:
+ for l in f:
+ result = prog.match(l)
+ if result:
+ trace.parse_block_trace(l, result)
+ trace.dump()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/boottime_tools/io_analysis/check_io_trace_all.py b/boottime_tools/io_analysis/check_io_trace_all.py
new file mode 100644
index 00000000..8ea466d7
--- /dev/null
+++ b/boottime_tools/io_analysis/check_io_trace_all.py
@@ -0,0 +1,386 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Analyze block trace"""
+
+import collections
+import os
+import re
+import string
+import sys
+
+# ex) <...>-52 [001] ...1 1.362574: block_bio_queue: 8,16 R 0 + 8 [kworker/u8:1]
+RE_BLOCK = r'.+-([0-9]+).*\s+([0-9]+\.[0-9]+):\s+block_bio_queue:\s+([0-9]+)\,([0-9]+)\s(\S+)\s+([0-9]+)\s+\+\s+([0-9]+)\s+\[([^\]]+)'
+# ex) <...>-453 [001] d..4 3.181854: sched_blocked_reason: pid=471 iowait=1 caller=__wait_on_buffer+0x24/0x2c
+RE_SCHED_BLOCKED_READSON = r'.+-([0-9]+)\s+\[([0-9]+)\]\s.*\s+([0-9]+\.[0-9]+):\s+sched_blocked_reason:\spid=([0-9]+)\siowait=([01])\scaller=(\S+)'
+# ex) <idle>-0 [000] d..3 3.181864: sched_switch: prev_comm=swapper/0 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=ueventd next_pid=471 next_prio=120
+RE_SCHED_SWITCH = r'.+-([0-9]+)\s+\[([0-9]+)\]\s.*\s+([0-9]+\.[0-9]+):\s+sched_switch:\sprev_comm=(.+)\sprev_pid=([0-9]+)\sprev_prio=([0-9]+)\sprev_state=(\S+).*next_comm=(.+)\snext_pid=([0-9]+)\snext_prio=([0-9]+)'
+
+# dev_num = major * MULTIPLIER + minor
+DEV_MAJOR_MULTIPLIER = 1000
+
+# dm access is remapped to disk access. So account differently
+DM_MAJOR = 253
+
+# Cap on how many processes the "total R/W" dump prints in detail.
+MAX_PROCESS_DUMP = 10
+
+# One queued block I/O: sector, queue time (ms) and size in blocks.
+class RwEvent:
+ def __init__(self, block_num, start_time, size):
+ self.block_num = block_num
+ self.start_time = start_time
+ self.size = size
+
+# Return the index of string_to_find in strings, or -1 when absent.
+def get_string_pos(strings, string_to_find):
+ for i, s in enumerate(strings):
+ if s == string_to_find:
+ return i
+ return -1
+
+# I/O totals for a single process ("comm-pid"): per-device event lists and
+# block counts, plus device-mapper totals kept separately.
+class ProcessData:
+ def __init__(self, name):
+ self.name = name
+ self.reads = {} # k : dev_num, v : [] of reads
+ self.per_device_total_reads = {}
+ self.writes = {}
+ self.per_device_total_writes = {}
+ self.total_reads = 0
+ self.total_writes = 0
+ self.total_dm_reads = 0
+ self.total_dm_writes = 0
+
+
+ # Record one read event and bump the per-device and overall totals.
+ def add_read_event(self, major, minor, event):
+ devNum = major * DEV_MAJOR_MULTIPLIER + minor;
+ events = self.reads.get(devNum)
+ if not events:
+ events = []
+ self.reads[devNum] = events
+ self.per_device_total_reads[devNum] = 0
+ events.append(event)
+ self.total_reads += event.size
+ self.per_device_total_reads[devNum] += event.size
+
+ # Record one write event and bump the per-device and overall totals.
+ def add_write_event(self, major, minor, event):
+ devNum = major * DEV_MAJOR_MULTIPLIER + minor;
+ events = self.writes.get(devNum)
+ if not events:
+ events = []
+ self.writes[devNum] = events
+ self.per_device_total_writes[devNum] = 0
+ events.append(event)
+ self.total_writes += event.size
+ self.per_device_total_writes[devNum] += event.size
+
+ def add_dm_read(self, size):
+ self.total_dm_reads += size
+
+ def add_dm_write(self, size):
+ self.total_dm_writes += size
+
+ # Print totals and per-device R/W breakdowns, largest first.
+ def dump(self):
+ print "Process,", self.name
+ print " total reads,", self.total_reads
+ print " total writes,", self.total_writes
+ print " total dm reads,", self.total_dm_reads
+ print " total dm writes,", self.total_dm_writes
+ print " R per device"
+ sorted_r = collections.OrderedDict(sorted(self.per_device_total_reads.items(), \
+ key = lambda item: item[1], reverse = True))
+ for i in range(len(sorted_r)):
+ dev = sorted_r.popitem(last=False)
+ print " ", dev[0],dev[1]
+
+ print " W per device"
+ sorted_w = collections.OrderedDict(sorted(self.per_device_total_writes.items(), \
+ key = lambda item: item[1], reverse = True))
+ for i in range(len(sorted_w)):
+ dev = sorted_w.popitem(last=False)
+ print " ", dev[0],dev[1]
+
+# Whole-system view: accumulates block_bio_queue events for every process and
+# every device, with device-mapper traffic accounted separately.
+class IoTrace:
+
+ def __init__(self):
+ self.ios = {} #K: process name, v:ProcessData
+ self.total_reads = 0
+ self.total_writes = 0
+ self.total_reads_per_device = {} #K: block num, V: total blocks
+ self.total_writes_per_device = {}
+ self.total_dm_reads = {} #K: devnum, V: blocks
+ self.total_dm_writes = {}
+ self.re_block = re.compile(RE_BLOCK)
+
+ # Returns True when the line was a block_bio_queue event (matched or not
+ # parseable raises); False lets the caller try other parsers.
+ def parse(self, l):
+ match = self.re_block.match(l)
+ if not match:
+ return False
+ try:
+ self.do_parse_bio_queue(l, match)
+ except ValueError:
+ print "cannot parse:", l
+ raise
+ return True
+
+ # Accumulate one queue event into the per-process and per-device totals.
+ def do_parse_bio_queue(self, l, match):
+ pid = match.group(1)
+ start_time = float(match.group(2))*1000 #ms
+ major = int(match.group(3))
+ minor = int(match.group(4))
+ devNum = major * DEV_MAJOR_MULTIPLIER + minor;
+ operation = match.group(5)
+ block_num = int(match.group(6))
+ size = int(match.group(7))
+ # key processes by "comm-pid" so same-named processes stay distinct.
+ process = match.group(8) + "-" + pid
+ event = RwEvent(block_num, start_time, size)
+ io = self.ios.get(process)
+ if not io:
+ io = ProcessData(process)
+ self.ios[process] = io
+ if major == DM_MAJOR:
+ devNum = major * DEV_MAJOR_MULTIPLIER + minor;
+ if 'R' in operation[0]:
+ if devNum not in self.total_dm_reads:
+ self.total_dm_reads[devNum] = 0
+ self.total_dm_reads[devNum] += size
+ io.add_dm_read(size)
+ elif 'W' in operation[0]:
+ if devNum not in self.total_dm_writes:
+ self.total_dm_writes[devNum] = 0
+ self.total_dm_writes[devNum] += size
+ io.add_dm_write(size)
+ return
+ if 'R' in operation[0]:
+ io.add_read_event(major, minor, event)
+ self.total_reads += size
+ per_device = self.total_reads_per_device.get(devNum)
+ if not per_device:
+ self.total_reads_per_device[devNum] = 0
+ self.total_reads_per_device[devNum] += size
+ elif 'W' in operation[0]:
+ io.add_write_event(major, minor, event)
+ self.total_writes += size
+ per_device = self.total_writes_per_device.get(devNum)
+ if not per_device:
+ self.total_writes_per_device[devNum] = 0
+ self.total_writes_per_device[devNum] += size
+
+ # Print overall, per-process (top MAX_PROCESS_DUMP) and per-device totals.
+ def dump(self):
+ print "total read blocks,", self.total_reads
+ print "total write blocks,", self.total_writes
+ print "Total DM R"
+ for dev,size in self.total_dm_reads.items():
+ print dev, size
+ print "Total DM W"
+ for dev,size in self.total_dm_writes.items():
+ print dev, size
+ print "**Process total R/W"
+ sorted_by_total_rw = collections.OrderedDict(sorted(self.ios.items(), \
+ key = lambda item: item[1].total_reads + item[1].total_writes, reverse = True))
+ # NOTE(review): popitem raises KeyError on an empty OrderedDict, so the
+ # `if not process` guard below can never fire when there are fewer than
+ # MAX_PROCESS_DUMP processes -- confirm and bound the range instead.
+ for i in range(MAX_PROCESS_DUMP):
+ process = sorted_by_total_rw.popitem(last=False)
+ if not process:
+ break
+ process[1].dump()
+
+ print "**Process total W"
+ sorted_by_total_w = collections.OrderedDict(sorted(self.ios.items(), \
+ key = lambda item: item[1].total_writes, reverse = True))
+ # NOTE(review): same popitem/KeyError concern as above for < 5 entries.
+ for i in range(5):
+ process = sorted_by_total_w.popitem(last=False)
+ if not process:
+ break
+ process[1].dump()
+
+ print "**Device total R"
+ sorted_by_total_r = collections.OrderedDict(sorted(self.total_reads_per_device.items(), \
+ key = lambda item: item[1], reverse = True))
+ for i in range(len(sorted_by_total_r)):
+ dev = sorted_by_total_r.popitem(last=False)
+ print dev[0],dev[1]
+
+ print "**Device total W"
+ sorted_by_total_w = collections.OrderedDict(sorted(self.total_writes_per_device.items(), \
+ key = lambda item: item[1], reverse = True))
+ for i in range(len(sorted_by_total_w)):
+ dev = sorted_by_total_w.popitem(last=False)
+ print dev[0],dev[1]
+
+class SchedProcess:
+  """Per-PID scheduler statistics built from sched_switch and
+  sched_blocked_reason trace events. All times are in milliseconds
+  (callers convert from seconds before passing in)."""
+  def __init__(self, pid):
+    self.pid = pid
+    self.name = "unknown"
+    self.total_execution_time = 0.0
+    self.total_io_wait_time = 0.0
+    self.total_other_wait_time = 0.0
+    self.waiting_calls = {} # k: waiting_call, v : waiting counter
+    self.io_waiting_call_times = {} # k: waiting_call, v: total wait time
+    self.in_iowait = False
+    self.last_waiting_call = None
+    self.last_switch_out_time = 0.0
+    self.last_switch_in_time = 0.0
+    self.last_core = -1
+    self.execution_time_per_core = {} # k: core, v : time
+    self.io_latency_histograms = {} # k : delay in ms, v : count
+
+  def handle_reason(self, current_time, iowait, waiting_call):
+    # Record a sched_blocked_reason event: remember whether the block was an
+    # iowait, and count which call site the process blocked in.
+    #if self.pid == 1232:
+    #  print current_time, iowait, waiting_call
+    if iowait == 1:
+      self.in_iowait = True
+      self.last_waiting_call = waiting_call
+    call_counter = self.waiting_calls.get(waiting_call, 0)
+    call_counter += 1
+    self.waiting_calls[waiting_call] = call_counter
+
+  def handle_switch_out(self, current_time, out_state, priority, name, core):
+    # Process was switched out: close the current execution interval and
+    # attribute it to the core it ran on.
+    #if self.pid == 1232:
+    #  print "out", current_time, out_state
+    if self.name != name:
+      self.name = name
+    self.last_switch_out_time = current_time
+    if self.last_switch_in_time == 0.0: # switch in not recorded. ignore this one
+      return
+    execution_time = current_time - self.last_switch_in_time
+    self.total_execution_time += execution_time
+    core_execution_time = self.execution_time_per_core.get(core, 0.0)
+    core_execution_time += execution_time
+    self.execution_time_per_core[core] = core_execution_time
+
+  def handle_switch_in(self, current_time, priority, name, core):
+    # Process was switched in: time since switch-out is wait time, split into
+    # io wait (when a blocked_reason marked iowait) vs other wait.
+    #if self.pid == 1232:
+    #  print "in", current_time, self.in_iowait
+    if self.name != name:
+      self.name = name
+    self.last_switch_in_time = current_time
+    if self.last_switch_out_time == 0.0: # in without out, probably 1st
+      self.in_iowait = False
+      return
+    wait_time = current_time - self.last_switch_out_time
+    if self.in_iowait:
+      self.total_io_wait_time += wait_time
+      total_waiting_call_time = self.io_waiting_call_times.get(self.last_waiting_call, 0.0)
+      total_waiting_call_time += wait_time
+      self.io_waiting_call_times[self.last_waiting_call] = total_waiting_call_time
+      wait_time_ms = int(wait_time*10) / 10.0 # resolution up to 0.1 ms
+      histogram_count = self.io_latency_histograms.get(wait_time_ms, 0)
+      histogram_count += 1
+      self.io_latency_histograms[wait_time_ms] = histogram_count
+    else:
+      self.total_other_wait_time += wait_time
+    self.in_iowait = False
+
+
+  def dump(self):
+    # Print this process' totals plus per-core, per-wait-call and io latency
+    # histogram breakdowns.
+    print "PID:", self.pid, " name:", self.name
+    print " total execution time:", self.total_execution_time,\
+      " io wait:", self.total_io_wait_time, " other wait:", self.total_other_wait_time
+    sorted_data = collections.OrderedDict(sorted(self.execution_time_per_core.items(), \
+      key = lambda item: item[0], reverse = False))
+    print " Core execution:", sorted_data
+    sorted_data = collections.OrderedDict(sorted(self.waiting_calls.items(), \
+      key = lambda item: item[1], reverse = True))
+    print " Wait calls:", sorted_data
+    sorted_data = collections.OrderedDict(sorted(self.io_waiting_call_times.items(), \
+      key = lambda item: item[1], reverse = True))
+    print " IO Wait time per wait calls:", sorted_data
+    sorted_data = collections.OrderedDict(sorted(self.io_latency_histograms.items(), \
+      key = lambda item: item[0], reverse = False))
+    print " Wait time histogram:", sorted_data
+
+class SchedTrace:
+  """Parses sched_switch and sched_blocked_reason trace lines into per-PID
+  SchedProcess statistics."""
+  def __init__(self):
+    self.re_switch = re.compile(RE_SCHED_SWITCH)
+    self.re_reason = re.compile(RE_SCHED_BLOCKED_READSON)
+    self.processes = {} # key: pid, v : SchedProcess
+
+  def parse(self, l):
+    # Returns True when the line matched one of the sched patterns (and was
+    # consumed); False lets the caller try other parsers.
+    checked_reason = False
+    match = self.re_switch.match(l)
+    if not match:
+      match = self.re_reason.match(l)
+      checked_reason = True
+    if not match:
+      return False
+    try:
+      if checked_reason:
+        self.do_handle_reason(l, match)
+      else:
+        self.do_handle_switch(l, match)
+    except ValueError:
+      # Re-raise after printing so the offending trace line is visible.
+      print "cannot parse:", l
+      raise
+    return True
+
+  def do_handle_switch(self, l, match):
+    # A sched_switch event updates both the outgoing and the incoming process,
+    # creating SchedProcess entries on first sight.
+    current_pid = int(match.group(1))
+    cpu_core = int(match.group(2))
+    current_time = float(match.group(3))*1000 #ms
+    out_name = match.group(4)
+    out_pid = int(match.group(5))
+    out_prio = int(match.group(6))
+    out_state = match.group(7)
+    in_name = match.group(8)
+    in_pid = int(match.group(9))
+    in_prio = int(match.group(10))
+    out_process = self.processes.get(out_pid)
+    if not out_process:
+      out_process = SchedProcess(out_pid)
+      self.processes[out_pid] = out_process
+    in_process = self.processes.get(in_pid)
+    if not in_process:
+      in_process = SchedProcess(in_pid)
+      self.processes[in_pid] = in_process
+    out_process.handle_switch_out(current_time, out_state, out_prio, out_name, cpu_core)
+    in_process.handle_switch_in(current_time, in_prio, in_name, cpu_core)
+
+  def do_handle_reason(self, l, match):
+    # A sched_blocked_reason event tags the blocked PID with its wait cause.
+    current_pid = int(match.group(1))
+    cpu_core = int(match.group(2))
+    current_time = float(match.group(3))*1000 #ms
+    pid = int(match.group(4))
+    iowait = int(match.group(5))
+    waiting_call = match.group(6)
+    process = self.processes.get(pid)
+    if not process:
+      process = SchedProcess(pid)
+      self.processes[pid] = process
+    process.handle_reason(current_time, iowait, waiting_call)
+
+  def dump(self):
+    # Dump processes sorted by total io wait time (despite the variable name),
+    # skipping short-lived processes that never waited on io.
+    sorted_by_total_execution = collections.OrderedDict(sorted(self.processes.items(), \
+      key = lambda item: item[1].total_io_wait_time, reverse = True))
+    for k, v in sorted_by_total_execution.iteritems():
+      if v.total_execution_time > 10.0 or v.total_io_wait_time != 0.0:
+        v.dump()
+
+def main(argv):
+  # Entry point: feed each trace line to the IO parser first, falling back to
+  # the scheduler parser, then dump both reports.
+  if (len(argv) < 2):
+    print "check_io_trace_all.py filename"
+    return
+  filename = argv[1]
+
+  io_trace = IoTrace()
+  sched_trace = SchedTrace()
+  with open(filename) as f:
+    for l in f:
+      if io_trace.parse(l):
+        continue
+      sched_trace.parse(l)
+  io_trace.dump()
+  print "\n\n\n"
+  sched_trace.dump()
+
+if __name__ == '__main__':
+  main(sys.argv)
diff --git a/boottime_tools/io_analysis/check_verity.py b/boottime_tools/io_analysis/check_verity.py
new file mode 100644
index 00000000..a69818fb
--- /dev/null
+++ b/boottime_tools/io_analysis/check_verity.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Analyze dm_verity trace"""
+import collections
+import math
+import os
+import re
+import string
+import sys
+
+RE_VERITY = r'.+\s+([0-9]+\.[0-9]+):\s+block_verity_msg:\s+(\S+)\s+([0-9]+)\,([0-9]+)\s+([0-9]+)\s+([0-9]+)'
+
+def get_average_and_std_dev(l):
+  # Computes (mean, stddev, sum) triples for the io/verity/total latencies of
+  # the event list l, plus the summed block count.
+  # NOTE(review): divides by len(l), so an empty list raises
+  # ZeroDivisionError -- callers must only pass non-empty lists.
+  sum_io = 0.0
+  sum_verity = 0.0
+  sum_total = 0.0
+  N = len(l)
+  sum_blocks = 0.0
+  for e in l:
+    sum_io += e.io_latency
+    sum_verity += e.verity_latency
+    sum_total += e.total_latency
+    sum_blocks += e.size
+  average_io = sum_io / N
+  average_verity = sum_verity / N
+  average_total = sum_total / N
+  # Population standard deviation (divide by N, not N-1).
+  var_io = 0.0
+  var_verity = 0.0
+  var_total = 0.0
+  for e in l:
+    var_io += (e.io_latency - average_io)**2
+    var_verity += (e.verity_latency - average_verity)**2
+    var_total += (e.total_latency - average_total)**2
+  sigma_io = math.sqrt(var_io / N)
+  sigma_verity = math.sqrt(var_verity / N)
+  sigma_total = math.sqrt(var_total / N)
+  return (average_io, sigma_io, sum_io), (average_verity, sigma_verity, sum_verity), \
+      (average_total, sigma_total, sum_total), sum_blocks
+
+
+class Event:
+  """One dm-verity read: tracks the map -> end_io -> finish_io timestamps
+  (microseconds) and the latencies derived from them."""
+  def __init__(self, start_time, block_num, size):
+    self.block_num = block_num
+    self.start_time = start_time
+    self.io_end_time = 0
+    self.finish_time = 0
+    self.size = size
+    self.total_latency = 0
+    self.io_latency = 0
+    self.verity_latency = 0
+
+  def set_io_end_time(self, io_end_time):
+    # Device I/O completed; the elapsed time so far is the raw read latency.
+    self.io_end_time = io_end_time
+    self.io_latency = io_end_time - self.start_time
+
+  def set_finish_time(self, finish_time):
+    # Verification completed; split out verity-only vs total latency.
+    self.finish_time = finish_time
+    self.verity_latency = finish_time - self.io_end_time
+    self.total_latency = finish_time - self.start_time
+
+class VerityTrace:
+  """Parses block_verity_msg trace lines and reports dm-verity read
+  latencies overall and per read size."""
+  def __init__(self):
+    self.reads = [] # all events in start time
+    self.block_size_vs_reads_histogram = {} # key: size, value: list of events
+    self.recents = {} # not finished, key: block_nr, value: event
+    self.re = re.compile(RE_VERITY)
+
+  def handle_line(self, line):
+    # Dispatch one trace line by its step: "map" opens an event, "end_io"
+    # marks device I/O done, "finish_io" marks verification done.
+    match = self.re.match(line)
+    if not match:
+      return
+    time = int(float(match.group(1))*1000000) #us
+    step = match.group(2)
+    block_nr = int(match.group(5))
+    size = int(match.group(6))
+    # Key combines block number and size so concurrent reads of different
+    # sizes at the same block stay distinct.
+    recent_key = block_nr * 1000 + size
+    if step == "map":
+      event = Event(time, block_nr, size)
+      self.recents[recent_key] = event
+      self.reads.append(event)
+      per_size_list = self.block_size_vs_reads_histogram.get(size)
+      if not per_size_list:
+        per_size_list = []
+        self.block_size_vs_reads_histogram[size] = per_size_list
+      per_size_list.append(event)
+    elif step == "end_io":
+      # NOTE(review): raises KeyError if the matching "map" line was not seen
+      # (e.g. a truncated trace) -- confirm traces always begin before the
+      # first map event.
+      event = self.recents[recent_key]
+      event.set_io_end_time(time)
+    elif step == "finish_io":
+      event = self.recents[recent_key]
+      event.set_finish_time(time)
+      del self.recents[recent_key]
+
+  def dump_list(self, msg, l):
+    # Print and return the latency statistics for one list of events.
+    io, verity, total, blocks = get_average_and_std_dev(l)
+    print msg, "counts:", len(l), "io latency:", io[0], io[1], io[2], "verity latency:", \
+      verity[0], verity[1], verity[2], "total:", total[0], total[1], total[2]
+    return io, verity, total, blocks
+
+  def dump(self):
+    # Overall stats, normalized per-1024KB latency (the 1024 / 4 factor
+    # presumably treats each block as 4KB -- TODO confirm), then a
+    # per-read-size breakdown.
+    print "Numbers: average (us), stddev (us), total (us)"
+    io, verity, total, blocks = self.dump_list ("total,", self.reads)
+    io_latency_per_1024KB = io[2] / blocks * (1024 / 4)
+    verity_latency_per_1024KB = verity[2] / blocks * (1024 / 4)
+    total_latency_per_1024KB = io_latency_per_1024KB + verity_latency_per_1024KB
+    print "Average latency for 1024KB (us), IO:", io_latency_per_1024KB, \
+      "Verity:", verity_latency_per_1024KB, "Total:", total_latency_per_1024KB
+    sizes = sorted(self.block_size_vs_reads_histogram.keys())
+    print "Latency per read size"
+    for s in sizes:
+      self.dump_list ("size " + str(s), self.block_size_vs_reads_histogram[s])
+
+def main(argv):
+ if (len(argv) < 2):
+ print "check_io_trace.py filename"
+ return
+ filename = argv[1]
+ trace = VerityTrace()
+ with open(filename) as f:
+ for l in f:
+ trace.handle_line(l)
+ trace.dump()
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/ext4_utils/ext4_crypt.cpp b/ext4_utils/ext4_crypt.cpp
index 6540e514..f392046a 100644
--- a/ext4_utils/ext4_crypt.cpp
+++ b/ext4_utils/ext4_crypt.cpp
@@ -48,6 +48,8 @@ struct ext4_encryption_policy {
#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
+#define EXT4_ENCRYPTION_MODE_SPECK128_256_XTS 7
+#define EXT4_ENCRYPTION_MODE_SPECK128_256_CTS 8
#define EXT4_ENCRYPTION_MODE_AES_256_HEH 126
#define EXT4_ENCRYPTION_MODE_PRIVATE 127
@@ -109,18 +111,15 @@ static bool is_dir_empty(const char *dirname, bool *is_empty)
}
static uint8_t e4crypt_get_policy_flags(int filenames_encryption_mode) {
-
- // With HEH, pad filenames with zeroes to the next 16-byte boundary. This
- // is not required, but it's more secure (helps hide the length of
- // filenames), makes the inputs evenly divisible into blocks which is more
- // efficient for encryption and decryption, and we had the opportunity to
- // make a breaking change when introducing a new mode anyway.
- if (filenames_encryption_mode == EXT4_ENCRYPTION_MODE_AES_256_HEH) {
- return EXT4_POLICY_FLAGS_PAD_16;
+ if (filenames_encryption_mode == EXT4_ENCRYPTION_MODE_AES_256_CTS) {
+ // Use legacy padding with our original filenames encryption mode.
+ return EXT4_POLICY_FLAGS_PAD_4;
}
-
- // Default flags (4-byte padding) for CTS
- return EXT4_POLICY_FLAGS_PAD_4;
+ // With a new mode we can use the better padding flag without breaking existing devices: pad
+ // filenames with zeroes to the next 16-byte boundary. This is more secure (helps hide the
+ // length of filenames) and makes the inputs evenly divisible into blocks which is more
+ // efficient for encryption and decryption.
+ return EXT4_POLICY_FLAGS_PAD_16;
}
static bool e4crypt_policy_set(const char *directory, const char *policy,
@@ -234,6 +233,8 @@ int e4crypt_policy_ensure(const char *directory, const char *policy,
if (!strcmp(contents_encryption_mode, "software") ||
!strcmp(contents_encryption_mode, "aes-256-xts")) {
contents_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
+ } else if (!strcmp(contents_encryption_mode, "speck128/256-xts")) {
+ contents_mode = EXT4_ENCRYPTION_MODE_SPECK128_256_XTS;
} else if (!strcmp(contents_encryption_mode, "ice")) {
contents_mode = EXT4_ENCRYPTION_MODE_PRIVATE;
} else {
@@ -244,6 +245,8 @@ int e4crypt_policy_ensure(const char *directory, const char *policy,
if (!strcmp(filenames_encryption_mode, "aes-256-cts")) {
filenames_mode = EXT4_ENCRYPTION_MODE_AES_256_CTS;
+ } else if (!strcmp(filenames_encryption_mode, "speck128/256-cts")) {
+ filenames_mode = EXT4_ENCRYPTION_MODE_SPECK128_256_CTS;
} else if (!strcmp(filenames_encryption_mode, "aes-256-heh")) {
filenames_mode = EXT4_ENCRYPTION_MODE_AES_256_HEH;
} else {
diff --git a/perfprofd/Android.bp b/perfprofd/Android.bp
index 351c7aab..eb35a039 100644
--- a/perfprofd/Android.bp
+++ b/perfprofd/Android.bp
@@ -51,6 +51,41 @@ cc_defaults {
],
}
+// Static library for the record proto and its I/O.
+
+cc_library_static {
+ name: "libperfprofd_record_proto",
+ defaults: [
+ "perfprofd_defaults",
+ ],
+ host_supported: true,
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+
+ static_libs: [
+ "libbase",
+ "libprotobuf-cpp-lite",
+ "libquipper",
+ "libz",
+ ],
+ srcs: [
+ "perfprofd_io.cc",
+ "perfprofd_record.proto",
+ ],
+
+ proto: {
+ export_proto_headers: true,
+ include_dirs: ["external/perf_data_converter/src/quipper"],
+ type: "lite",
+ },
+
+ export_include_dirs: ["."], // Really only the -fwd.h.
+ export_static_lib_headers: ["libquipper"],
+}
+
//
// Static library containing guts of AWP daemon.
//
@@ -72,23 +107,19 @@ cc_defaults {
"libsimpleperf_elf_read",
],
whole_static_libs: [
+ "libperfprofd_dropbox",
+ "libperfprofd_record_proto",
"libquipper",
],
srcs: [
- "perfprofd_record.proto",
"perf_data_converter.cc",
"configreader.cc",
"cpuconfig.cc",
"perfprofdcore.cc",
- "perfprofd_io.cc",
+ "perfprofd_cmdline.cc",
"symbolizer.cc"
],
- proto: {
- export_proto_headers: true,
- include_dirs: ["external/perf_data_converter/src/quipper"],
- type: "lite",
- },
cflags: [
"-Wno-gnu-anonymous-struct",
],
diff --git a/perfprofd/binder_interface/Android.bp b/perfprofd/binder_interface/Android.bp
index d7bff41b..c40036bd 100644
--- a/perfprofd/binder_interface/Android.bp
+++ b/perfprofd/binder_interface/Android.bp
@@ -30,9 +30,6 @@ cc_library_static {
"libperfprofdcore",
"libprotobuf-cpp-lite",
],
- shared_libs: [
- "libservices",
- ],
srcs: [
"perfprofd_binder.cc",
":perfprofd_aidl",
diff --git a/perfprofd/binder_interface/perfprofd_binder.cc b/perfprofd/binder_interface/perfprofd_binder.cc
index 6667ca5e..87e0c5f6 100644
--- a/perfprofd/binder_interface/perfprofd_binder.cc
+++ b/perfprofd/binder_interface/perfprofd_binder.cc
@@ -35,7 +35,6 @@
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
-#include <android/os/DropBoxManager.h>
#include <binder/BinderService.h>
#include <binder/IResultReceiver.h>
#include <binder/Status.h>
@@ -49,6 +48,7 @@
#include "perfprofd_record.pb.h"
#include "config.h"
+#include "dropbox.h"
#include "perfprofdcore.h"
#include "perfprofd_io.h"
@@ -60,8 +60,6 @@ using Status = ::android::binder::Status;
class BinderConfig : public Config {
public:
- bool send_to_dropbox = false;
-
bool is_profiling = false;
void Sleep(size_t seconds) override {
@@ -97,8 +95,6 @@ class BinderConfig : public Config {
// Copy base fields.
*static_cast<Config*>(this) = static_cast<const Config&>(rhs);
- send_to_dropbox = rhs.send_to_dropbox;
-
return *this;
}
@@ -151,79 +147,21 @@ class PerfProfdNativeService : public BinderService<PerfProfdNativeService>,
int seq_ = 0;
};
-static Status WriteDropboxFile(android::perfprofd::PerfprofdRecord* encodedProfile,
- Config* config) {
- android::base::unique_fd tmp_fd;
- {
- char path[PATH_MAX];
- snprintf(path,
- sizeof(path),
- "%s%cdropboxtmp-XXXXXX",
- config->destination_directory.c_str(),
- OS_PATH_SEPARATOR);
- tmp_fd.reset(mkstemp(path));
- if (tmp_fd.get() == -1) {
- PLOG(ERROR) << "Could not create temp file " << path;
- return Status::fromExceptionCode(1, "Could not create temp file");
- }
- if (unlink(path) != 0) {
- PLOG(WARNING) << "Could not unlink binder temp file";
- }
- }
-
- // Dropbox takes ownership of the fd, and if it is not readonly,
- // a selinux violation will occur. Get a read-only version.
- android::base::unique_fd read_only;
- {
- char fdpath[64];
- snprintf(fdpath, arraysize(fdpath), "/proc/self/fd/%d", tmp_fd.get());
- read_only.reset(open(fdpath, O_RDONLY | O_CLOEXEC));
- if (read_only.get() < 0) {
- PLOG(ERROR) << "Could not create read-only fd";
- return Status::fromExceptionCode(1, "Could not create read-only fd");
- }
- }
-
- constexpr bool kCompress = true; // Ignore the config here. Dropbox will always end up
- // compressing the data, might as well make the temp
- // file smaller and help it out.
- using DropBoxManager = android::os::DropBoxManager;
- constexpr int kDropboxFlags = DropBoxManager::IS_GZIPPED;
-
- if (!SerializeProtobuf(encodedProfile, std::move(tmp_fd), kCompress)) {
- return Status::fromExceptionCode(1, "Could not serialize to temp file");
- }
-
- sp<DropBoxManager> dropbox(new DropBoxManager());
- return dropbox->addFile(String16("perfprofd"), read_only.release(), kDropboxFlags);
-}
-
bool PerfProfdNativeService::BinderHandler(
android::perfprofd::PerfprofdRecord* encodedProfile,
Config* config) {
CHECK(config != nullptr);
+ if (encodedProfile == nullptr) {
+ return false;
+ }
+
if (static_cast<BinderConfig*>(config)->send_to_dropbox) {
- size_t size = encodedProfile->ByteSize();
- Status status;
- if (size < 1024 * 1024) {
- // For a small size, send as a byte buffer directly.
- std::unique_ptr<uint8_t[]> data(new uint8_t[size]);
- encodedProfile->SerializeWithCachedSizesToArray(data.get());
-
- using DropBoxManager = android::os::DropBoxManager;
- sp<DropBoxManager> dropbox(new DropBoxManager());
- status = dropbox->addData(String16("perfprofd"),
- data.get(),
- size,
- 0);
- } else {
- // For larger buffers, we need to go through the filesystem.
- status = WriteDropboxFile(encodedProfile, config);
+ std::string error_msg;
+ if (!dropbox::SendToDropbox(encodedProfile, config->destination_directory, &error_msg)) {
+ LOG(WARNING) << "Failed dropbox submission: " << error_msg;
+ return false;
}
- if (!status.isOk()) {
- LOG(WARNING) << "Failed dropbox submission: " << status.toString8();
- }
- return status.isOk();
+ return true;
}
if (encodedProfile == nullptr) {
@@ -346,6 +284,7 @@ Status PerfProfdNativeService::StartProfilingProtobuf(ProtoLoaderFn fn) {
CHECK_AND_COPY_FROM_PROTO(process)
CHECK_AND_COPY_FROM_PROTO(use_elf_symbolizer)
CHECK_AND_COPY_FROM_PROTO(send_to_dropbox)
+ CHECK_AND_COPY_FROM_PROTO(compress)
#undef CHECK_AND_COPY_FROM_PROTO
};
return StartProfiling(config_fn);
diff --git a/perfprofd/binder_interface/perfprofd_config.proto b/perfprofd/binder_interface/perfprofd_config.proto
index bb7b52d8..c25aa93b 100644
--- a/perfprofd/binder_interface/perfprofd_config.proto
+++ b/perfprofd/binder_interface/perfprofd_config.proto
@@ -76,4 +76,7 @@ message ProfilingConfig {
// Whether to send the result to dropbox.
optional bool send_to_dropbox = 20;
+
+ // If true, use libz to compress the output proto.
+ optional bool compress = 21;
};
diff --git a/perfprofd/config.h b/perfprofd/config.h
index 4c1f12b1..774f7e86 100644
--- a/perfprofd/config.h
+++ b/perfprofd/config.h
@@ -96,6 +96,9 @@ struct Config {
// If true, use libz to compress the output proto.
bool compress = true;
+ // If true, send the proto to dropbox instead to a file.
+ bool send_to_dropbox = false;
+
// Sleep for the given number of seconds.
virtual void Sleep(size_t seconds) = 0;
diff --git a/perfprofd/configreader.cc b/perfprofd/configreader.cc
index f7d6fd29..d3396b33 100644
--- a/perfprofd/configreader.cc
+++ b/perfprofd/configreader.cc
@@ -130,6 +130,9 @@ void ConfigReader::addDefaultEntries()
// If true, use libz to compress the output proto.
addUnsignedEntry("compress", 0, 0, 1);
+
+ // If true, send the proto to dropbox instead to a file.
+ addUnsignedEntry("dropbox", 0, 0, 1);
}
void ConfigReader::addUnsignedEntry(const char *key,
@@ -329,4 +332,5 @@ void ConfigReader::FillConfig(Config* config) {
config->process = -1;
config->use_elf_symbolizer = getBoolValue("use_elf_symbolizer");
config->compress = getBoolValue("compress");
+ config->send_to_dropbox = getBoolValue("dropbox");
}
diff --git a/perfprofd/dropbox/Android.bp b/perfprofd/dropbox/Android.bp
new file mode 100644
index 00000000..9f55fbae
--- /dev/null
+++ b/perfprofd/dropbox/Android.bp
@@ -0,0 +1,55 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//
+// Static library for dropbox submission.
+//
+cc_library_static {
+ name: "libperfprofd_dropbox",
+ defaults: [
+ "perfprofd_defaults",
+ ],
+ host_supported: true,
+
+ export_include_dirs: ["."],
+ static_libs: [
+ "libbase",
+ "libperfprofd_record_proto",
+ "libprotobuf-cpp-lite",
+ ],
+ target: {
+ android: {
+ srcs: [
+ "dropbox.cc",
+ ],
+ static_libs: [
+ "libbinder",
+ "libutils",
+ ],
+ shared_libs: [
+ "libservices",
+ ],
+ },
+ darwin: {
+ enabled: false,
+ },
+ host: {
+ srcs: [
+ "dropbox_host.cc",
+ ],
+ },
+ },
+}
diff --git a/perfprofd/dropbox/dropbox.cc b/perfprofd/dropbox/dropbox.cc
new file mode 100644
index 00000000..2b1dc2ef
--- /dev/null
+++ b/perfprofd/dropbox/dropbox.cc
@@ -0,0 +1,129 @@
+/*
+ *
+ * Copyright 2017, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dropbox.h"
+
+#include <cstdio>
+#include <cstdlib>
+#include <memory>
+
+#include <inttypes.h>
+#include <unistd.h>
+
+#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+#include <android-base/unique_fd.h>
+#include <android/os/DropBoxManager.h>
+#include <binder/Status.h>
+#include <utils/String8.h>
+
+#include "perfprofd_record.pb.h"
+
+#include "perfprofd_io.h"
+
+namespace android {
+namespace perfprofd {
+namespace dropbox {
+
+namespace {
+
+// Serializes the profile into an immediately-unlinked temp file under
+// temp_dir, reopens it read-only via /proc/self/fd (dropbox requires a
+// read-only fd), and submits it with DropBoxManager::addFile.
+// On failure fills *error_msg and returns false.
+// NOTE(review): strerror/errno and open/O_RDONLY appear to rely on
+// transitive includes (<cstring>, <cerrno>, <fcntl.h>) -- confirm.
+bool WriteDropboxFile(android::perfprofd::PerfprofdRecord* encodedProfile,
+                      const std::string& temp_dir,
+                      std::string* error_msg) {
+  android::base::unique_fd tmp_fd;
+  {
+    char path[PATH_MAX];
+    snprintf(path, sizeof(path), "%s/dropboxtmp-XXXXXX", temp_dir.c_str());
+    tmp_fd.reset(mkstemp(path));
+    if (tmp_fd.get() == -1) {
+      *error_msg = android::base::StringPrintf("Could not create temp file %s: %s",
+                                               path,
+                                               strerror(errno));
+      return false;
+    }
+    // Unlink right away: the open fd keeps the file alive and nothing should
+    // ever see it by name.
+    if (unlink(path) != 0) {
+      PLOG(WARNING) << "Could not unlink binder temp file";
+    }
+  }
+
+  // Dropbox takes ownership of the fd, and if it is not readonly,
+  // a selinux violation will occur. Get a read-only version.
+  android::base::unique_fd read_only;
+  {
+    char fdpath[64];
+    snprintf(fdpath, arraysize(fdpath), "/proc/self/fd/%d", tmp_fd.get());
+    read_only.reset(open(fdpath, O_RDONLY | O_CLOEXEC));
+    if (read_only.get() < 0) {
+      *error_msg = android::base::StringPrintf("Could not create read-only fd: %s",
+                                               strerror(errno));
+      return false;
+    }
+  }
+
+  constexpr bool kCompress = true;  // Ignore the config here. Dropbox will always end up
+                                    // compressing the data, might as well make the temp
+                                    // file smaller and help it out.
+  using DropBoxManager = android::os::DropBoxManager;
+  constexpr int kDropboxFlags = DropBoxManager::IS_GZIPPED;
+
+  if (!SerializeProtobuf(encodedProfile, std::move(tmp_fd), kCompress)) {
+    *error_msg = "Could not serialize to temp file";
+    return false;
+  }
+
+  sp<DropBoxManager> dropbox(new DropBoxManager());
+  android::binder::Status status = dropbox->addFile(String16("perfprofd"),
+                                                    read_only.release(),
+                                                    kDropboxFlags);
+  if (!status.isOk()) {
+    *error_msg = status.toString8();
+    return false;
+  }
+  return true;
+}
+
+} // namespace
+
+// Submits the serialized profile to dropbox under the "perfprofd" tag.
+// Profiles under 1MB are sent directly as a byte buffer via addData();
+// larger ones go through a temp file (see WriteDropboxFile above).
+// Returns false and fills *error_msg on failure.
+bool SendToDropbox(android::perfprofd::PerfprofdRecord* profile,
+                   const std::string& temp_directory,
+                   std::string* error_msg) {
+  size_t size = profile->ByteSize();
+  if (size < 1024 * 1024) {
+    // For a small size, send as a byte buffer directly.
+    std::unique_ptr<uint8_t[]> data(new uint8_t[size]);
+    profile->SerializeWithCachedSizesToArray(data.get());
+
+    using DropBoxManager = android::os::DropBoxManager;
+    sp<DropBoxManager> dropbox(new DropBoxManager());
+    android::binder::Status status = dropbox->addData(String16("perfprofd"),
+                                                      data.get(),
+                                                      size,
+                                                      0);
+    if (!status.isOk()) {
+      *error_msg = status.toString8();
+      return false;
+    }
+    return true;
+  } else {
+    // For larger buffers, we need to go through the filesystem.
+    return WriteDropboxFile(profile, temp_directory, error_msg);
+  }
+}
+
+} // namespace dropbox
+} // namespace perfprofd
+} // namespace android
diff --git a/perfprofd/dropbox/dropbox.h b/perfprofd/dropbox/dropbox.h
new file mode 100644
index 00000000..b25d2cc2
--- /dev/null
+++ b/perfprofd/dropbox/dropbox.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_EXTRAS_PERFPROFD_DROPBOX_DROPBOX_H_
+#define SYSTEM_EXTRAS_PERFPROFD_DROPBOX_DROPBOX_H_
+
+#include <string>
+
+#include "perfprofd_record-fwd.h"
+
+namespace android {
+namespace perfprofd {
+namespace dropbox {
+
+bool SendToDropbox(android::perfprofd::PerfprofdRecord* profile,
+ const std::string& temp_directory,
+ std::string* error_msg);
+
+} // namespace dropbox
+} // namespace perfprofd
+} // namespace android
+
+#endif // SYSTEM_EXTRAS_PERFPROFD_DROPBOX_DROPBOX_H_
diff --git a/perfprofd/dropbox/dropbox_host.cc b/perfprofd/dropbox/dropbox_host.cc
new file mode 100644
index 00000000..5c08aa85
--- /dev/null
+++ b/perfprofd/dropbox/dropbox_host.cc
@@ -0,0 +1,35 @@
+/*
+ *
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dropbox.h"
+
+#include <android-base/macros.h>
+
+namespace android {
+namespace perfprofd {
+namespace dropbox {
+
+// Host-build stub: dropbox only exists on-device, so always fail and report
+// why through *error_msg.
+bool SendToDropbox(android::perfprofd::PerfprofdRecord* profile,
+                   const std::string& temp_directory ATTRIBUTE_UNUSED,
+                   std::string* error_msg) {
+  *error_msg = "Dropbox not supported on host";
+  return false;
+}
+
+} // namespace dropbox
+} // namespace perfprofd
+} // namespace android
diff --git a/perfprofd/perfprofd_cmdline.cc b/perfprofd/perfprofd_cmdline.cc
new file mode 100644
index 00000000..fb9c2c17
--- /dev/null
+++ b/perfprofd/perfprofd_cmdline.cc
@@ -0,0 +1,255 @@
+/*
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfprofd_cmdline.h"
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <set>
+#include <string>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/stringprintf.h>
+
+#include "perfprofd_record.pb.h"
+
+#include "configreader.h"
+#include "dropbox.h"
+#include "perfprofdcore.h"
+#include "perfprofd_io.h"
+
+//
+// Perf profiling daemon -- collects system-wide profiles using
+//
+// simpleperf record -a
+//
+// and encodes them so that they can be uploaded by a separate service.
+//
+
+//
+
+//
+// Output file from 'perf record'.
+//
+#define PERF_OUTPUT "perf.data"
+
+//
+// Path of a perf data file to convert to proto format before exiting. Empty (the default) means daemon mode.
+//
+static std::string perf_file_to_convert = "";
+
+//
+// SIGHUP handler. Sending SIGHUP to the daemon can be used to break it
+// out of a sleep() call so as to trigger a new collection (debugging)
+//
+static void sig_hup(int /* signum */)
+{
+ LOG(WARNING) << "SIGHUP received";
+}
+
+//
+// Parse command line args. Currently supported flags:
+// * "-c PATH" sets the path of the config file to PATH.
+// * "-x PATH" reads PATH as a perf data file and saves it as a file in
+// perf_profile.proto format. ".encoded" suffix is appended to PATH to form
+// the output file path.
+//
+static void parse_args(int argc, char** argv)
+{
+ int ac;
+
+ for (ac = 1; ac < argc; ++ac) {
+ if (!strcmp(argv[ac], "-c")) {
+ if (ac >= argc-1) {
+        LOG(ERROR) << "malformed command line: -c option requires argument";
+ continue;
+ }
+ ConfigReader::setConfigFilePath(argv[ac+1]);
+ ++ac;
+ } else if (!strcmp(argv[ac], "-x")) {
+ if (ac >= argc-1) {
+        LOG(ERROR) << "malformed command line: -x option requires argument";
+ continue;
+ }
+ perf_file_to_convert = argv[ac+1];
+ ++ac;
+ } else {
+      LOG(ERROR) << "malformed command line: unknown option or arg " << argv[ac];
+ continue;
+ }
+ }
+}
+
+//
+// Post-processes after profile is collected and converted to protobuf.
+// * GMS core stores processed file sequence numbers in
+// /data/data/com.google.android.gms/files/perfprofd_processed.txt
+// * Update /data/misc/perfprofd/perfprofd_produced.txt to remove the sequence
+// numbers that have been processed and append the current seq number
+// Returns true if the current_seq should increment.
+//
+static bool post_process(const Config& config, int current_seq)
+{
+ const std::string& dest_dir = config.destination_directory;
+ std::string processed_file_path =
+ config.config_directory + "/" + PROCESSED_FILENAME;
+ std::string produced_file_path = dest_dir + "/" + PRODUCED_FILENAME;
+
+
+ std::set<int> processed;
+ FILE *fp = fopen(processed_file_path.c_str(), "r");
+ if (fp != NULL) {
+ int seq;
+ while(fscanf(fp, "%d\n", &seq) > 0) {
+ if (remove(android::base::StringPrintf(
+ "%s/perf.data.encoded.%d", dest_dir.c_str(),seq).c_str()) == 0) {
+ processed.insert(seq);
+ }
+ }
+ fclose(fp);
+ }
+
+ std::set<int> produced;
+ fp = fopen(produced_file_path.c_str(), "r");
+ if (fp != NULL) {
+ int seq;
+ while(fscanf(fp, "%d\n", &seq) > 0) {
+ if (processed.find(seq) == processed.end()) {
+ produced.insert(seq);
+ }
+ }
+ fclose(fp);
+ }
+
+ uint32_t maxLive = config.max_unprocessed_profiles;
+ if (produced.size() >= maxLive) {
+ return false;
+ }
+
+ produced.insert(current_seq);
+ fp = fopen(produced_file_path.c_str(), "w");
+ if (fp == NULL) {
+ PLOG(WARNING) << "Cannot write " << produced_file_path;
+ return false;
+ }
+ for (std::set<int>::const_iterator iter = produced.begin();
+ iter != produced.end(); ++iter) {
+ fprintf(fp, "%d\n", *iter);
+ }
+ fclose(fp);
+ chmod(produced_file_path.c_str(),
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
+ return true;
+}
+
+//
+// Initialization
+//
+
+static void init(ConfigReader &config)
+{
+ if (!config.readFile()) {
+ LOG(ERROR) << "unable to open configuration file " << config.getConfigFilePath();
+ }
+
+ CommonInit(static_cast<uint32_t>(config.getUnsignedValue("use_fixed_seed")),
+ config.getStringValue("destination_directory").c_str());
+
+ signal(SIGHUP, sig_hup);
+}
+
+//
+// Main routine:
+// 1. parse cmd line args
+// 2. read config file
+// 3. loop: {
+// sleep for a while
+// perform a profile collection
+// }
+//
+int perfprofd_main(int argc, char** argv, Config* config)
+{
+ ConfigReader config_reader;
+
+ LOG(INFO) << "starting Android Wide Profiling daemon";
+
+ parse_args(argc, argv);
+ init(config_reader);
+ config_reader.FillConfig(config);
+
+ if (!perf_file_to_convert.empty()) {
+ std::string encoded_path = perf_file_to_convert + ".encoded";
+ encode_to_proto(perf_file_to_convert, encoded_path.c_str(), *config, 0, nullptr);
+ return 0;
+ }
+
+ // Early exit if we're not supposed to run on this build flavor
+ if (!IsDebugBuild() && config->only_debug_build) {
+ LOG(INFO) << "early exit due to inappropriate build type";
+ return 0;
+ }
+
+ auto config_fn = [config]() {
+ return config;
+ };
+ auto reread_config = [&config_reader, config]() {
+ // Reread config file -- the uploader may have rewritten it as a result
+ // of a gservices change
+ config_reader.readFile();
+ config_reader.FillConfig(config);
+ };
+ int seq = 0;
+ auto handler = [&seq](android::perfprofd::PerfprofdRecord* proto, Config* handler_config) {
+ if (proto == nullptr) {
+ return false;
+ }
+ if (handler_config->send_to_dropbox) {
+ std::string error_msg;
+ if (!android::perfprofd::dropbox::SendToDropbox(proto,
+ handler_config->destination_directory,
+ &error_msg)) {
+ LOG(ERROR) << "Failed dropbox submission: " << error_msg;
+ return false;
+ }
+ } else {
+ std::string data_file_path(handler_config->destination_directory);
+ data_file_path += "/";
+ data_file_path += PERF_OUTPUT;
+ std::string path = android::base::StringPrintf("%s.encoded.%d", data_file_path.c_str(), seq);
+ if (!android::perfprofd::SerializeProtobuf(proto, path.c_str(), handler_config->compress)) {
+ return false;
+ }
+ if (!post_process(*handler_config, seq)) {
+ return false;
+ }
+ }
+ seq++;
+ return true;
+ };
+ ProfilingLoop(config_fn, reread_config, handler);
+
+ LOG(INFO) << "finishing Android Wide Profiling daemon";
+ return 0;
+}
diff --git a/perfprofd/perfprofd_cmdline.h b/perfprofd/perfprofd_cmdline.h
new file mode 100644
index 00000000..5a6b766c
--- /dev/null
+++ b/perfprofd/perfprofd_cmdline.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_EXTRAS_PERFPROFD_PERFPROFD_CMDLINE_H_
+#define SYSTEM_EXTRAS_PERFPROFD_PERFPROFD_CMDLINE_H_
+
+// Semaphore file that indicates that the user is opting in
+#define SEMAPHORE_FILENAME "perf_profile_collection_enabled.txt"
+
+// File containing a list of sequence numbers corresponding to profiles
+// that have been processed/uploaded. Written by the GmsCore uploader,
+// within the GmsCore files directory.
+#define PROCESSED_FILENAME "perfprofd_processed.txt"
+
+// File containing a list of sequence numbers corresponding to profiles
+// that have been created by the perfprofd but not yet uploaded. Written
+// by perfprofd within the destination directory; consumed by GmsCore.
+#define PRODUCED_FILENAME "perfprofd_produced.txt"
+
+struct Config;
+
+// Main routine for perfprofd daemon
+int perfprofd_main(int argc, char **argv, Config* config);
+
+#endif // SYSTEM_EXTRAS_PERFPROFD_PERFPROFD_CMDLINE_H_
diff --git a/perfprofd/perfprofdcore.cc b/perfprofd/perfprofdcore.cc
index 4fa666d3..a5b4b480 100644
--- a/perfprofd/perfprofdcore.cc
+++ b/perfprofd/perfprofdcore.cc
@@ -29,10 +29,7 @@
#include <time.h>
#include <unistd.h>
-#include <cctype>
-#include <map>
#include <memory>
-#include <set>
#include <sstream>
#include <string>
@@ -40,7 +37,6 @@
#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/stringprintf.h>
-#include <android-base/unique_fd.h>
#ifdef __BIONIC__
#include <android-base/properties.h>
@@ -48,7 +44,7 @@
#include "perfprofd_record.pb.h"
-#include "configreader.h"
+#include "config.h"
#include "cpuconfig.h"
#include "perf_data_converter.h"
#include "perfprofdcore.h"
@@ -97,6 +93,8 @@ typedef enum {
} CKPROFILE_RESULT;
+static bool common_initialized = false;
+
//
// Are we running in the emulator? If so, stub out profile collection
// Starts as uninitialized (-1), then set to 1 or 0 at init time.
@@ -105,14 +103,8 @@ static int running_in_emulator = -1;
//
// Is this a debug build ('userdebug' or 'eng')?
-// Starts as uninitialized (-1), then set to 1 or 0 at init time.
-//
-static int is_debug_build = -1;
-
-//
-// Path to the perf file to convert and exit? Empty value is the default, daemon mode.
//
-static std::string perf_file_to_convert = "";
+static bool is_debug_build = false;
//
// Random number generator seed (set at startup time).
@@ -120,51 +112,9 @@ static std::string perf_file_to_convert = "";
static unsigned short random_seed[3];
//
-// SIGHUP handler. Sending SIGHUP to the daemon can be used to break it
-// out of a sleep() call so as to trigger a new collection (debugging)
-//
-static void sig_hup(int /* signum */)
-{
- LOG(WARNING) << "SIGHUP received";
-}
-
-//
-// Parse command line args. Currently supported flags:
-// * "-c PATH" sets the path of the config file to PATH.
-// * "-x PATH" reads PATH as a perf data file and saves it as a file in
-// perf_profile.proto format. ".encoded" suffix is appended to PATH to form
-// the output file path.
-//
-static void parse_args(int argc, char** argv)
-{
- int ac;
-
- for (ac = 1; ac < argc; ++ac) {
- if (!strcmp(argv[ac], "-c")) {
- if (ac >= argc-1) {
- LOG(ERROR) << "malformed command line: -c option requires argument)";
- continue;
- }
- ConfigReader::setConfigFilePath(argv[ac+1]);
- ++ac;
- } else if (!strcmp(argv[ac], "-x")) {
- if (ac >= argc-1) {
- LOG(ERROR) << "malformed command line: -x option requires argument)";
- continue;
- }
- perf_file_to_convert = argv[ac+1];
- ++ac;
- } else {
- LOG(ERROR) << "malformed command line: unknown option or arg " << argv[ac] << ")";
- continue;
- }
- }
-}
-
-//
// Convert a CKPROFILE_RESULT to a string
//
-const char *ckprofile_result_to_string(CKPROFILE_RESULT result)
+static const char *ckprofile_result_to_string(CKPROFILE_RESULT result)
{
switch (result) {
case DO_COLLECT_PROFILE:
@@ -183,29 +133,6 @@ const char *ckprofile_result_to_string(CKPROFILE_RESULT result)
}
//
-// Convert a PROFILE_RESULT to a string
-//
-const char *profile_result_to_string(PROFILE_RESULT result)
-{
- switch(result) {
- case OK_PROFILE_COLLECTION:
- return "profile collection succeeded";
- case ERR_FORK_FAILED:
- return "fork() system call failed";
- case ERR_PERF_RECORD_FAILED:
- return "perf record returned bad exit status";
- case ERR_PERF_ENCODE_FAILED:
- return "failure encoding perf.data to protobuf";
- case ERR_OPEN_ENCODED_FILE_FAILED:
- return "failed to open encoded perf file";
- case ERR_WRITE_ENCODED_FILE_FAILED:
- return "write to encoded perf file failed";
- default:
- return "unknown";
- }
-}
-
-//
// Check to see whether we should perform a profile collection
//
static CKPROFILE_RESULT check_profiling_enabled(const Config& config)
@@ -387,9 +314,9 @@ bool get_charging()
return result;
}
-bool postprocess_proc_stat_contents(const std::string &pscontents,
- long unsigned *idleticks,
- long unsigned *remainingticks)
+static bool postprocess_proc_stat_contents(const std::string &pscontents,
+ long unsigned *idleticks,
+ long unsigned *remainingticks)
{
long unsigned usertime, nicetime, systime, idletime, iowaittime;
long unsigned irqtime, softirqtime;
@@ -663,68 +590,6 @@ static void cleanup_destination_dir(const std::string& dest_dir)
}
//
-// Post-processes after profile is collected and converted to protobuf.
-// * GMS core stores processed file sequence numbers in
-// /data/data/com.google.android.gms/files/perfprofd_processed.txt
-// * Update /data/misc/perfprofd/perfprofd_produced.txt to remove the sequence
-// numbers that have been processed and append the current seq number
-// Returns true if the current_seq should increment.
-//
-static bool post_process(const Config& config, int current_seq)
-{
- const std::string& dest_dir = config.destination_directory;
- std::string processed_file_path =
- config.config_directory + "/" + PROCESSED_FILENAME;
- std::string produced_file_path = dest_dir + "/" + PRODUCED_FILENAME;
-
-
- std::set<int> processed;
- FILE *fp = fopen(processed_file_path.c_str(), "r");
- if (fp != NULL) {
- int seq;
- while(fscanf(fp, "%d\n", &seq) > 0) {
- if (remove(android::base::StringPrintf(
- "%s/perf.data.encoded.%d", dest_dir.c_str(),seq).c_str()) == 0) {
- processed.insert(seq);
- }
- }
- fclose(fp);
- }
-
- std::set<int> produced;
- fp = fopen(produced_file_path.c_str(), "r");
- if (fp != NULL) {
- int seq;
- while(fscanf(fp, "%d\n", &seq) > 0) {
- if (processed.find(seq) == processed.end()) {
- produced.insert(seq);
- }
- }
- fclose(fp);
- }
-
- uint32_t maxLive = config.max_unprocessed_profiles;
- if (produced.size() >= maxLive) {
- return false;
- }
-
- produced.insert(current_seq);
- fp = fopen(produced_file_path.c_str(), "w");
- if (fp == NULL) {
- PLOG(WARNING) << "Cannot write " << produced_file_path;
- return false;
- }
- for (std::set<int>::const_iterator iter = produced.begin();
- iter != produced.end(); ++iter) {
- fprintf(fp, "%d\n", *iter);
- }
- fclose(fp);
- chmod(produced_file_path.c_str(),
- S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH);
- return true;
-}
-
-//
// Collect a perf profile. Steps for this operation are:
// - kick off 'perf record'
// - read perf.data, convert to protocol buf
@@ -852,7 +717,7 @@ static void set_seed(uint32_t use_fixed_seed)
random_seed[2] = (random_seed[0] ^ random_seed[1]);
}
-static void CommonInit(uint32_t use_fixed_seed, const char* dest_dir) {
+void CommonInit(uint32_t use_fixed_seed, const char* dest_dir) {
// Children of init inherit an artificially low OOM score -- this is not
// desirable for perfprofd (its OOM score should be on par with
// other user processes).
@@ -874,27 +739,13 @@ static void CommonInit(uint32_t use_fixed_seed, const char* dest_dir) {
running_in_emulator = false;
is_debug_build = true;
#endif
-}
-//
-// Initialization
-//
-static void init(const Config& config)
-{
- // TODO: Consider whether we want to clean things or just overwrite.
- CommonInit(config.use_fixed_seed, nullptr);
+ common_initialized = true;
}
-static void init(ConfigReader &config)
-{
- if (!config.readFile()) {
- LOG(ERROR) << "unable to open configuration file " << config.getConfigFilePath();
- }
-
- CommonInit(static_cast<uint32_t>(config.getUnsignedValue("use_fixed_seed")),
- config.getStringValue("destination_directory").c_str());
-
- signal(SIGHUP, sig_hup);
+bool IsDebugBuild() {
+ CHECK(common_initialized);
+ return is_debug_build;
}
template <typename ConfigFn, typename UpdateFn>
@@ -952,7 +803,7 @@ static void ProfilingLoopImpl(ConfigFn config, UpdateFn update, HandlerFn handle
}
void ProfilingLoop(Config& config, HandlerFn handler) {
- init(config);
+ CommonInit(config.use_fixed_seed, nullptr);
auto config_fn = [&config]() {
return &config;;
@@ -962,67 +813,8 @@ void ProfilingLoop(Config& config, HandlerFn handler) {
ProfilingLoopImpl(config_fn, do_nothing, handler);
}
-//
-// Main routine:
-// 1. parse cmd line args
-// 2. read config file
-// 3. loop: {
-// sleep for a while
-// perform a profile collection
-// }
-//
-int perfprofd_main(int argc, char** argv, Config* config)
-{
- ConfigReader config_reader;
-
- LOG(INFO) << "starting Android Wide Profiling daemon";
-
- parse_args(argc, argv);
- init(config_reader);
- config_reader.FillConfig(config);
-
- if (!perf_file_to_convert.empty()) {
- std::string encoded_path = perf_file_to_convert + ".encoded";
- encode_to_proto(perf_file_to_convert, encoded_path.c_str(), *config, 0, nullptr);
- return 0;
- }
-
- // Early exit if we're not supposed to run on this build flavor
- if (is_debug_build != 1 && config->only_debug_build) {
- LOG(INFO) << "early exit due to inappropriate build type";
- return 0;
- }
-
- auto config_fn = [config]() {
- return config;
- };
- auto reread_config = [&config_reader, config]() {
- // Reread config file -- the uploader may have rewritten it as a result
- // of a gservices change
- config_reader.readFile();
- config_reader.FillConfig(config);
- };
- int seq = 0;
- auto handler = [&seq](android::perfprofd::PerfprofdRecord* proto, Config* handler_config) {
- if (proto == nullptr) {
- return false;
- }
- std::string data_file_path(handler_config->destination_directory);
- data_file_path += "/";
- data_file_path += PERF_OUTPUT;
- std::string path = android::base::StringPrintf("%s.encoded.%d", data_file_path.c_str(), seq);
- if (!android::perfprofd::SerializeProtobuf(proto, path.c_str(), handler_config->compress)) {
- return false;
- }
-
- if (!post_process(*handler_config, seq)) {
- return false;
- }
- seq++;
- return true;
- };
- ProfilingLoopImpl(config_fn, reread_config, handler);
-
- LOG(INFO) << "finishing Android Wide Profiling daemon";
- return 0;
+void ProfilingLoop(std::function<Config*()> config_fn,
+ std::function<void()> update_fn,
+ HandlerFn handler) {
+ ProfilingLoopImpl(config_fn, update_fn, handler);
}
diff --git a/perfprofd/perfprofdcore.h b/perfprofd/perfprofdcore.h
index 73a9c567..2adf114d 100644
--- a/perfprofd/perfprofdcore.h
+++ b/perfprofd/perfprofdcore.h
@@ -29,21 +29,7 @@ namespace perfprofd {
struct Symbolizer;
}
-// Semaphore file that indicates that the user is opting in
-#define SEMAPHORE_FILENAME "perf_profile_collection_enabled.txt"
-
-// File containing a list of sequence numbers corresponding to profiles
-// that have been processed/uploaded. Written by the GmsCore uploader,
-// within the GmsCore files directory.
-#define PROCESSED_FILENAME "perfprofd_processed.txt"
-
-// File containing a list of sequence numbers corresponding to profiles
-// that have been created by the perfprofd but not yet uploaded. Written
-// by perfprofd within the destination directory; consumed by GmsCore.
-#define PRODUCED_FILENAME "perfprofd_produced.txt"
-
-// Main routine for perfprofd daemon
-extern int perfprofd_main(int argc, char **argv, Config* config);
+void CommonInit(uint32_t use_fixed_seed, const char* dest_dir);
//
// This enumeration holds the results of what happened when on an
@@ -86,6 +72,9 @@ using HandlerFn = std::function<bool(android::perfprofd::PerfprofdRecord* proto,
Config* config)>;
void ProfilingLoop(Config& config, HandlerFn handler);
+void ProfilingLoop(std::function<Config*()> config_fn,
+ std::function<void()> update_fn,
+ HandlerFn handler);
//
// Exposed for unit testing
@@ -95,4 +84,6 @@ extern bool get_booting();
extern bool get_charging();
extern bool get_camera_active();
+bool IsDebugBuild();
+
#endif
diff --git a/perfprofd/perfprofdmain.cc b/perfprofd/perfprofdmain.cc
index 403e0253..0f9f53e9 100644
--- a/perfprofd/perfprofdmain.cc
+++ b/perfprofd/perfprofdmain.cc
@@ -21,6 +21,7 @@
#include "config.h"
#include "perfprofd_binder.h"
+#include "perfprofd_cmdline.h"
#include "perfprofdcore.h"
extern int perfprofd_main(int argc, char** argv, Config* config);
diff --git a/perfprofd/tests/Android.bp b/perfprofd/tests/Android.bp
index 7d0b7061..50d9ca83 100644
--- a/perfprofd/tests/Android.bp
+++ b/perfprofd/tests/Android.bp
@@ -50,6 +50,11 @@ cc_test {
required: [
"simpleperf",
],
+ shared_libs: [
+ "libbinder",
+ "libservices",
+ "libutils",
+ ],
},
},
diff --git a/perfprofd/tests/perfprofd_test.cc b/perfprofd/tests/perfprofd_test.cc
index 61eb09d2..79f8ea64 100644
--- a/perfprofd/tests/perfprofd_test.cc
+++ b/perfprofd/tests/perfprofd_test.cc
@@ -43,6 +43,7 @@
#include "configreader.h"
#include "map_utils.h"
#include "perfprofdcore.h"
+#include "perfprofd_cmdline.h"
#include "quipper_helper.h"
#include "symbolizer.h"
diff --git a/simpleperf/Android.mk b/simpleperf/Android.mk
index 01263516..05a94717 100644
--- a/simpleperf/Android.mk
+++ b/simpleperf/Android.mk
@@ -178,10 +178,12 @@ LOCAL_CFLAGS := $(simpleperf_cflags_target)
LOCAL_SRC_FILES := main.cpp
LOCAL_STATIC_LIBRARIES := libsimpleperf $(simpleperf_static_libraries_with_libc_target)
ifdef TARGET_2ND_ARCH
+ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
LOCAL_MULTILIB := both
LOCAL_MODULE_STEM_32 := simpleperf32
LOCAL_MODULE_STEM_64 := simpleperf
endif
+endif
LOCAL_FORCE_STATIC_EXECUTABLE := true
include $(LLVM_DEVICE_BUILD_MK)
include $(BUILD_EXECUTABLE)
diff --git a/verity/Android.mk b/verity/Android.mk
index 7ddf75f6..2e4ac273 100644
--- a/verity/Android.mk
+++ b/verity/Android.mk
@@ -1,19 +1,5 @@
LOCAL_PATH:= $(call my-dir)
-ifeq ($(HOST_OS),linux)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := verify_boot_signature
-LOCAL_SRC_FILES := verify_boot_signature.c
-LOCAL_CFLAGS := -Wall -Werror
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE_TAGS := optional
-LOCAL_SHARED_LIBRARIES := libcrypto
-LOCAL_C_INCLUDES += system/core/mkbootimg
-include $(BUILD_HOST_EXECUTABLE)
-
-endif # HOST_OS == linux
-
include $(CLEAR_VARS)
LOCAL_MODULE := generate_verity_key
LOCAL_SRC_FILES := generate_verity_key.c
diff --git a/verity/BootSignature.java b/verity/BootSignature.java
index 3cf94990..10171c31 100644
--- a/verity/BootSignature.java
+++ b/verity/BootSignature.java
@@ -72,6 +72,11 @@ public class BootSignature extends ASN1Object
private PublicKey publicKey;
private static final int FORMAT_VERSION = 1;
+ /**
+ * Offset of recovery DTBO length in a boot image header of version greater than
+ * or equal to 1.
+ */
+ private static final int BOOT_IMAGE_HEADER_V1_RECOVERY_DTBO_SIZE_OFFSET = 1632;
/**
* Initializes the object for signing an image file
@@ -209,6 +214,22 @@ public class BootSignature extends ASN1Object
+ ((ramdskSize + pageSize - 1) / pageSize) * pageSize
+ ((secondSize + pageSize - 1) / pageSize) * pageSize;
+ int headerVersion = image.getInt(); // boot image header version
+ if (headerVersion > 0) {
+ image.position(BOOT_IMAGE_HEADER_V1_RECOVERY_DTBO_SIZE_OFFSET);
+ int recoveryDtboLength = image.getInt();
+ length += ((recoveryDtboLength + pageSize - 1) / pageSize) * pageSize;
+
+ image.getLong(); // recovery_dtbo address
+ if (headerVersion == 1) {
+ int headerSize = image.getInt();
+ if (image.position() != headerSize) {
+ throw new IllegalArgumentException(
+ "Invalid image header: invalid header length");
+ }
+ }
+ }
+
length = ((length + pageSize - 1) / pageSize) * pageSize;
if (length <= 0) {
diff --git a/verity/verify_boot_signature.c b/verity/verify_boot_signature.c
deleted file mode 100644
index e1c53d81..00000000
--- a/verity/verify_boot_signature.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define _LARGEFILE64_SOURCE
-
-#include <endian.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <errno.h>
-
-#include <openssl/asn1.h>
-#include <openssl/asn1t.h>
-#include <openssl/crypto.h>
-#include <openssl/err.h>
-#include <openssl/evp.h>
-#include <openssl/pem.h>
-#include <openssl/rsa.h>
-#include <openssl/x509.h>
-
-#include "bootimg.h"
-
-#define FORMAT_VERSION 1
-#define BUFFER_SIZE (1024 * 1024)
-
-typedef struct {
- ASN1_STRING *target;
- ASN1_INTEGER *length;
-} AuthAttrs;
-
-ASN1_SEQUENCE(AuthAttrs) = {
- ASN1_SIMPLE(AuthAttrs, target, ASN1_PRINTABLE),
- ASN1_SIMPLE(AuthAttrs, length, ASN1_INTEGER)
-} ASN1_SEQUENCE_END(AuthAttrs)
-
-IMPLEMENT_ASN1_FUNCTIONS(AuthAttrs)
-
-typedef struct {
- ASN1_INTEGER *formatVersion;
- X509 *certificate;
- X509_ALGOR *algorithmIdentifier;
- AuthAttrs *authenticatedAttributes;
- ASN1_OCTET_STRING *signature;
-} BootSignature;
-
-ASN1_SEQUENCE(BootSignature) = {
- ASN1_SIMPLE(BootSignature, formatVersion, ASN1_INTEGER),
- ASN1_SIMPLE(BootSignature, certificate, X509),
- ASN1_SIMPLE(BootSignature, algorithmIdentifier, X509_ALGOR),
- ASN1_SIMPLE(BootSignature, authenticatedAttributes, AuthAttrs),
- ASN1_SIMPLE(BootSignature, signature, ASN1_OCTET_STRING)
-} ASN1_SEQUENCE_END(BootSignature)
-
-IMPLEMENT_ASN1_FUNCTIONS(BootSignature)
-
-static BIO *g_error = NULL;
-
-/**
- * Rounds n up to the nearest multiple of page_size
- * @param n The value to round
- * @param page_size Page size
- */
-static uint64_t page_align(uint64_t n, uint64_t page_size)
-{
- return (((n + page_size - 1) / page_size) * page_size);
-}
-
-/**
- * Calculates the offset to the beginning of the BootSignature block
- * based on the boot image header. The signature will start after the
- * the boot image contents.
- * @param fd File descriptor to the boot image
- * @param offset Receives the offset in bytes
- */
-static int get_signature_offset(int fd, off64_t *offset)
-{
- struct boot_img_hdr hdr;
-
- if (!offset) {
- return -1;
- }
-
- if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
- return -1;
- }
-
- if (memcmp(BOOT_MAGIC, hdr.magic, BOOT_MAGIC_SIZE) != 0) {
- printf("Invalid boot image: missing magic\n");
- return -1;
- }
-
- if (!hdr.page_size) {
- printf("Invalid boot image: page size must be non-zero\n");
- return -1;
- }
-
- *offset = page_align(hdr.page_size
- + page_align(hdr.kernel_size, hdr.page_size)
- + page_align(hdr.ramdisk_size, hdr.page_size)
- + page_align(hdr.second_size, hdr.page_size),
- hdr.page_size);
-
- return 0;
-}
-
-/**
- * Reads and parses the ASN.1 BootSignature block from the given offset
- * @param fd File descriptor to the boot image
- * @param offset Offset from the beginning of file to the signature
- * @param bs Pointer to receive the BootImage structure
- */
-static int read_signature(int fd, off64_t offset, BootSignature **bs)
-{
- BIO *in = NULL;
-
- if (!bs) {
- return -1;
- }
-
- if (lseek64(fd, offset, SEEK_SET) == -1) {
- return -1;
- }
-
- if ((in = BIO_new_fd(fd, BIO_NOCLOSE)) == NULL) {
- ERR_print_errors(g_error);
- return -1;
- }
-
- if ((*bs = ASN1_item_d2i_bio(ASN1_ITEM_rptr(BootSignature), in, bs)) == NULL) {
- ERR_print_errors(g_error);
- BIO_free(in);
- return -1;
- }
-
- BIO_free(in);
- return 0;
-}
-
-/**
- * Validates the format of the boot signature block, and checks that
- * the length in authenticated attributes matches the actual length of
- * the image.
- * @param bs The boot signature block to validate
- * @param length The actual length of the boot image without the signature
- */
-static int validate_signature_block(const BootSignature *bs, uint64_t length)
-{
- BIGNUM expected;
- BIGNUM value;
- int rc = -1;
-
- if (!bs) {
- return -1;
- }
-
- BN_init(&expected);
- BN_init(&value);
-
- /* Confirm that formatVersion matches our supported version */
- if (!BN_set_word(&expected, FORMAT_VERSION)) {
- ERR_print_errors(g_error);
- goto vsb_done;
- }
-
- ASN1_INTEGER_to_BN(bs->formatVersion, &value);
-
- if (BN_cmp(&expected, &value) != 0) {
- printf("Unsupported signature version\n");
- goto vsb_done;
- }
-
- BN_clear(&expected);
- BN_clear(&value);
-
- /* Confirm that the length of the image matches with the length in
- the authenticated attributes */
- length = htobe64(length);
- BN_bin2bn((const unsigned char *) &length, sizeof(length), &expected);
-
- ASN1_INTEGER_to_BN(bs->authenticatedAttributes->length, &value);
-
- if (BN_cmp(&expected, &value) != 0) {
- printf("Image length doesn't match signature attributes\n");
- goto vsb_done;
- }
-
- rc = 0;
-
-vsb_done:
- BN_free(&expected);
- BN_free(&value);
-
- return rc;
-}
-
-/**
- * Creates a SHA-256 hash from the boot image contents and the encoded
- * authenticated attributes.
- * @param fd File descriptor to the boot image
- * @param length Length of the boot image without the signature block
- * @param aa Pointer to AuthAttrs
- * @param digest Pointer to a buffer where the hash is written
- */
-static int hash_image(int fd, uint64_t length, const AuthAttrs *aa,
- unsigned char *digest)
-{
- EVP_MD_CTX *ctx = NULL;
- int rc = -1;
-
- ssize_t bytes = 0;
- unsigned char *attrs = NULL;
- unsigned char *buffer = NULL;
- unsigned char *p = NULL;
- uint64_t total = 0;
-
- if (!aa || !digest) {
- goto hi_done;
- }
-
- if ((buffer = malloc(BUFFER_SIZE)) == NULL) {
- goto hi_done;
- }
-
- if (lseek64(fd, 0, SEEK_SET) != 0) {
- goto hi_done;
- }
-
- if ((ctx = EVP_MD_CTX_create()) == NULL) {
- ERR_print_errors(g_error);
- goto hi_done;
- }
-
- EVP_DigestInit(ctx, EVP_sha256());
-
- do {
- bytes = BUFFER_SIZE;
-
- if ((length - total) < BUFFER_SIZE) {
- bytes = length - total;
- }
-
- if ((bytes = read(fd, buffer, bytes)) == -1) {
- printf("%s\n", strerror(errno));
- goto hi_done;
- }
-
- EVP_DigestUpdate(ctx, buffer, bytes);
- total += bytes;
- } while (total < length);
-
- if ((bytes = i2d_AuthAttrs((AuthAttrs *) aa, NULL)) < 0) {
- ERR_print_errors(g_error);
- goto hi_done;
- }
-
- if ((attrs = OPENSSL_malloc(bytes)) == NULL) {
- ERR_print_errors(g_error);
- goto hi_done;
- }
-
- p = attrs;
-
- if (i2d_AuthAttrs((AuthAttrs *) aa, &p) < 0) {
- ERR_print_errors(g_error);
- goto hi_done;
- }
-
- EVP_DigestUpdate(ctx, attrs, bytes);
- EVP_DigestFinal(ctx, digest, NULL);
-
- rc = 0;
-
-hi_done:
- if (buffer) {
- free(buffer);
- }
-
- if (ctx) {
- EVP_MD_CTX_destroy(ctx);
- }
-
- if (attrs) {
- OPENSSL_free(attrs);
- }
-
- return rc;
-}
-
-/**
- * Verifies the RSA signature against the pubkey (certificate) in the
- * BootSignature, and additionally against the pubkey file if provided.
- * @param fd File descriptor to the boot image
- * @param length Length of the boot image without the signature block
- * @param bs The boot signature block
- * @param pkey The external pubkey file
- */
-static int verify_signature(int fd, uint64_t length, const BootSignature *bs,
- const char *pkey)
-{
- int rc = -1;
- EVP_PKEY *pkey_bs = NULL;
- RSA *rsa_bs = NULL;
- RSA *rsa_pkey = NULL;
- BIO *bio_pkey = NULL;
- unsigned char digest[SHA256_DIGEST_LENGTH];
-
- if (!bs) {
- goto vs_done;
- }
-
- if (hash_image(fd, length, bs->authenticatedAttributes, digest) == -1) {
- goto vs_done;
- }
-
- if ((pkey_bs = X509_get_pubkey(bs->certificate)) == NULL) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
-
- if ((rsa_bs = EVP_PKEY_get1_RSA(pkey_bs)) == NULL) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
-
- if (!RSA_verify(NID_sha256, digest, SHA256_DIGEST_LENGTH,
- bs->signature->data, bs->signature->length, rsa_bs)) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
-
- if (pkey) {
- if ((bio_pkey = BIO_new_file(pkey, "r")) == NULL) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
-
- if ((rsa_pkey = PEM_read_bio_RSA_PUBKEY(bio_pkey, NULL, NULL, NULL)) == NULL) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
-
- if (!RSA_verify(NID_sha256, digest, SHA256_DIGEST_LENGTH,
- bs->signature->data, bs->signature->length, rsa_pkey)) {
- ERR_print_errors(g_error);
- goto vs_done;
- }
- }
-
- rc = 0;
-
-vs_done:
- if (pkey_bs) {
- EVP_PKEY_free(pkey_bs);
- }
-
- if (rsa_bs) {
- RSA_free(rsa_bs);
- }
-
- if (bio_pkey) {
- BIO_free_all(bio_pkey);
- }
-
- if (rsa_pkey) {
- RSA_free(rsa_pkey);
- }
-
- return rc;
-}
-
-/**
- * Given the file name of a signed boot image, verifies the signature
- * @param image_file Name of the boot image file
- */
-static int verify(const char *image_file, const char *pkey)
-{
- BootSignature *bs = NULL;
- int fd = -1;
- int rc = 1;
- off64_t offset = 0;
-
- if (!image_file) {
- return rc;
- }
-
- if ((fd = open(image_file, O_RDONLY | O_LARGEFILE)) == -1) {
- return rc;
- }
-
- if (get_signature_offset(fd, &offset) == -1) {
- goto out;
- }
-
- if (read_signature(fd, offset, &bs) == -1) {
- goto out;
- }
-
- if (validate_signature_block(bs, offset) == -1) {
- goto out;
- }
-
- if (verify_signature(fd, offset, bs, pkey) == -1) {
- goto out;
- }
-
- printf("Signature is VALID\n");
- rc = 0;
-
-out:
- if (bs) {
- BootSignature_free(bs);
- }
-
- if (fd != -1) {
- close(fd);
- }
-
- return rc;
-}
-
/**
 * Prints command-line usage to stdout.
 * (void) parameter list: an empty () is an old-style non-prototype
 * declaration in C and accepts any arguments unchecked.
 */
static void usage(void)
{
    printf("Usage: verify_boot_signature <path-to-boot-image>\n"
           " verify_boot_signature <path-to-boot-image> <pubkey>\n");
}
-
-int main(int argc, char *argv[])
-{
- if (argc != 2 && argc != 3) {
- usage();
- return 1;
- }
-
- /* BIO descriptor for logging OpenSSL errors to stderr */
- if ((g_error = BIO_new_fd(STDERR_FILENO, BIO_NOCLOSE)) == NULL) {
- printf("Failed to allocate a BIO handle for error output\n");
- return 1;
- }
-
- ERR_load_crypto_strings();
-
- const char *pkey = (argc == 2) ? NULL : argv[2];
-
- return verify(argv[1], pkey);
-}