#! /usr/bin/env python3

# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import getopt
import hashlib
import posixpath
import signal
import struct
import sys


def usage(argv0):
  print("""
Usage: %s [-v] [-s] [-c <filename>] sparse_image_file ...
 -v             verbose output
 -s             show sha1sum of data blocks
 -c <filename>  save .csv file of blocks
""" % (argv0))
  sys.exit(2)


def main():
  signal.signal(signal.SIGPIPE, signal.SIG_DFL)

  me = posixpath.basename(sys.argv[0])

  # Parse the command line
  verbose = 0          # -v
  showhash = 0         # -s
  csvfilename = None   # -c
  try:
    opts, args = getopt.getopt(sys.argv[1:],
                               "vsc:",
                               ["verbose", "showhash", "csvfile="])
  except getopt.GetoptError as e:
    print(e)
    usage(me)
  for o, a in opts:
    if o in ("-v", "--verbose"):
      verbose += 1
    elif o in ("-s", "--showhash"):
      showhash = True
    elif o in ("-c", "--csvfile"):
      csvfilename = a
    else:
      print("Unrecognized option \"%s\"" % (o))
      usage(me)

  if not args:
    print("No sparse_image_file specified")
    usage(me)

  if csvfilename:
    # csv.writer needs a text-mode file in Python 3; newline="" avoids
    # blank rows on platforms that translate line endings.
    csvfile = open(csvfilename, "w", newline="")
    csvwriter = csv.writer(csvfile)

  output = verbose or csvfilename or showhash

  for path in args:
    FH = open(path, "rb")

    # Sparse image file header (28 bytes): magic, major/minor version,
    # file header size, chunk header size, block size, total output blocks,
    # total chunks, image checksum.
    header_bin = FH.read(28)
    header = struct.unpack("<I4H4I", header_bin)
    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    blk_sz = header[5]
    total_blks = header[6]
    total_chunks = header[7]
    image_checksum = header[8]

    if magic != 0xED26FF3A:
      print("%s: %s: Magic should be 0xED26FF3A but is 0x%08X"
            % (me, path, magic))
      continue
    if major_version != 1 or minor_version != 0:
      print("%s: %s: I only know about version 1.0, but this is version %u.%u"
            % (me, path, major_version, minor_version))
      continue
    if file_hdr_sz != 28:
      print("%s: %s: The file header size was expected to be 28, but is %u."
            % (me, path, file_hdr_sz))
      continue
    if chunk_hdr_sz != 12:
      print("%s: %s: The chunk header size was expected to be 12, but is %u."
            % (me, path, chunk_hdr_sz))
      continue

    print("%s: Total of %u %u-byte output blocks in %u input chunks."
          % (path, total_blks, blk_sz, total_chunks))

    if not output:
      continue

    if verbose > 0:
      print("            input_bytes      output_blocks")
      print("chunk    offset     number  offset   number")

    if csvfilename:
      csvwriter.writerow(["chunk", "input offset", "input bytes",
                          "output offset", "output blocks", "type", "hash"])

    offset = 0
    for i in range(1, total_chunks + 1):
      # Chunk header (12 bytes): chunk type, reserved, output size in blocks,
      # total chunk size in bytes (including this header).
      header_bin = FH.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12
      curhash = ""
      curtype = ""
      curpos = FH.tell()

      if verbose > 0:
        print("%4u %10u %10u %7u %7u" % (i, curpos, data_sz, offset, chunk_sz),
              end=" ")

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          print("Raw chunk input size (%u) does not match output size (%u)"
                % (data_sz, chunk_sz * blk_sz))
          break
        else:
          curtype = "Raw data"
          data = FH.read(data_sz)
          if showhash:
            h = hashlib.sha1()
            h.update(data)
            curhash = h.hexdigest()
      elif chunk_type == 0xCAC2:
        if data_sz != 4:
          print("Fill chunk should have 4 bytes of fill, but this has %u"
                % (data_sz))
          break
        else:
          fill_bin = FH.read(4)
          fill = struct.unpack("<I", fill_bin)[0]
          curtype = "Fill with 0x%08X" % (fill)
          if showhash:
            h = hashlib.sha1()
            # The 4-byte fill pattern is repeated to cover one output block.
            data = fill_bin * (blk_sz // 4)
            h.update(data)
            curhash = h.hexdigest()
      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          print("Don't care chunk input size is non-zero (%u)" % (data_sz))
          break
        else:
          curtype = "Don't care"
      elif chunk_type == 0xCAC4:
        if data_sz != 4:
          print("CRC32 chunk should have 4 bytes of CRC, but this has %u"
                % (data_sz))
          break
        else:
          crc_bin = FH.read(4)
          crc = struct.unpack("<I", crc_bin)[0]
          curtype = "Unverified CRC32 0x%08X" % (crc)
      else:
        print("Unknown chunk type 0x%04X" % (chunk_type))
        break

      if verbose > 0:
        print("%-18s" % (curtype), end=" ")
        if verbose > 1:
          header = struct.unpack("<12B", header_bin)
          print(" (%02X%02X %02X%02X %02X%02X%02X%02X %02X%02X%02X%02X)"
                % (header[0], header[1], header[2], header[3],
                   header[4], header[5], header[6], header[7],
                   header[8], header[9], header[10], header[11]),
                end=" ")
        print(curhash)

      if csvfilename:
        csvwriter.writerow([i, curpos, data_sz, offset, chunk_sz,
                            curtype, curhash])

      offset += chunk_sz

    if verbose > 0:
      print("     %10u            %7u          End" % (FH.tell(), offset))

    if total_blks != offset:
      print("The header said we should have %u output blocks, but we saw %u"
            % (total_blks, offset))

    junk_len = len(FH.read())
    if junk_len:
      print("There were %u bytes of extra data at the end of the file."
            % (junk_len))

  if csvfilename:
    csvfile.close()

  sys.exit(0)

if __name__ == "__main__":
  main()
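
# Example invocation (a sketch; the script name "simg_dump.py" and the file
# names "system.img" and "blocks.csv" are illustrative, not part of this tool):
#
#   python3 simg_dump.py -v -s -c blocks.csv system.img
#
# With -v the tool prints one row per chunk (input offset and byte count,
# output block offset and count, chunk type) plus a trailing "End" row;
# -s appends the SHA-1 of each chunk's data, and -c writes the same rows
# to blocks.csv.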