path: root/tools/generate-notice-files.py
#!/usr/bin/env python
#
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: generate-notice-files --text-output [plain text output file] \
               --html-output [html output file] \
               --xml-output [xml output file] \
               -t [file title] -s [directory of notices]

Generate the Android notice files, including text, HTML, and XML versions.

-h to display this usage message and exit.
"""
from collections import defaultdict
import argparse
import hashlib
import itertools
import os
import os.path
import re
import sys

MD5_BLOCKSIZE = 1024 * 1024
HTML_ESCAPE_TABLE = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
    }

def hexify(s):
    return ("%02x"*len(s)) % tuple(map(ord, s))

def md5sum(filename):
    """Calculate an MD5 of the file given by FILENAME,
    and return hex digest as a string.
    Output should be compatible with md5sum command"""
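    # For example (a property of MD5 itself, not of this script): an empty
    # file hashes to "d41d8cd98f00b204e9800998ecf8427e", matching `md5sum`.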

    f = open(filename, "rb")
    sum = hashlib.md5()
    while 1:
        block = f.read(MD5_BLOCKSIZE)
        if not block:
            break
        sum.update(block)
    f.close()
    return hexify(sum.digest())


def html_escape(text):
    """Produce entities within text."""
    return "".join(HTML_ESCAPE_TABLE.get(c,c) for c in text)

HTML_OUTPUT_CSS="""
<style type="text/css">
body { padding: 0; font-family: sans-serif; }
.same-license { background-color: #eeeeee; border-top: 20px solid white; padding: 10px; }
.label { font-weight: bold; }
.file-list { margin-left: 1em; color: blue; }
</style>
"""

def combine_notice_files_html(file_hash, input_dirs, output_filename):
    """Combine notice files in FILE_HASH and output a HTML version to OUTPUT_FILENAME."""

    SRC_DIR_STRIP_RE = re.compile("(?:" + "|".join(input_dirs) + ")(/.*).txt")

    # Set up a filename-to-row-id table (anchors inside tables don't work in
    # most browsers, but hrefs to table row ids do).
    id_table = {}
    id_count = 0
    for value in file_hash:
        for filename in value:
             id_table[filename] = id_count
        id_count += 1

    # Open the output file, and output the header pieces
    output_file = open(output_filename, "wb")

    print >> output_file, "<html><head>"
    print >> output_file, HTML_OUTPUT_CSS
    print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'

    # Output our table of contents
    print >> output_file, '<div class="toc">'
    print >> output_file, "<ul>"

    # Flatten the list of lists into a single list of filenames
    sorted_filenames = sorted(itertools.chain.from_iterable(file_hash))

    # Print out a nice table of contents
    for filename in sorted_filenames:
        stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
        print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)

    print >> output_file, "</ul>"
    print >> output_file, "</div><!-- table of contents -->"
    # Output the individual notice file lists
    print >> output_file, '<table cellpadding="0" cellspacing="0" border="0">'
    for value in file_hash:
        print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
        print >> output_file, '<div class="label">Notices for file(s):</div>'
        print >> output_file, '<div class="file-list">'
        for filename in value:
            print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
        print >> output_file, "</div><!-- file-list -->"
        print >> output_file
        print >> output_file, '<pre class="license-text">'
        print >> output_file, html_escape(open(value[0]).read())
        print >> output_file, "</pre><!-- license-text -->"
        print >> output_file, "</td></tr><!-- same-license -->"
        print >> output_file
        print >> output_file
        print >> output_file

    # Finish off the file output
    print >> output_file, "</table>"
    print >> output_file, "</body></html>"
    output_file.close()

def combine_notice_files_text(file_hash, input_dirs, output_filename, file_title):
    """Combine notice files in FILE_HASH and output a text version to OUTPUT_FILENAME."""

    SRC_DIR_STRIP_RE = re.compile("(?:" + "|".join(input_dirs) + ")(/.*).txt")
    output_file = open(output_filename, "wb")
    print >> output_file, file_title
    for value in file_hash:
      print >> output_file, "============================================================"
      print >> output_file, "Notices for file(s):"
      for filename in value:
        print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
      print >> output_file, "------------------------------------------------------------"
      print >> output_file, open(value[0]).read()
    output_file.close()

def combine_notice_files_xml(files_with_same_hash, input_dirs, output_filename):
    """Combine notice files in FILE_HASH and output a XML version to OUTPUT_FILENAME."""

    SRC_DIR_STRIP_RE = re.compile("(?:" + "|".join(input_dirs) + ")(/.*).txt")

    # Set up a filename-to-content-id table; the content id for each file is
    # the md5 of the notice text shared by its group.
    id_table = {}
    for file_key in files_with_same_hash.keys():
        for filename in files_with_same_hash[file_key]:
             id_table[filename] = file_key

    # Open the output file, and output the header pieces
    output_file = open(output_filename, "wb")

    print >> output_file, '<?xml version="1.0" encoding="utf-8"?>'
    print >> output_file, "<licenses>"

    # Sort the filenames for deterministic output
    sorted_filenames = sorted(id_table.keys())

    # Emit one <file-name> entry per notice file
    for filename in sorted_filenames:
        stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
        print >> output_file, '<file-name contentId="%s">%s</file-name>' % (id_table.get(filename), stripped_filename)

    print >> output_file
    print >> output_file

    processed_file_keys = []
    # Output the individual notice file lists
    for filename in sorted_filenames:
        file_key = id_table.get(filename)
        if file_key in processed_file_keys:
            continue
        processed_file_keys.append(file_key)

        print >> output_file, '<file-content contentId="%s"><![CDATA[%s]]></file-content>' % (file_key, html_escape(open(filename).read()))
        print >> output_file

    # Finish off the file output
    print >> output_file, "</licenses>"
    output_file.close()

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--text-output', required=True,
        help='The text output file path.')
    parser.add_argument(
        '--html-output',
        help='The html output file path.')
    parser.add_argument(
        '--xml-output',
        help='The xml output file path.')
    parser.add_argument(
        '-t', '--title', required=True,
        help='The file title.')
    parser.add_argument(
        '-s', '--source-dir', required=True, action='append',
        help='The directory containing notices.')
    parser.add_argument(
        '-i', '--included-subdirs', action='append',
        help='The subdirectories which should be included.')
    parser.add_argument(
        '-e', '--excluded-subdirs', action='append',
        help='The subdirectories which should be excluded.')
    return parser.parse_args()

def main(argv):
    args = get_args()

    txt_output_file = args.text_output
    html_output_file = args.html_output
    xml_output_file = args.xml_output
    file_title = args.title
    included_subdirs = []
    excluded_subdirs = []
    if args.included_subdirs is not None:
        included_subdirs = args.included_subdirs
    if args.excluded_subdirs is not None:
        excluded_subdirs = args.excluded_subdirs

    input_dirs = [os.path.normpath(source_dir) for source_dir in args.source_dir]
    # Find all the notice files and md5 them
    files_with_same_hash = defaultdict(list)
    for input_dir in input_dirs:
        for root, dir, files in os.walk(input_dir):
            for file in files:
                matched = True
                if len(included_subdirs) > 0:
                    matched = False
                    for subdir in included_subdirs:
                        if (root == (input_dir + '/' + subdir) or
                            root.startswith(input_dir + '/' + subdir + '/')):
                            matched = True
                            break
                elif len(excluded_subdirs) > 0:
                    for subdir in excluded_subdirs:
                        if (root == (input_dir + '/' + subdir) or
                            root.startswith(input_dir + '/' + subdir + '/')):
                            matched = False
                            break
                if matched and file.endswith(".txt"):
                    filename = os.path.join(root, file)
                    file_md5sum = md5sum(filename)
                    files_with_same_hash[file_md5sum].append(filename)

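    # Group identical notices: filesets is a list of lists such as
    # [["<dir>/a.txt", "<dir>/b.txt"], ...], where every file in an inner list
    # has the same md5 (hence the same notice text); groups are ordered by
    # hash and files within a group by path.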
    filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
    combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)

    if html_output_file is not None:
        combine_notice_files_html(filesets, input_dirs, html_output_file)

    if xml_output_file is not None:
        combine_notice_files_xml(files_with_same_hash, input_dirs, xml_output_file)

if __name__ == "__main__":
    main(sys.argv)