author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-03-04 04:38:33 +0000
committer  Gerrit Code Review <noreply-gerritcodereview@google.com>  2022-03-04 04:38:33 +0000
commit     cff055cd81fa6a7c6c6988a1983f57442d52dbc8 (patch)
tree       d16a51b013b635a07e165898c3d41aeb41db3b27
parent     15b387746263de3414d14ee6aadf1181c388ceae (diff)
parent     3626a00be8d17abe88629eee2876b4a6dace1b35 (diff)
download   build-cff055cd81fa6a7c6c6988a1983f57442d52dbc8.tar.gz
Merge "Snap for 8253222 from f2a0110d365e5edd53cdcf37582bb3dfe405d150 to sdk-release" into sdk-release (tag: platform-tools-33.0.1)
-rw-r--r--  Changes.md | 30
-rw-r--r--  core/Makefile | 106
-rw-r--r--  core/base_rules.mk | 22
-rw-r--r--  core/board_config.mk | 1
-rw-r--r--  core/clear_vars.mk | 10
-rw-r--r--  core/combo/select.mk | 2
-rw-r--r--  core/definitions.mk | 77
-rw-r--r--  core/main.mk | 7
-rw-r--r--  core/notice_files.mk | 4
-rw-r--r--  core/product-graph.mk | 70
-rw-r--r--  core/product_config.mk | 3
-rw-r--r--  core/product_config.rbc | 37
-rw-r--r--  core/soong_config.mk | 4
-rw-r--r--  core/sysprop.mk | 8
-rw-r--r--  core/tasks/module-info.mk | 4
-rw-r--r--  core/version_defaults.mk | 13
-rw-r--r--  core/version_util.mk | 13
-rw-r--r--  target/board/ndk/BoardConfig.mk | 21
-rw-r--r--  target/board/ndk/README.md | 2
-rw-r--r--  target/product/AndroidProducts.mk | 1
-rw-r--r--  target/product/base_system.mk | 1
-rw-r--r--  target/product/core_no_zygote.mk | 30
-rw-r--r--  target/product/default_art_config.mk | 2
-rw-r--r--  target/product/ndk.mk | 21
-rw-r--r--  target/product/virtual_ab_ota/compression.mk | 1
-rwxr-xr-x  tools/buildinfo.sh | 2
-rw-r--r--  tools/compliance/Android.bp | 27
-rw-r--r--  tools/compliance/cmd/checkshare/checkshare_test.go | 2
-rw-r--r--  tools/compliance/cmd/htmlnotice/htmlnotice.go | 8
-rw-r--r--  tools/compliance/cmd/htmlnotice/htmlnotice_test.go | 2
-rw-r--r--  tools/compliance/cmd/shippedlibs/shippedlibs.go | 54
-rw-r--r--  tools/compliance/cmd/textnotice/textnotice.go | 2
-rw-r--r--  tools/compliance/noticeindex.go | 212
-rw-r--r--  tools/compliance/readgraph_test.go | 2
-rwxr-xr-x  tools/product_debug.py | 159
-rw-r--r--  tools/releasetools/Android.bp | 2
-rw-r--r--  tools/releasetools/OWNERS | 3
-rw-r--r--  tools/releasetools/add_img_to_target_files.py | 7
-rw-r--r--  tools/releasetools/apex_utils.py | 102
-rwxr-xr-x  tools/releasetools/build_image.py | 2
-rwxr-xr-x  tools/releasetools/check_ota_package_signature.py | 3
-rw-r--r--  tools/releasetools/check_partition_sizes.py | 3
-rwxr-xr-x  tools/releasetools/check_target_files_vintf.py | 5
-rw-r--r--  tools/releasetools/common.py | 70
-rwxr-xr-x  tools/releasetools/img_from_target_files.py | 3
-rwxr-xr-x  tools/releasetools/merge_target_files.py | 1019
-rwxr-xr-x  tools/releasetools/ota_from_target_files.py | 4
-rw-r--r--  tools/releasetools/ota_utils.py | 3
-rwxr-xr-x  tools/releasetools/sign_apex.py | 35
-rwxr-xr-x  tools/releasetools/sign_target_files_apks.py | 56
-rw-r--r--  tools/releasetools/test_common.py | 4
-rw-r--r--  tools/releasetools/test_merge_target_files.py | 134
-rw-r--r--  tools/releasetools/test_sign_apex.py | 18
-rw-r--r--  tools/releasetools/testdata/sepolicy.apex | bin 0 -> 303104 bytes
-rw-r--r--  tools/releasetools/verity_utils.py | 5
55 files changed, 1225 insertions(+), 1213 deletions(-)
diff --git a/Changes.md b/Changes.md
index 1ab005fa3c..5edb1d83fb 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,5 +1,35 @@
# Build System Changes for Android.mk Writers
+## Genrule starts disallowing directory inputs
+
+To better specify the inputs to the build, we are restricting use of directories
+as inputs to genrules.
+
+To fix existing uses, change the module's inputs to list the specific files it
+needs and update the command accordingly. For example:
+
+```
+genrule: {
+ name: "foo",
+ srcs: ["bar"],
+ cmd: "cp $(location bar)/*.xml $(gendir)",
+ ...
+}
+```
+
+would become
+
+```
+genrule: {
+ name: "foo",
+ srcs: ["bar/*.xml"],
+ cmd: "cp $(in) $(gendir)",
+ ...
+}
+
+`BUILD_BROKEN_INPUT_DIR_MODULES` can be used to allowlist specific modules whose
+genrules still have input directories.
+
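A minimal sketch of that escape hatch, assuming a hypothetical module `foo` whose genrule still consumes a directory; the variable belongs in the device's `BoardConfig.mk`:

```
# BoardConfig.mk (sketch) -- allow the genrule in hypothetical module
# "foo" to keep using a directory as an input for now.
BUILD_BROKEN_INPUT_DIR_MODULES := foo
```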
## Dexpreopt starts enforcing `<uses-library>` checks (for Java modules)
In order to construct correct class loader context for dexpreopt, build system
diff --git a/core/Makefile b/core/Makefile
index b9103abeae..f7b55e6a65 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -892,7 +892,12 @@ endif
# $1: boot image file name
# $2: boot image variant (boot, boot-debug, boot-test-harness)
define get-bootimage-partition-size
- $(BOARD_$(call to-upper,$(subst .img,,$(subst $(2),kernel,$(notdir $(1)))))_BOOTIMAGE_PARTITION_SIZE)
+$(BOARD_$(call to-upper,$(subst .img,,$(subst $(2),kernel,$(notdir $(1)))))_BOOTIMAGE_PARTITION_SIZE)
+endef
+
+# $1: partition size
+define get-partition-size-argument
+ $(if $(1),--partition_size $(1),--dynamic_partition_size)
endef
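For context, a sketch of how the new helper expands (sizes are illustrative): when the board defines a partition size, `avbtool add_hash_footer` receives `--partition_size`; when the size is empty, the footer is sized dynamically instead:

```
# Hypothetical expansions of get-partition-size-argument:
#   $(call get-partition-size-argument,0x02000000) -> --partition_size 0x02000000
#   $(call get-partition-size-argument,)           -> --dynamic_partition_size
```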
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
@@ -901,11 +906,9 @@ INTERNAL_BOOTIMAGE_ARGS := \
INTERNAL_INIT_BOOT_IMAGE_ARGS :=
-INTERNAL_BOOT_HAS_RAMDISK :=
ifneq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
ifneq ($(BUILDING_INIT_BOOT_IMAGE),true)
INTERNAL_BOOTIMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
- INTERNAL_BOOT_HAS_RAMDISK := true
else
INTERNAL_INIT_BOOT_IMAGE_ARGS += --ramdisk $(INSTALLED_RAMDISK_TARGET)
endif
@@ -968,7 +971,6 @@ endef
INTERNAL_GKI_CERTIFICATE_ARGS :=
INTERNAL_GKI_CERTIFICATE_DEPS :=
-INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE :=
ifdef BOARD_GKI_SIGNING_KEY_PATH
ifndef BOARD_GKI_SIGNING_ALGORITHM
$(error BOARD_GKI_SIGNING_ALGORITHM should be defined with BOARD_GKI_SIGNING_KEY_PATH)
@@ -989,13 +991,6 @@ ifdef BOARD_GKI_SIGNING_KEY_PATH
$(BOARD_GKI_SIGNING_KEY_PATH) \
$(AVBTOOL)
- ifdef INSTALLED_RAMDISK_TARGET
- INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE := \
- $(call intermediates-dir-for,PACKAGING,generic_ramdisk)/boot_signature
-
- $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE): $(INSTALLED_RAMDISK_TARGET) $(INTERNAL_GKI_CERTIFICATE_DEPS)
- $(call generate_generic_boot_image_certificate,$(INSTALLED_RAMDISK_TARGET),$@,generic_ramdisk,$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS))
- endif
endif
# Define these only if we are building boot
@@ -1013,25 +1008,24 @@ ifeq (true,$(BOARD_AVB_ENABLE))
# $1: boot image target
define build_boot_board_avb_enabled
$(eval kernel := $(call bootimage-to-kernel,$(1)))
+ $(MKBOOTIMG) --kernel $(kernel) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
$(if $(BOARD_GKI_SIGNING_KEY_PATH), \
+ $(eval boot_signature := $(call intermediates-dir-for,PACKAGING,generic_boot)/$(notdir $(1)).boot_signature) \
$(eval kernel_signature := $(call intermediates-dir-for,PACKAGING,generic_kernel)/$(notdir $(kernel)).boot_signature) \
+ $(call generate_generic_boot_image_certificate,$(1),$(boot_signature),boot,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
$(call generate_generic_boot_image_certificate,$(kernel),$(kernel_signature),generic_kernel,$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)) $(newline) \
- $(if $(INTERNAL_BOOT_HAS_RAMDISK), \
- cat $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE) >> $(kernel_signature) $(newline)))
- $(MKBOOTIMG) --kernel $(kernel) $(INTERNAL_BOOTIMAGE_ARGS) \
- $(if $(BOARD_GKI_SIGNING_KEY_PATH),--boot_signature "$(kernel_signature)",$(INTERNAL_MKBOOTIMG_VERSION_ARGS)) \
- $(BOARD_MKBOOTIMG_ARGS) --output $(1)
+ cat $(kernel_signature) >> $(boot_signature) $(newline) \
+ $(call assert-max-image-size,$(boot_signature),16 << 10) $(newline) \
+ truncate -s $$(( 16 << 10 )) $(boot_signature) $(newline) \
+ cat "$(boot_signature)" >> $(1))
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
- --partition_size $(call get-bootimage-partition-size,$(1),boot) \
+ $(call get-partition-size-argument,$(call get-bootimage-partition-size,$(1),boot)) \
--partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
endef
-ifdef INTERNAL_BOOT_HAS_RAMDISK
-$(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)
-endif
$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(INTERNAL_GKI_CERTIFICATE_DEPS)
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
@@ -1107,7 +1101,7 @@ $(INSTALLED_BOOTIMAGE_TARGET): $(INTERNAL_PREBUILT_BOOTIMAGE) $(AVBTOOL) $(BOARD
cp $(INTERNAL_PREBUILT_BOOTIMAGE) $@
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_BOOTIMAGE_PARTITION_SIZE)) \
--partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
else
@@ -1136,16 +1130,13 @@ ifdef BOARD_KERNEL_PAGESIZE
endif
ifeq ($(BOARD_AVB_ENABLE),true)
-$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)
$(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(AVBTOOL) $(BOARD_AVB_INIT_BOOT_KEY_PATH)
$(call pretty,"Target init_boot image: $@")
- $(MKBOOTIMG) $(INTERNAL_INIT_BOOT_IMAGE_ARGS) \
- $(if $(BOARD_GKI_SIGNING_KEY_PATH),--boot_signature "$(INTERNAL_GENERIC_RAMDISK_BOOT_SIGNATURE)",$(INTERNAL_MKBOOTIMG_VERSION_ARGS)) \
- $(BOARD_MKBOOTIMG_INIT_ARGS) --output "$@"
+ $(MKBOOTIMG) $(INTERNAL_INIT_BOOT_IMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_INIT_ARGS) --output "$@"
$(call assert-max-image-size,$@,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE))
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE)) \
--partition_name init_boot $(INTERNAL_AVB_INIT_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
else
@@ -1166,7 +1157,7 @@ $(INSTALLED_INIT_BOOT_IMAGE_TARGET): $(INTERNAL_PREBUILT_INIT_BOOT_IMAGE) $(AVBT
cp $(INTERNAL_PREBUILT_INIT_BOOT_IMAGE) $@
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_INIT_BOOT_IMAGE_PARTITION_SIZE)) \
--partition_name boot $(INTERNAL_AVB_INIT_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
else
@@ -1294,7 +1285,7 @@ $(INSTALLED_VENDOR_BOOTIMAGE_TARGET): $(AVBTOOL) $(BOARD_AVB_VENDOR_BOOTIMAGE_KE
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE)) \
--partition_name vendor_boot $(INTERNAL_AVB_VENDOR_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS)
else
@@ -1369,8 +1360,6 @@ ifndef TARGET_BUILD_APPS
# TARGET_OUT_NOTICE_FILES now that the notice files are gathered from
# the src subdirectory.
target_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE.txt
-tools_notice_file_txt := $(HOST_OUT_INTERMEDIATES)/NOTICE.txt
-tools_notice_file_html := $(HOST_OUT_INTERMEDIATES)/NOTICE.html
kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
winpthreads_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/winpthreads.txt
@@ -1636,15 +1625,6 @@ endif # PRODUCT_NOTICE_SPLIT
ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-$(eval $(call combine-notice-files, html, \
- $(tools_notice_file_txt), \
- $(tools_notice_file_html), \
- "Notices for files contained in the tools directory:", \
- $(HOST_OUT_NOTICE_FILES), \
- $(ALL_DEFAULT_INSTALLED_MODULES) \
- $(winpthreads_notice_file), \
- $(exclude_target_dirs)))
-
endif # TARGET_BUILD_APPS
# The kernel isn't really a module, so to get its module file in there, we
@@ -2395,8 +2375,8 @@ define build-recoveryimage-target
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))))
$(if $(filter true,$(BOARD_AVB_ENABLE)), \
$(if $(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)), \
- $(AVBTOOL) add_hash_footer --image $(1) --partition_size $(call get-bootimage-partition-size,$(1),boot) --partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS),\
- $(AVBTOOL) add_hash_footer --image $(1) --partition_size $(BOARD_RECOVERYIMAGE_PARTITION_SIZE) --partition_name recovery $(INTERNAL_AVB_RECOVERY_SIGNING_ARGS) $(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)))
+ $(AVBTOOL) add_hash_footer --image $(1) $(call get-partition-size-argument,$(call get-bootimage-partition-size,$(1),boot)) --partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) $(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS),\
+ $(AVBTOOL) add_hash_footer --image $(1) $(call get-partition-size-argument,$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)) --partition_name recovery $(INTERNAL_AVB_RECOVERY_SIGNING_ARGS) $(BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS)))
endef
recoveryimage-deps := $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel)
@@ -2562,7 +2542,7 @@ define test-key-sign-bootimage
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),$(2))))
$(AVBTOOL) add_hash_footer \
--image $(1) \
- --partition_size $(call get-bootimage-partition-size,$(1),$(2))\
+ $(call get-partition-size-argument,$(call get-bootimage-partition-size,$(1),$(2)))\
--partition_name boot $(INTERNAL_AVB_BOOT_TEST_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
$(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),$(2)))
@@ -2650,7 +2630,7 @@ define test-key-sign-vendor-bootimage
$(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
- --partition_size $(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE)) \
--partition_name vendor_boot $(INTERNAL_AVB_VENDOR_BOOT_TEST_SIGNING_ARGS) \
$(BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS)
$(call assert-max-image-size,$(1),$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
@@ -3680,7 +3660,7 @@ $(INSTALLED_DTBOIMAGE_TARGET): $(BOARD_PREBUILT_DTBOIMAGE) $(AVBTOOL) $(BOARD_AV
cp $(BOARD_PREBUILT_DTBOIMAGE) $@
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_DTBOIMG_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_DTBOIMG_PARTITION_SIZE)) \
--partition_name dtbo $(INTERNAL_AVB_DTBO_SIGNING_ARGS) \
$(BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS)
else
@@ -3694,7 +3674,9 @@ endif # BOARD_PREBUILT_DTBOIMAGE
# Protected VM firmware image
ifeq ($(BOARD_USES_PVMFWIMAGE),true)
INSTALLED_PVMFWIMAGE_TARGET := $(PRODUCT_OUT)/pvmfw.img
+INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET := $(PRODUCT_OUT)/pvmfw_embedded.avbpubkey
INTERNAL_PREBUILT_PVMFWIMAGE := packages/modules/Virtualization/pvmfw/pvmfw.img
+INTERNAL_PVMFW_EMBEDDED_AVBKEY := external/avb/test/data/testkey_rsa4096_pub.bin
ifdef BOARD_PREBUILT_PVMFWIMAGE
PREBUILT_PVMFWIMAGE_TARGET := $(BOARD_PREBUILT_PVMFWIMAGE)
@@ -3707,13 +3689,17 @@ $(INSTALLED_PVMFWIMAGE_TARGET): $(PREBUILT_PVMFWIMAGE_TARGET) $(AVBTOOL) $(BOARD
cp $< $@
$(AVBTOOL) add_hash_footer \
--image $@ \
- --partition_size $(BOARD_PVMFWIMAGE_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_PVMFWIMAGE_PARTITION_SIZE)) \
--partition_name pvmfw $(INTERNAL_AVB_PVMFW_SIGNING_ARGS) \
$(BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS)
else
$(eval $(call copy-one-file,$(PREBUILT_PVMFWIMAGE_TARGET),$(INSTALLED_PVMFWIMAGE_TARGET)))
endif
+$(INSTALLED_PVMFWIMAGE_TARGET): $(INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET)
+
+$(eval $(call copy-one-file,$(INTERNAL_PVMFW_EMBEDDED_AVBKEY),$(INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET)))
+
endif # BOARD_USES_PVMFWIMAGE
# Returns a list of image targets corresponding to the given list of partitions. For example, it
@@ -3749,7 +3735,7 @@ ifeq ($(BOARD_AVB_ENABLE),true)
--image $(3) \
--key $(BOARD_AVB_$(call to-upper,$(2))_KEY_PATH) \
--algorithm $(BOARD_AVB_$(call to-upper,$(2))_ALGORITHM) \
- --partition_size $(BOARD_AVB_$(call to-upper,$(2))_PARTITION_SIZE) \
+ $(call get-partition-size-argument,$(BOARD_AVB_$(call to-upper,$(2))_PARTITION_SIZE)) \
--partition_name $(2) \
$(INTERNAL_AVB_CUSTOMIMAGES_SIGNING_ARGS) \
$(BOARD_AVB_$(call to-upper,$(2))_ADD_HASHTREE_FOOTER_ARGS)
@@ -3838,8 +3824,7 @@ BOARD_AVB_SYSTEM_EXT_ADD_HASHTREE_FOOTER_ARGS += \
--prop com.android.build.system_ext.security_patch:$(PLATFORM_SECURITY_PATCH)
BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
- --prop com.android.build.boot.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE) \
- --prop com.android.build.boot.os_version:$(PLATFORM_VERSION_LAST_STABLE)
+ --prop com.android.build.boot.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE)
BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.init_boot.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE) \
@@ -3880,6 +3865,14 @@ BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS += \
# The following vendor- and odm-specific images needs explicit SPL set per board.
# TODO(b/210875415) Is this security_patch property used? Should it be removed from
# boot.img when there is no platform ramdisk included in it?
+ifdef BOOT_OS_VERSION
+BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
+ --prop com.android.build.boot.os_version:$(BOOT_OS_VERSION)
+else
+BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
+ --prop com.android.build.boot.os_version:$(PLATFORM_VERSION_LAST_STABLE)
+endif
+
ifdef BOOT_SECURITY_PATCH
BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.boot.security_patch:$(BOOT_SECURITY_PATCH)
@@ -3923,13 +3916,6 @@ BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.pvmfw.security_patch:$(PVMFW_SECURITY_PATCH)
endif
-# For upgrading devices without a init_boot partition, the init_boot footer args
-# should fallback to boot partition footer.
-ifndef INSTALLED_INIT_BOOT_IMAGE_TARGET
-BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
- $(BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS)
-endif
-
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
INIT_BOOT_FOOTER_ARGS := BOARD_AVB_INIT_BOOT_ADD_HASH_FOOTER_ARGS
VENDOR_BOOT_FOOTER_ARGS := BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS
@@ -5170,6 +5156,11 @@ define dump-dynamic-partitions-info
echo "virtual_ab=true" >> $(1))
$(if $(filter true,$(PRODUCT_VIRTUAL_AB_COMPRESSION)), \
echo "virtual_ab_compression=true" >> $(1))
+# This value controls the compression algorithm used for VABC
+# valid options are defined in system/core/fs_mgr/libsnapshot/cow_writer.cpp
+# e.g. "none", "gz", "brotli"
+ $(if $(PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD), \
+ echo "virtual_ab_compression_method=$(PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD)" >> $(1))
$(if $(filter true,$(PRODUCT_VIRTUAL_AB_OTA_RETROFIT)), \
echo "virtual_ab_retrofit=true" >> $(1))
endef
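A hedged sketch of how a product would opt into a specific VABC algorithm (the path is illustrative; `gz` is one of the values named in the comment above):

```
# device/<vendor>/<device>/device.mk (sketch)
PRODUCT_VIRTUAL_AB_COMPRESSION := true
PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD := gz
```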
@@ -5288,6 +5279,7 @@ $(BUILT_TARGET_FILES_PACKAGE): \
$(INSTALLED_CACHEIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
$(INSTALLED_PVMFWIMAGE_TARGET) \
+ $(INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET) \
$(INSTALLED_CUSTOMIMAGES_TARGET) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
$(INSTALLED_KERNEL_TARGET) \
@@ -5558,7 +5550,7 @@ ifeq ($(AB_OTA_UPDATER),true)
$(hide) cp $(TOPDIR)system/update_engine/update_engine.conf $(zip_root)/META/update_engine_config.txt
$(hide) cp $(TOPDIR)external/zucchini/version_info.h $(zip_root)/META/zucchini_config.txt
$(hide) cp $(HOST_OUT_SHARED_LIBRARIES)/liblz4.so $(zip_root)/META/liblz4.so
- $(hide) for part in $(strip $(AB_OTA_PARTITIONS)); do \
+ $(hide) for part in $(sort $(AB_OTA_PARTITIONS)); do \
echo "$${part}" >> $(zip_root)/META/ab_partitions.txt; \
done
$(hide) for conf in $(strip $(AB_OTA_POSTINSTALL_CONFIG)); do \
@@ -5625,6 +5617,7 @@ endif # BOARD_PREBUILT_DTBOIMAGE
ifeq ($(BOARD_USES_PVMFWIMAGE),true)
$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
$(hide) cp $(INSTALLED_PVMFWIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
+ $(hide) cp $(INSTALLED_PVMFW_EMBEDDED_AVBKEY_TARGET) $(zip_root)/PREBUILT_IMAGES/
endif
ifdef BOARD_PREBUILT_BOOTLOADER
$(hide) mkdir -p $(zip_root)/IMAGES
@@ -6355,7 +6348,6 @@ include $(BUILD_SYSTEM)/sdk_font.mk
deps := \
$(target_notice_file_txt) \
- $(tools_notice_file_txt) \
$(OUT_DOCS)/offline-sdk-timestamp \
$(SDK_METADATA_FILES) \
$(SYMBOLS_ZIP) \
diff --git a/core/base_rules.mk b/core/base_rules.mk
index cec7792474..e26f456b2c 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -875,6 +875,16 @@ $(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
endif # LOCAL_UNINSTALLABLE_MODULE
endif # LOCAL_COMPATIBILITY_SUITE
+my_supported_variant :=
+ifeq ($(my_host_cross),true)
+ my_supported_variant := HOST_CROSS
+else
+ ifdef LOCAL_IS_HOST_MODULE
+ my_supported_variant := HOST
+ else
+ my_supported_variant := DEVICE
+ endif
+endif
###########################################################
## Add test module to ALL_DISABLED_PRESUBMIT_TESTS if LOCAL_PRESUBMIT_DISABLED is set to true.
###########################################################
@@ -981,6 +991,9 @@ ALL_MODULES.$(my_register_name).SHARED_LIBS := \
ALL_MODULES.$(my_register_name).SYSTEM_SHARED_LIBS := \
$(ALL_MODULES.$(my_register_name).SYSTEM_SHARED_LIBS) $(LOCAL_SYSTEM_SHARED_LIBRARIES)
+ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES := \
+ $(ALL_MODULES.$(my_register_name).LOCAL_RUNTIME_LIBRARIES) $(LOCAL_RUNTIME_LIBRARIES)
+
ifdef LOCAL_TEST_DATA
# Export the list of targets that are handled as data inputs and required
# by tests at runtime. The LOCAL_TEST_DATA format is generated from below
@@ -993,6 +1006,15 @@ ifdef LOCAL_TEST_DATA
$(call word-colon,2,$(f))))
endif
+ifdef LOCAL_TEST_DATA_BINS
+ ALL_MODULES.$(my_register_name).TEST_DATA_BINS := \
+ $(ALL_MODULES.$(my_register_name).TEST_DATA_BINS) $(LOCAL_TEST_DATA_BINS)
+endif
+
+ALL_MODULES.$(my_register_name).SUPPORTED_VARIANTS := \
+ $(ALL_MODULES.$(my_register_name).SUPPORTED_VARIANTS) \
+ $(filter-out $(ALL_MODULES.$(my_register_name).SUPPORTED_VARIANTS),$(my_supported_variant))
+
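The `filter-out` on the last line of that assignment is the usual Make idiom for appending without duplicates; a sketch of the behavior:

```
# $(filter-out <list>,<word>) drops <word> if it already appears in <list>:
#   list = HOST DEVICE, word = HOST   -> appends nothing
#   list = HOST,        word = DEVICE -> appends DEVICE
```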
##########################################################################
## When compiling against the VNDK, add the .vendor or .product suffix to
## required modules.
diff --git a/core/board_config.mk b/core/board_config.mk
index 405fea6e6a..97b258d27e 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -174,6 +174,7 @@ _build_broken_var_list := \
BUILD_BROKEN_DUP_SYSPROP \
BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES \
BUILD_BROKEN_ENFORCE_SYSPROP_OWNER \
+ BUILD_BROKEN_INPUT_DIR_MODULES \
BUILD_BROKEN_MISSING_REQUIRED_MODULES \
BUILD_BROKEN_OUTSIDE_INCLUDE_DIRS \
BUILD_BROKEN_PREBUILT_ELF_FILES \
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 415334f199..57f9ef8aef 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -264,6 +264,8 @@ LOCAL_RES_LIBRARIES:=
LOCAL_RESOURCE_DIR:=
LOCAL_RLIB_LIBRARIES:=
LOCAL_RMTYPEDEFS:=
+LOCAL_ROTATION_MIN_SDK_VERSION:=
+LOCAL_RUNTIME_LIBRARIES:=
LOCAL_RRO_THEME:=
LOCAL_RTTI_FLAG:=
LOCAL_SANITIZE:=
@@ -316,6 +318,7 @@ LOCAL_SYSTEM_SHARED_LIBRARIES:=none
LOCAL_TARGET_REQUIRED_MODULES:=
LOCAL_TEST_CONFIG:=
LOCAL_TEST_DATA:=
+LOCAL_TEST_DATA_BINS:=
LOCAL_TEST_MAINLINE_MODULES:=
LOCAL_TEST_MODULE_TO_PROGUARD_WITH:=
LOCAL_TIDY:=
@@ -358,6 +361,7 @@ LOCAL_LDFLAGS_$(TARGET_ARCH):=
LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_ARCH):=
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_ARCH):=
+LOCAL_RUNTIME_LIBRARIES_$(TARGET_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_ARCH):=
LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_SOONG_JNI_LIBS_SYMBOLS:=
@@ -382,6 +386,7 @@ LOCAL_LDFLAGS_$(TARGET_2ND_ARCH):=
LOCAL_PACK_MODULE_RELOCATIONS_$(TARGET_2ND_ARCH):=
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_2ND_ARCH):=
+LOCAL_RUNTIME_LIBRARIES_$(TARGET_2ND_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH):=
LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_2ND_ARCH):=
@@ -403,6 +408,7 @@ LOCAL_GENERATED_SOURCES_$(HOST_ARCH):=
LOCAL_HEADER_LIBRARIES_$(HOST_ARCH):=
LOCAL_LDFLAGS_$(HOST_ARCH):=
LOCAL_REQUIRED_MODULES_$(HOST_ARCH):=
+LOCAL_RUNTIME_LIBRARIES_$(HOST_ARCH):=
LOCAL_SHARED_LIBRARIES_$(HOST_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(HOST_ARCH):=
LOCAL_SRC_FILES_$(HOST_ARCH):=
@@ -422,6 +428,7 @@ LOCAL_GENERATED_SOURCES_$(HOST_2ND_ARCH):=
LOCAL_HEADER_LIBRARIES_$(HOST_2ND_ARCH):=
LOCAL_LDFLAGS_$(HOST_2ND_ARCH):=
LOCAL_REQUIRED_MODULES_$(HOST_2ND_ARCH):=
+LOCAL_RUNTIME_LIBRARIES_$(HOST_2ND_ARCH):=
LOCAL_SHARED_LIBRARIES_$(HOST_2ND_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(HOST_2ND_ARCH):=
LOCAL_SRC_FILES_$(HOST_2ND_ARCH):=
@@ -438,6 +445,7 @@ LOCAL_HEADER_LIBRARIES_$(HOST_OS):=
LOCAL_LDFLAGS_$(HOST_OS):=
LOCAL_LDLIBS_$(HOST_OS):=
LOCAL_REQUIRED_MODULES_$(HOST_OS):=
+LOCAL_RUNTIME_LIBRARIES_$(HOST_OS):=
LOCAL_SHARED_LIBRARIES_$(HOST_OS):=
LOCAL_SRC_FILES_$(HOST_OS):=
LOCAL_STATIC_LIBRARIES_$(HOST_OS):=
@@ -479,6 +487,8 @@ LOCAL_MODULE_STEM_32:=
LOCAL_MODULE_STEM_64:=
LOCAL_MODULE_SYMLINKS_32:=
LOCAL_MODULE_SYMLINKS_64:=
+LOCAL_RUNTIME_LIBRARIES_32:=
+LOCAL_RUNTIME_LIBRARIES_64:=
LOCAL_SHARED_LIBRARIES_32:=
LOCAL_SHARED_LIBRARIES_64:=
LOCAL_SRC_FILES_32:=
diff --git a/core/combo/select.mk b/core/combo/select.mk
index 761755866c..9c7e69e439 100644
--- a/core/combo/select.mk
+++ b/core/combo/select.mk
@@ -35,7 +35,7 @@ $(KATI_obsolete_var \
,HOST_CROSS builds are not supported in Make)
else
-$(combo_var_prefix)GLOBAL_ARFLAGS := crsPD -format=gnu
+$(combo_var_prefix)GLOBAL_ARFLAGS := crsPD --format=gnu
$(combo_var_prefix)STATIC_LIB_SUFFIX := .a
diff --git a/core/definitions.mk b/core/definitions.mk
index c981152276..314ba0a261 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -577,6 +577,15 @@ $(call generated-sources-dir-for,META,lic,)
endef
###########################################################
+# License metadata targets corresponding to targets in $(1)
+###########################################################
+define corresponding-license-metadata
+$(strip $(eval _dir := $(call license-metadata-dir)) \
+$(foreach target, $(sort $(1)), $(_dir)/$(target).meta_lic) \
+)
+endef
+
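A usage sketch of the new helper (the target path is hypothetical; the result is the matching `.meta_lic` file under the license metadata directory):

```
# $(call corresponding-license-metadata,out/target/product/generic/foo.zip)
#   -> <license-metadata-dir>/out/target/product/generic/foo.zip.meta_lic
```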
+###########################################################
## License metadata build rule for my_register_name $(1)
###########################################################
define license-metadata-rule
@@ -728,6 +737,22 @@ $(strip \
endef
###########################################################
+## Declare that non-module targets copied from project $(1) and
+## optionally ending in $(2) have the following license
+## metadata:
+##
+## $(3) -- license kinds e.g. SPDX-license-identifier-Apache-2.0
+## $(4) -- license conditions e.g. notice by_exception_only
+## $(5) -- license text filenames (notices)
+## $(6) -- package name
+###########################################################
+define declare-copy-files-license-metadata
+$(strip \
+ $(foreach _pair,$(filter $(1)%$(2),$(PRODUCT_COPY_FILES)),$(eval $(call declare-license-metadata,$(PRODUCT_OUT)/$(call word-colon,2,$(_pair)),$(3),$(4),$(5),$(6),$(1)))) \
+)
+endef
+
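A hedged usage sketch — the project, suffix, license file, and package name below are illustrative, not from this change:

```
# Declare metadata for every PRODUCT_COPY_FILES entry coming from the
# hypothetical project "external/examplelib" and ending in ".conf".
$(call declare-copy-files-license-metadata,external/examplelib,.conf,\
    SPDX-license-identifier-Apache-2.0,notice,external/examplelib/LICENSE,examplelib)
```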
+###########################################################
## Declare the license metadata for non-module container-type target $(1).
##
## Container-type targets are targets like .zip files that
@@ -765,6 +790,18 @@ $(strip \
endef
###########################################################
+## Declare that non-module targets copied from project $(1) and
+## optionally ending in $(2) are non-copyrightable files.
+##
+## e.g. an information-only file merely listing other files.
+###########################################################
+define declare-0p-copy-files
+$(strip \
+ $(foreach _pair,$(filter $(1)%$(2),$(PRODUCT_COPY_FILES)),$(eval $(call declare-0p-target,$(PRODUCT_OUT)/$(call word-colon,2,$(_pair))))) \
+)
+endef
+
+###########################################################
## Declare non-module target $(1) to have a first-party license
## (Android Apache 2.0)
##
@@ -775,6 +812,15 @@ $(call declare-license-metadata,$(1),SPDX-license-identifier-Apache-2.0,notice,b
endef
###########################################################
+## Declare that non-module targets copied from project $(1) and
+## optionally ending in $(2) are first-party licensed
+## (Android Apache 2.0)
+###########################################################
+define declare-1p-copy-files
+$(foreach _pair,$(filter $(1)%$(2),$(PRODUCT_COPY_FILES)),$(call declare-1p-target,$(PRODUCT_OUT)/$(call word-colon,2,$(_pair)),$(1)))
+endef
+
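And the first-party shorthand, again with illustrative arguments:

```
# Mark every PRODUCT_COPY_FILES entry copied from the hypothetical
# project "device/sample" and ending in ".rc" as Android Apache 2.0.
$(call declare-1p-copy-files,device/sample,.rc)
```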
+###########################################################
## Declare non-module container-type target $(1) to have a
## first-party license (Android Apache 2.0).
##
@@ -828,6 +874,34 @@ reportmissinglicenses:
endef
+
+###########################################################
+# Returns the unique list of built license metadata files.
+###########################################################
+define all-license-metadata
+$(sort \
+ $(foreach t,$(ALL_NON_MODULES),$(if $(filter 0p,$(ALL_TARGETS.$(t).META_LIC)),, $(ALL_TARGETS.$(t).META_LIC))) \
+ $(foreach m,$(ALL_MODULES), $(ALL_MODULES.$(m).META_LIC)) \
+)
+endef
+
+###########################################################
+# Declares the rule to report all library names used in any notice files.
+###########################################################
+define report-all-notice-library-names-rule
+$(strip $(eval _all := $(call all-license-metadata)))
+
+.PHONY: reportallnoticelibrarynames
+reportallnoticelibrarynames: PRIVATE_LIST_FILE := $(call license-metadata-dir)/filelist
+reportallnoticelibrarynames: | $(COMPLIANCENOTICE_SHIPPEDLIBS)
+reportallnoticelibrarynames: $(_all)
+ @echo Reporting notice library names for at least $$(words $(_all)) license metadata files
+ $(hide) rm -f $$(PRIVATE_LIST_FILE)
+ $(hide) mkdir -p $$(dir $$(PRIVATE_LIST_FILE))
+ $(hide) find out -name '*meta_lic' -type f -printf '"%p"\n' >$$(PRIVATE_LIST_FILE)
+ $(COMPLIANCENOTICE_SHIPPEDLIBS) @$$(PRIVATE_LIST_FILE)
+endef
+
###########################################################
## Declares a license metadata build rule for ALL_MODULES
###########################################################
@@ -842,7 +916,8 @@ $(strip \
) \
$(foreach t,$(sort $(ALL_NON_MODULES)),$(eval $(call non-module-license-metadata-rule,$(t)))) \
$(foreach m,$(sort $(ALL_MODULES)),$(eval $(call license-metadata-rule,$(m)))) \
- $(eval $(call report-missing-licenses-rule)))
+ $(eval $(call report-missing-licenses-rule)) \
+ $(eval $(call report-all-notice-library-names-rule)))
endef
###########################################################
diff --git a/core/main.mk b/core/main.mk
index e9cbc60b33..d5dc49f6ae 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -142,11 +142,6 @@ $(KATI_obsolete_var ADDITIONAL_BUILD_PROPERTIES, Please use ADDITIONAL_SYSTEM_PR
#
# -----------------------------------------------------------------
# Add the product-defined properties to the build properties.
-ifdef PRODUCT_SHIPPING_API_LEVEL
-ADDITIONAL_SYSTEM_PROPERTIES += \
- ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
-endif
-
ifneq ($(BOARD_PROPERTY_OVERRIDES_SPLIT_ENABLED), true)
ADDITIONAL_SYSTEM_PROPERTIES += $(PRODUCT_PROPERTY_OVERRIDES)
else
@@ -348,7 +343,7 @@ endif
ADDITIONAL_PRODUCT_PROPERTIES += ro.build.characteristics=$(TARGET_AAPT_CHARACTERISTICS)
ifeq ($(AB_OTA_UPDATER),true)
-ADDITIONAL_PRODUCT_PROPERTIES += ro.product.ab_ota_partitions=$(subst $(space),$(comma),$(strip $(AB_OTA_PARTITIONS)))
+ADDITIONAL_PRODUCT_PROPERTIES += ro.product.ab_ota_partitions=$(subst $(space),$(comma),$(sort $(AB_OTA_PARTITIONS)))
endif
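Switching from `strip` to `sort` (here and in the `ab_partitions.txt` loop above) both deduplicates the list and makes its order independent of makefile inclusion order; a sketch:

```
# $(sort ...) sorts words lexically and removes duplicates:
#   $(sort system vendor system boot) -> boot system vendor
```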
# -----------------------------------------------------------------
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 4edbbb8ffc..4ebbe2eeef 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -11,10 +11,6 @@ endif
ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
-else ifdef my_register_name
-license_package_name:=$(my_register_name)
-else
-license_package_name:=$(strip $(LOCAL_MODULE))
endif
ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
diff --git a/core/product-graph.mk b/core/product-graph.mk
index d425b22f7a..6d51db17a9 100644
--- a/core/product-graph.mk
+++ b/core/product-graph.mk
@@ -14,13 +14,10 @@
# limitations under the License.
#
-# the foreach and the if remove the single space entries that creep in because of the evals
+# the sort also acts as a strip to remove the single space entries that creep in because of the evals
define gather-all-products
-$(sort $(foreach p, \
- $(eval _all_products_visited := )
- $(call all-products-inner, $(PARENT_PRODUCT_FILES)) \
- , $(if $(strip $(p)),$(strip $(p)),)) \
-)
+$(eval _all_products_visited := )\
+$(sort $(call all-products-inner, $(PARENT_PRODUCT_FILES)))
endef
define all-products-inner
@@ -72,7 +69,7 @@ define emit-product-node-props
$(hide) echo \"$(1)\" [ \
label=\"$(dir $(1))\\n$(notdir $(1))\\n\\n$(subst $(close_parenthesis),,$(subst $(open_parethesis),,$(call get-product-var,$(1),PRODUCT_MODEL)))\\n$(call get-product-var,$(1),PRODUCT_DEVICE)\" \
style=\"filled\" fillcolor=\"$(strip $(call node-color,$(1)))\" \
-colorscheme=\"svg\" fontcolor=\"darkblue\" href=\"products/$(1).html\" \
+colorscheme=\"svg\" fontcolor=\"darkblue\" \
] >> $(2)
endef
@@ -95,66 +92,7 @@ else
false
endif
-# Evaluates to the name of the product file
-# $(1) product file
-define product-debug-filename
-$(OUT_DIR)/products/$(strip $(1)).html
-endef
-
-# Makes a rule for the product debug info
-# $(1) product file
-define transform-product-debug
-$(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile)
- @echo Product debug info file: $$@
- $(hide) rm -f $$@
- $(hide) mkdir -p $$(dir $$@)
- $(hide) echo 'FILE=$(strip $(1))' >> $$@
- $(hide) echo 'PRODUCT_NAME=$(call get-product-var,$(1),PRODUCT_NAME)' >> $$@
- $(hide) echo 'PRODUCT_MODEL=$(call get-product-var,$(1),PRODUCT_MODEL)' >> $$@
- $(hide) echo 'PRODUCT_LOCALES=$(call get-product-var,$(1),PRODUCT_LOCALES)' >> $$@
- $(hide) echo 'PRODUCT_AAPT_CONFIG=$(call get-product-var,$(1),PRODUCT_AAPT_CONFIG)' >> $$@
- $(hide) echo 'PRODUCT_AAPT_PREF_CONFIG=$(call get-product-var,$(1),PRODUCT_AAPT_PREF_CONFIG)' >> $$@
- $(hide) echo 'PRODUCT_PACKAGES=$(call get-product-var,$(1),PRODUCT_PACKAGES)' >> $$@
- $(hide) echo 'PRODUCT_DEVICE=$(call get-product-var,$(1),PRODUCT_DEVICE)' >> $$@
- $(hide) echo 'PRODUCT_MANUFACTURER=$(call get-product-var,$(1),PRODUCT_MANUFACTURER)' >> $$@
- $(hide) echo 'PRODUCT_PROPERTY_OVERRIDES=$(call get-product-var,$(1),PRODUCT_PROPERTY_OVERRIDES)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_PROPERTY_OVERRIDES=$(call get-product-var,$(1),PRODUCT_DEFAULT_PROPERTY_OVERRIDES)' >> $$@
- $(hide) echo 'PRODUCT_SYSTEM_DEFAULT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_SYSTEM_DEFAULT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_PRODUCT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_PRODUCT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_SYSTEM_EXT_PROPERTIES=$(call get-product-var,$(1),PRODUCT_SYSTEM_EXT_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_ODM_PROPERTIES=$(call get-product-var,$(1),PRODUCT_ODM_PROPERTIES)' >> $$@
- $(hide) echo 'PRODUCT_CHARACTERISTICS=$(call get-product-var,$(1),PRODUCT_CHARACTERISTICS)' >> $$@
- $(hide) echo 'PRODUCT_COPY_FILES=$(call get-product-var,$(1),PRODUCT_COPY_FILES)' >> $$@
- $(hide) echo 'PRODUCT_OTA_PUBLIC_KEYS=$(call get-product-var,$(1),PRODUCT_OTA_PUBLIC_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_EXTRA_OTA_KEYS=$(call get-product-var,$(1),PRODUCT_EXTRA_OTA_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_EXTRA_RECOVERY_KEYS=$(call get-product-var,$(1),PRODUCT_EXTRA_RECOVERY_KEYS)' >> $$@
- $(hide) echo 'PRODUCT_PACKAGE_OVERLAYS=$(call get-product-var,$(1),PRODUCT_PACKAGE_OVERLAYS)' >> $$@
- $(hide) echo 'DEVICE_PACKAGE_OVERLAYS=$(call get-product-var,$(1),DEVICE_PACKAGE_OVERLAYS)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_NAME=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_NAME)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_COPY_FILES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_COPY_FILES)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_COPY_MODULES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_COPY_MODULES)' >> $$@
- $(hide) echo 'PRODUCT_SDK_ADDON_DOC_MODULES=$(call get-product-var,$(1),PRODUCT_SDK_ADDON_DOC_MODULES)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_WIFI_CHANNELS=$(call get-product-var,$(1),PRODUCT_DEFAULT_WIFI_CHANNELS)' >> $$@
- $(hide) echo 'PRODUCT_DEFAULT_DEV_CERTIFICATE=$(call get-product-var,$(1),PRODUCT_DEFAULT_DEV_CERTIFICATE)' >> $$@
- $(hide) echo 'PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES=$(call get-product-var,$(1),PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES)' >> $$@
- $(hide) echo 'PRODUCT_RESTRICT_VENDOR_FILES=$(call get-product-var,$(1),PRODUCT_RESTRICT_VENDOR_FILES)' >> $$@
- $(hide) echo 'PRODUCT_VENDOR_KERNEL_HEADERS=$(call get-product-var,$(1),PRODUCT_VENDOR_KERNEL_HEADERS)' >> $$@
-
-$(call product-debug-filename, $(p)): \
- $(OUT_DIR)/products/$(strip $(1)).txt \
- build/make/tools/product_debug.py \
- $(this_makefile)
- @echo Product debug html file: $$@
- $(hide) mkdir -p $$(dir $$@)
- $(hide) cat $$< | build/make/tools/product_debug.py > $$@
-endef
-
ifeq (,$(RBC_PRODUCT_CONFIG)$(RBC_NO_PRODUCT_GRAPH)$(RBC_BOARD_CONFIG))
-product_debug_files:=
-$(foreach p,$(all_products), \
- $(eval $(call transform-product-debug, $(p))) \
- $(eval product_debug_files += $(call product-debug-filename, $(p))) \
- )
.PHONY: product-graph
product-graph: $(products_graph)
diff --git a/core/product_config.mk b/core/product_config.mk
index 15935eab52..1deb39bbdf 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -112,8 +112,7 @@ endef
# Return empty unless the board is QCOM
define is-vendor-board-qcom
-$(if $(strip $(TARGET_BOARD_PLATFORM) $(QCOM_BOARD_PLATFORMS)),\
- $(filter $(TARGET_BOARD_PLATFORM),$(QCOM_BOARD_PLATFORMS)),\
+$(if $(strip $(TARGET_BOARD_PLATFORM) $(QCOM_BOARD_PLATFORMS)),$(filter $(TARGET_BOARD_PLATFORM),$(QCOM_BOARD_PLATFORMS)),\
$(error both TARGET_BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) and QCOM_BOARD_PLATFORMS=$(QCOM_BOARD_PLATFORMS)))
endef
diff --git a/core/product_config.rbc b/core/product_config.rbc
index 2820695afd..469b0f75ee 100644
--- a/core/product_config.rbc
+++ b/core/product_config.rbc
@@ -165,10 +165,10 @@ def _product_configuration(top_pcm_name, top_pcm, input_variables_init):
pcm(globals, handle)
# Now we know everything about this PCM, record it in 'configs'.
- children = __h_inherited_modules(handle)
+ children = handle.inherited_modules
if _options.trace_modules:
print("# ", " ".join(children.keys()))
- configs[name] = (pcm, __h_cfg(handle), children.keys(), False)
+ configs[name] = (pcm, handle.cfg, children.keys(), False)
pcm_count = pcm_count + 1
if len(children) == 0:
@@ -235,7 +235,7 @@ def _board_configuration(board_config_init, input_variables_init):
input_variables_init(globals_base, h_base)
input_variables_init(globals, h)
board_config_init(globals, h)
- return (globals, _dictionary_difference(h[0], h_base[0]), globals_base)
+ return (globals, _dictionary_difference(h.cfg, h_base.cfg), globals_base)
def _substitute_inherited(configs, pcm_name, cfg):
@@ -392,11 +392,11 @@ def __words(string_or_list):
# default value list (initially empty, modified by inheriting)
def __h_new():
"""Constructs a handle which is passed to PCM."""
- return (dict(), dict(), list())
-
-def __h_inherited_modules(handle):
- """Returns PCM's inherited modules dict."""
- return handle[1]
+ return struct(
+ cfg = dict(),
+ inherited_modules = dict(),
+ default_list_value = list()
+ )
def __h_cfg(handle):
"""Returns PCM's product configuration attributes dict.
@@ -404,7 +404,7 @@ def __h_cfg(handle):
This function is also exported as rblf.cfg, and every PCM
calls it at the beginning.
"""
- return handle[0]
+ return handle.cfg
def _setdefault(handle, attr):
"""If attribute has not been set, assigns default value to it.
@@ -413,9 +413,9 @@ def _setdefault(handle, attr):
Only list attributes are initialized this way. The default
value is kept in the PCM's handle. Calling inherit() updates it.
"""
- cfg = handle[0]
+ cfg = handle.cfg
if cfg.get(attr) == None:
- cfg[attr] = list(handle[2])
+ cfg[attr] = list(handle.default_list_value)
return cfg[attr]
def _inherit(handle, pcm_name, pcm):
@@ -424,12 +424,11 @@ def _inherit(handle, pcm_name, pcm):
This function is exported as rblf.inherit, PCM calls it when
a module is inherited.
"""
- cfg, inherited, default_lv = handle
- inherited[pcm_name] = pcm
- default_lv.append(_indirect(pcm_name))
+ handle.inherited_modules[pcm_name] = pcm
+ handle.default_list_value.append(_indirect(pcm_name))
# Add inherited module reference to all configuration values
- for attr, val in cfg.items():
+ for attr, val in handle.cfg.items():
if type(val) == "list":
val.append(_indirect(pcm_name))
@@ -467,6 +466,13 @@ def _enforce_product_packages_exist(pkg_string_or_list):
#TODO(asmundak)
pass
+def _add_product_dex_preopt_module_config(handle, modules, config):
+ """Equivalent to add-product-dex-preopt-module-config from build/make/core/product.mk."""
+ modules = __words(modules)
+ config = _mkstrip(config).replace(" ", "|@SP@|")
+ _setdefault(handle, "PRODUCT_DEX_PREOPT_MODULE_CONFIGS")
+ handle.cfg["PRODUCT_DEX_PREOPT_MODULE_CONFIGS"] += [m + "=" + config for m in modules]
+
def _file_wildcard_exists(file_pattern):
"""Return True if there are files matching given bash pattern."""
return len(rblf_wildcard(file_pattern)) > 0
@@ -719,6 +725,7 @@ rblf = struct(
soong_config_set = _soong_config_set,
soong_config_get = _soong_config_get,
abspath = _abspath,
+ add_product_dex_preopt_module_config = _add_product_dex_preopt_module_config,
addprefix = _addprefix,
addsuffix = _addsuffix,
board_platform_in = _board_platform_in,
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 7f9dbe5e6e..c24df60a4b 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -28,6 +28,7 @@ $(call add_json_val, Platform_sdk_version, $(PLATFORM_SDK_VERSION)
$(call add_json_str, Platform_sdk_codename, $(PLATFORM_VERSION_CODENAME))
$(call add_json_bool, Platform_sdk_final, $(filter REL,$(PLATFORM_VERSION_CODENAME)))
$(call add_json_val, Platform_sdk_extension_version, $(PLATFORM_SDK_EXTENSION_VERSION))
+$(call add_json_val, Platform_base_sdk_extension_version, $(PLATFORM_BASE_SDK_EXTENSION_VERSION))
$(call add_json_csv, Platform_version_active_codenames, $(PLATFORM_VERSION_ALL_CODENAMES))
$(call add_json_str, Platform_security_patch, $(PLATFORM_SECURITY_PATCH))
$(call add_json_str, Platform_preview_sdk_version, $(PLATFORM_PREVIEW_SDK_VERSION))
@@ -206,6 +207,8 @@ $(call add_json_list, SystemExtPublicSepolicyDirs, $(SYSTEM_EXT_PUBLIC_SEP
$(call add_json_list, SystemExtPrivateSepolicyDirs, $(SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS) $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
$(call add_json_list, BoardSepolicyM4Defs, $(BOARD_SEPOLICY_M4DEFS))
$(call add_json_str, BoardSepolicyVers, $(BOARD_SEPOLICY_VERS))
+$(call add_json_str, SystemExtSepolicyPrebuiltApiDir, $(BOARD_SYSTEM_EXT_PREBUILT_DIR))
+$(call add_json_str, ProductSepolicyPrebuiltApiDir, $(BOARD_PRODUCT_PREBUILT_DIR))
$(call add_json_str, PlatformSepolicyVersion, $(PLATFORM_SEPOLICY_VERSION))
$(call add_json_str, TotSepolicyVersion, $(TOT_SEPOLICY_VERSION))
@@ -267,6 +270,7 @@ $(call add_json_str, ShippingApiLevel, $(PRODUCT_SHIPPING_API_LEVEL))
$(call add_json_bool, BuildBrokenEnforceSyspropOwner, $(filter true,$(BUILD_BROKEN_ENFORCE_SYSPROP_OWNER)))
$(call add_json_bool, BuildBrokenTrebleSyspropNeverallow, $(filter true,$(BUILD_BROKEN_TREBLE_SYSPROP_NEVERALLOW)))
$(call add_json_bool, BuildBrokenVendorPropertyNamespace, $(filter true,$(BUILD_BROKEN_VENDOR_PROPERTY_NAMESPACE)))
+$(call add_json_list, BuildBrokenInputDirModules, $(BUILD_BROKEN_INPUT_DIR_MODULES))
$(call add_json_bool, BuildDebugfsRestrictionsEnabled, $(filter true,$(PRODUCT_SET_DEBUGFS_RESTRICTIONS)))
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 12ead6ee73..43b8953569 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -262,6 +262,7 @@ $(gen_from_buildinfo_sh): $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) | $(B
BOARD_BUILD_SYSTEM_ROOT_IMAGE="$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)" \
BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT="$(BOARD_USE_VBMETA_DIGTEST_IN_FINGERPRINT)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
+ PLATFORM_DISPLAY_VERSION="$(PLATFORM_DISPLAY_VERSION)" \
PLATFORM_VERSION_LAST_STABLE="$(PLATFORM_VERSION_LAST_STABLE)" \
PLATFORM_SECURITY_PATCH="$(PLATFORM_SECURITY_PATCH)" \
PLATFORM_BASE_OS="$(PLATFORM_BASE_OS)" \
@@ -270,6 +271,7 @@ $(gen_from_buildinfo_sh): $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) | $(B
PLATFORM_PREVIEW_SDK_FINGERPRINT="$$(cat $(API_FINGERPRINT))" \
PLATFORM_VERSION_CODENAME="$(PLATFORM_VERSION_CODENAME)" \
PLATFORM_VERSION_ALL_CODENAMES="$(PLATFORM_VERSION_ALL_CODENAMES)" \
+ PLATFORM_VERSION_KNOWN_CODENAMES="$(PLATFORM_VERSION_KNOWN_CODENAMES)" \
PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION="$(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION)" \
BUILD_VERSION_TAGS="$(BUILD_VERSION_TAGS)" \
$(if $(OEM_THUMBPRINT_PROPERTIES),BUILD_THUMBPRINT="$(BUILD_THUMBPRINT_FROM_FILE)") \
@@ -306,10 +308,6 @@ _prop_vars_ += \
PRODUCT_VENDOR_PROPERTIES
endif
-_blacklist_names_ := \
- $(PRODUCT_SYSTEM_PROPERTY_BLACKLIST) \
- ro.product.first_api_level
-
INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
$(eval $(call build-properties,\
@@ -317,7 +315,7 @@ $(eval $(call build-properties,\
$(INSTALLED_BUILD_PROP_TARGET),\
$(_prop_files_),\
$(_prop_vars_),\
- $(_blacklist_names_),\
+ $(PRODUCT_SYSTEM_PROPERTY_BLACKLIST),\
$(empty),\
$(empty)))
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index aeeb403484..8097535d1d 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -1,4 +1,5 @@
# Print a list of the modules that could be built
+# Currently, runtime_dependencies only includes runtime library information for cc binaries.
MODULE_INFO_JSON := $(PRODUCT_OUT)/module-info.json
@@ -24,6 +25,9 @@ $(MODULE_INFO_JSON):
'"test_mainline_modules": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_MAINLINE_MODULES)),"$(w)", )], ' \
'"is_unit_test": "$(ALL_MODULES.$(m).IS_UNIT_TEST)", ' \
'"data": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA)),"$(w)", )], ' \
+ '"runtime_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).LOCAL_RUNTIME_LIBRARIES)),"$(w)", )], ' \
+ '"data_dependencies": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_DATA_BINS)),"$(w)", )], ' \
+ '"supported_variants": [$(foreach w,$(sort $(ALL_MODULES.$(m).SUPPORTED_VARIANTS)),"$(w)", )], ' \
'},\n' \
) | sed -e 's/, *\]/]/g' -e 's/, *\}/ }/g' -e '$$s/,$$//' >> $@
$(hide) echo '}' >> $@
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 4dbc941824..f19e841ec9 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -19,6 +19,7 @@
#
# Guarantees that the following are defined:
# PLATFORM_VERSION
+# PLATFORM_DISPLAY_VERSION
# PLATFORM_SDK_VERSION
# PLATFORM_VERSION_CODENAME
# DEFAULT_APP_TARGET_SDK
@@ -54,6 +55,11 @@ PLATFORM_VERSION_LAST_STABLE := 12
# release build. If this is a final release build, it is simply "REL".
PLATFORM_VERSION_CODENAME.TP1A := Tiramisu
+# This is the user-visible version. In a final release build it should
+# be empty to use PLATFORM_VERSION as the user-visible version. For
+# a preview release it can be set to a user-friendly value like `12 Preview 1`.
+PLATFORM_DISPLAY_VERSION :=
+
ifndef PLATFORM_SDK_VERSION
# This is the canonical definition of the SDK version, which defines
# the set of APIs and functionality available in the platform. It
@@ -79,6 +85,13 @@ PLATFORM_SDK_EXTENSION_VERSION := 1
PLATFORM_BASE_SDK_EXTENSION_VERSION := 1
.KATI_READONLY := PLATFORM_BASE_SDK_EXTENSION_VERSION
+# These are all the known codenames starting from Q.
+PLATFORM_VERSION_KNOWN_CODENAMES := Q R S Sv2 Tiramisu
+# Convert from space separated list to comma separated
+PLATFORM_VERSION_KNOWN_CODENAMES := \
+ $(call normalize-comma-list,$(PLATFORM_VERSION_KNOWN_CODENAMES))
+.KATI_READONLY := PLATFORM_VERSION_KNOWN_CODENAMES
+
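A sketch of the conversion performed by `normalize-comma-list` (a helper defined in the build system's definitions):

```
# Before: PLATFORM_VERSION_KNOWN_CODENAMES := Q R S Sv2 Tiramisu
# After:  PLATFORM_VERSION_KNOWN_CODENAMES := Q,R,S,Sv2,Tiramisu
```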
ifndef PLATFORM_SECURITY_PATCH
# Used to indicate the security patch that has been applied to the device.
# It must signify that the build includes all security patches issued up through the designated Android Public Security Bulletin.
diff --git a/core/version_util.mk b/core/version_util.mk
index b7c4e48641..3a0d4b586a 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -90,6 +90,15 @@ endif
PLATFORM_VERSION_CODENAME \
PLATFORM_VERSION_ALL_CODENAMES
+ifneq (REL,$(PLATFORM_VERSION_CODENAME))
+ codenames := \
+ $(subst $(comma),$(space),$(strip $(PLATFORM_VERSION_KNOWN_CODENAMES)))
+ ifeq ($(filter $(PLATFORM_VERSION_CODENAME),$(codenames)),)
+ $(error '$(PLATFORM_VERSION_CODENAME)' is not in '$(codenames)'. \
+ Add PLATFORM_VERSION_CODENAME to PLATFORM_VERSION_KNOWN_CODENAMES)
+ endif
+endif
+
ifndef PLATFORM_VERSION
ifeq (REL,$(PLATFORM_VERSION_CODENAME))
PLATFORM_VERSION := $(PLATFORM_VERSION_LAST_STABLE)
@@ -99,6 +108,10 @@ ifndef PLATFORM_VERSION
endif
.KATI_READONLY := PLATFORM_VERSION
+ifndef PLATFORM_DISPLAY_VERSION
+ PLATFORM_DISPLAY_VERSION := $(PLATFORM_VERSION)
+endif
+.KATI_READONLY := PLATFORM_DISPLAY_VERSION
ifeq (REL,$(PLATFORM_VERSION_CODENAME))
PLATFORM_PREVIEW_SDK_VERSION := 0
diff --git a/target/board/ndk/BoardConfig.mk b/target/board/ndk/BoardConfig.mk
new file mode 100644
index 0000000000..da8b5f3e74
--- /dev/null
+++ b/target/board/ndk/BoardConfig.mk
@@ -0,0 +1,21 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+TARGET_ARCH_SUITE := ndk
+TARGET_USES_64_BIT_BINDER := true
+
+MALLOC_SVELTE := true
+
+USE_SAFESTACK := false
diff --git a/target/board/ndk/README.md b/target/board/ndk/README.md
new file mode 100644
index 0000000000..d8f3a1616a
--- /dev/null
+++ b/target/board/ndk/README.md
@@ -0,0 +1,2 @@
+This device is suitable for a Soong-only build that builds for all the
+architectures needed for the NDK.
diff --git a/target/product/AndroidProducts.mk b/target/product/AndroidProducts.mk
index 7d9d90e92a..ee702e5d16 100644
--- a/target/product/AndroidProducts.mk
+++ b/target/product/AndroidProducts.mk
@@ -61,6 +61,7 @@ PRODUCT_MAKEFILES := \
$(LOCAL_DIR)/mainline_system_x86.mk \
$(LOCAL_DIR)/mainline_system_x86_64.mk \
$(LOCAL_DIR)/mainline_system_x86_arm.mk \
+ $(LOCAL_DIR)/ndk.mk \
$(LOCAL_DIR)/sdk_arm64.mk \
$(LOCAL_DIR)/sdk.mk \
$(LOCAL_DIR)/sdk_phone_arm64.mk \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 3d299fb08a..55047dff6b 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -78,6 +78,7 @@ PRODUCT_PACKAGES += \
device_config \
dmctl \
dnsmasq \
+ dmesgd \
DownloadProvider \
dpm \
dump.erofs \
diff --git a/target/product/core_no_zygote.mk b/target/product/core_no_zygote.mk
new file mode 100644
index 0000000000..205a8976bd
--- /dev/null
+++ b/target/product/core_no_zygote.mk
@@ -0,0 +1,30 @@
+#
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Inherit from this product for devices that do not include a zygote using:
+# $(call inherit-product, $(SRC_TARGET_DIR)/product/core_no_zygote.mk)
+# The inheritance for this must come before the inheritance chain that leads
+# to core_minimal.mk.
+
+# Copy the no-zygote startup script
+PRODUCT_COPY_FILES += system/core/rootdir/init.no_zygote.rc:system/etc/init/hw/init.no_zygote.rc
+
+# Set the zygote property to select the no-zygote script.
+# This line must be parsed before the one in core_minimal.mk
+PRODUCT_VENDOR_PROPERTIES += ro.zygote=no_zygote
+
+TARGET_SUPPORTS_32_BIT_APPS := false
+TARGET_SUPPORTS_64_BIT_APPS := false
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
index 32230023ae..851a2cb116 100644
--- a/target/product/default_art_config.mk
+++ b/target/product/default_art_config.mk
@@ -63,7 +63,7 @@ PRODUCT_APEX_BOOT_JARS := \
com.android.scheduling:framework-scheduling \
com.android.sdkext:framework-sdkextensions \
com.android.tethering:framework-connectivity \
- com.android.tethering:framework-connectivity-tiramisu \
+ com.android.tethering:framework-connectivity-t \
com.android.tethering:framework-tethering \
com.android.wifi:framework-wifi
diff --git a/target/product/ndk.mk b/target/product/ndk.mk
new file mode 100644
index 0000000000..1dfd0db328
--- /dev/null
+++ b/target/product/ndk.mk
@@ -0,0 +1,21 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This device is suitable for a Soong-only build that builds for all the architectures
+# needed for the NDK. It is not going to work for normal `lunch <foo> && m` workflows.
+
+PRODUCT_NAME := ndk
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := ndk
diff --git a/target/product/virtual_ab_ota/compression.mk b/target/product/virtual_ab_ota/compression.mk
index 88c58b87a0..d5bd2a5395 100644
--- a/target/product/virtual_ab_ota/compression.mk
+++ b/target/product/virtual_ab_ota/compression.mk
@@ -18,6 +18,7 @@ $(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/launch_with_ven
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.userspace.snapshots.enabled=true
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.io_uring.enabled=true
PRODUCT_VIRTUAL_AB_COMPRESSION := true
PRODUCT_PACKAGES += \
snapuserd.vendor_ramdisk \
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index a349cba0f0..536a381c07 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -16,8 +16,10 @@ echo "ro.build.version.preview_sdk=$PLATFORM_PREVIEW_SDK_VERSION"
echo "ro.build.version.preview_sdk_fingerprint=$PLATFORM_PREVIEW_SDK_FINGERPRINT"
echo "ro.build.version.codename=$PLATFORM_VERSION_CODENAME"
echo "ro.build.version.all_codenames=$PLATFORM_VERSION_ALL_CODENAMES"
+echo "ro.build.version.known_codenames=$PLATFORM_VERSION_KNOWN_CODENAMES"
echo "ro.build.version.release=$PLATFORM_VERSION_LAST_STABLE"
echo "ro.build.version.release_or_codename=$PLATFORM_VERSION"
+echo "ro.build.version.release_or_preview_display=$PLATFORM_DISPLAY_VERSION"
echo "ro.build.version.security_patch=$PLATFORM_SECURITY_PATCH"
echo "ro.build.version.base_os=$PLATFORM_BASE_OS"
echo "ro.build.version.min_supported_target_sdk=$PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION"
diff --git a/tools/compliance/Android.bp b/tools/compliance/Android.bp
index d5965f8c82..ec0f2f9f5d 100644
--- a/tools/compliance/Android.bp
+++ b/tools/compliance/Android.bp
@@ -18,17 +18,27 @@ package {
}
blueprint_go_binary {
- name: "bom",
+ name: "checkshare",
+ srcs: ["cmd/checkshare/checkshare.go"],
+ deps: ["compliance-module"],
+ testSrcs: ["cmd/checkshare/checkshare_test.go"],
+}
+
+blueprint_go_binary {
+ name: "compliancenotice_bom",
srcs: ["cmd/bom/bom.go"],
deps: ["compliance-module"],
testSrcs: ["cmd/bom/bom_test.go"],
}
blueprint_go_binary {
- name: "checkshare",
- srcs: ["cmd/checkshare/checkshare.go"],
- deps: ["compliance-module"],
- testSrcs: ["cmd/checkshare/checkshare_test.go"],
+ name: "compliancenotice_shippedlibs",
+ srcs: ["cmd/shippedlibs/shippedlibs.go"],
+ deps: [
+ "compliance-module",
+ "soong-response",
+ ],
+ testSrcs: ["cmd/shippedlibs/shippedlibs_test.go"],
}
blueprint_go_binary {
@@ -70,13 +80,6 @@ blueprint_go_binary {
}
blueprint_go_binary {
- name: "shippedlibs",
- srcs: ["cmd/shippedlibs/shippedlibs.go"],
- deps: ["compliance-module"],
- testSrcs: ["cmd/shippedlibs/shippedlibs_test.go"],
-}
-
-blueprint_go_binary {
name: "textnotice",
srcs: ["cmd/textnotice/textnotice.go"],
deps: [
diff --git a/tools/compliance/cmd/checkshare/checkshare_test.go b/tools/compliance/cmd/checkshare/checkshare_test.go
index 4589595ca8..c9b62e1144 100644
--- a/tools/compliance/cmd/checkshare/checkshare_test.go
+++ b/tools/compliance/cmd/checkshare/checkshare_test.go
@@ -259,7 +259,7 @@ func Test(t *testing.T) {
if len(ts) < 1 {
continue
}
- if 0 < len(actualStdout) {
+ if len(actualStdout) > 0 {
t.Errorf("checkshare: unexpected multiple output lines %q, want %q", actualStdout+"\n"+ts, tt.expectedStdout)
}
actualStdout = ts
diff --git a/tools/compliance/cmd/htmlnotice/htmlnotice.go b/tools/compliance/cmd/htmlnotice/htmlnotice.go
index 0e3ba09321..ffb05859ff 100644
--- a/tools/compliance/cmd/htmlnotice/htmlnotice.go
+++ b/tools/compliance/cmd/htmlnotice/htmlnotice.go
@@ -204,17 +204,17 @@ func htmlNotice(ctx *context, files ...string) error {
fmt.Fprintln(ctx.stdout, "li { padding-left: 1em; }")
fmt.Fprintln(ctx.stdout, ".file-list { margin-left: 1em; }")
fmt.Fprintln(ctx.stdout, "</style>")
- if 0 < len(ctx.title) {
+ if len(ctx.title) > 0 {
fmt.Fprintf(ctx.stdout, "<title>%s</title>\n", html.EscapeString(ctx.title))
- } else if 0 < len(ctx.product) {
+ } else if len(ctx.product) > 0 {
fmt.Fprintf(ctx.stdout, "<title>%s</title>\n", html.EscapeString(ctx.product))
}
fmt.Fprintln(ctx.stdout, "</head>")
fmt.Fprintln(ctx.stdout, "<body>")
- if 0 < len(ctx.title) {
+ if len(ctx.title) > 0 {
fmt.Fprintf(ctx.stdout, " <h1>%s</h1>\n", html.EscapeString(ctx.title))
- } else if 0 < len(ctx.product) {
+ } else if len(ctx.product) > 0 {
fmt.Fprintf(ctx.stdout, " <h1>%s</h1>\n", html.EscapeString(ctx.product))
}
ids := make(map[string]string)
diff --git a/tools/compliance/cmd/htmlnotice/htmlnotice_test.go b/tools/compliance/cmd/htmlnotice/htmlnotice_test.go
index b8bc47fb4d..1b01d16a53 100644
--- a/tools/compliance/cmd/htmlnotice/htmlnotice_test.go
+++ b/tools/compliance/cmd/htmlnotice/htmlnotice_test.go
@@ -678,7 +678,7 @@ func Test(t *testing.T) {
}
if !inBody {
if expectTitle {
- if tl := checkTitle(line); 0 < len(tl) {
+ if tl := checkTitle(line); len(tl) > 0 {
if tl != ttle.t {
t.Errorf("htmlnotice: unexpected title: got %q, want %q", tl, ttle.t)
}
diff --git a/tools/compliance/cmd/shippedlibs/shippedlibs.go b/tools/compliance/cmd/shippedlibs/shippedlibs.go
index fddc4896ed..94b19f197e 100644
--- a/tools/compliance/cmd/shippedlibs/shippedlibs.go
+++ b/tools/compliance/cmd/shippedlibs/shippedlibs.go
@@ -22,13 +22,13 @@ import (
"io/fs"
"os"
"path/filepath"
+ "strings"
+ "android/soong/response"
"android/soong/tools/compliance"
)
var (
- outputFile = flag.String("o", "-", "Where to write the library list. (default stdout)")
-
failNoneRequested = fmt.Errorf("\nNo license metadata files requested")
failNoLicenses = fmt.Errorf("No licenses found")
)
@@ -40,28 +40,58 @@ type context struct {
}
func init() {
- flag.Usage = func() {
+}
+
+func main() {
+ var expandedArgs []string
+ for _, arg := range os.Args[1:] {
+ if strings.HasPrefix(arg, "@") {
+ f, err := os.Open(strings.TrimPrefix(arg, "@"))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+
+ respArgs, err := response.ReadRspFile(f)
+ f.Close()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+ expandedArgs = append(expandedArgs, respArgs...)
+ } else {
+ expandedArgs = append(expandedArgs, arg)
+ }
+ }
+
+ flags := flag.NewFlagSet("flags", flag.ExitOnError)
+
+ outputFile := flags.String("o", "-", "Where to write the library list. (default stdout)")
+
+ flags.Usage = func() {
fmt.Fprintf(os.Stderr, `Usage: %s {options} file.meta_lic {file.meta_lic...}
Outputs a list of libraries used in the shipped images.
Options:
`, filepath.Base(os.Args[0]))
- flag.PrintDefaults()
+ flags.PrintDefaults()
}
-}
-func main() {
- flag.Parse()
+ err := flags.Parse(expandedArgs)
+ if err != nil {
+ flags.Usage()
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ }
// Must specify at least one root target.
- if flag.NArg() == 0 {
- flag.Usage()
+ if flags.NArg() == 0 {
+ flags.Usage()
os.Exit(2)
}
if len(*outputFile) == 0 {
- flag.Usage()
+ flags.Usage()
fmt.Fprintf(os.Stderr, "must specify file for -o; use - for stdout\n")
os.Exit(2)
} else {
@@ -89,10 +119,10 @@ func main() {
ctx := &context{ofile, os.Stderr, os.DirFS(".")}
- err := shippedLibs(ctx, flag.Args()...)
+ err = shippedLibs(ctx, flags.Args()...)
if err != nil {
if err == failNoneRequested {
- flag.Usage()
+ flags.Usage()
}
fmt.Fprintf(os.Stderr, "%s\n", err.Error())
os.Exit(1)
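
Editorial note: the `@file` arguments handled above are response files; each `@path` token is replaced by the arguments read from `path` via soong's `response.ReadRspFile`. A minimal Python sketch of the same expansion idea, assuming shlex-style tokenization (an approximation, not the real reader):

```python
# Hedged sketch of "@file" response-file expansion; shlex.split stands in
# for soong's response.ReadRspFile tokenization and may differ on quoting.
import shlex
import sys

def expand_args(argv):
    expanded = []
    for arg in argv:
        if arg.startswith('@'):
            with open(arg[1:]) as f:
                # Replace "@path" with the tokens read from that file.
                expanded.extend(shlex.split(f.read()))
        else:
            expanded.append(arg)
    return expanded

if __name__ == '__main__':
    print(expand_args(sys.argv[1:]))
```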
diff --git a/tools/compliance/cmd/textnotice/textnotice.go b/tools/compliance/cmd/textnotice/textnotice.go
index 9e9229f2bd..58afb48d4c 100644
--- a/tools/compliance/cmd/textnotice/textnotice.go
+++ b/tools/compliance/cmd/textnotice/textnotice.go
@@ -192,7 +192,7 @@ func textNotice(ctx *context, files ...string) error {
return fmt.Errorf("Unable to read license text file(s) for %q: %v\n", files, err)
}
- if 0 < len(ctx.title) {
+ if len(ctx.title) > 0 {
fmt.Fprintf(ctx.stdout, "%s\n\n", ctx.title)
}
for h := range ni.Hashes() {
diff --git a/tools/compliance/noticeindex.go b/tools/compliance/noticeindex.go
index 7bebe3d124..f0823837d4 100644
--- a/tools/compliance/noticeindex.go
+++ b/tools/compliance/noticeindex.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/fs"
+ "net/url"
"path/filepath"
"regexp"
"sort"
@@ -93,13 +94,14 @@ func IndexLicenseTexts(rootFS fs.FS, lg *LicenseGraph, rs ResolutionSet) (*Notic
}
hashes := make(map[hash]struct{})
for _, text := range tn.LicenseTexts() {
- if _, ok := ni.hash[text]; !ok {
- err := ni.addText(text)
+ fname := strings.SplitN(text, ":", 2)[0]
+ if _, ok := ni.hash[fname]; !ok {
+ err := ni.addText(fname)
if err != nil {
return nil, err
}
}
- hash := ni.hash[text]
+ hash := ni.hash[fname]
if _, ok := hashes[hash]; !ok {
hashes[hash] = struct{}{}
}
@@ -108,11 +110,12 @@ func IndexLicenseTexts(rootFS fs.FS, lg *LicenseGraph, rs ResolutionSet) (*Notic
return hashes, nil
}
- link := func(libName string, hashes map[hash]struct{}, installPaths []string) {
- if _, ok := ni.libHash[libName]; !ok {
- ni.libHash[libName] = make(map[hash]struct{})
- }
+ link := func(tn *TargetNode, hashes map[hash]struct{}, installPaths []string) {
for h := range hashes {
+ libName := ni.getLibName(tn, h)
+ if _, ok := ni.libHash[libName]; !ok {
+ ni.libHash[libName] = make(map[hash]struct{})
+ }
if _, ok := ni.hashLibInstall[h]; !ok {
ni.hashLibInstall[h] = make(map[string]map[string]struct{})
}
@@ -160,7 +163,7 @@ func IndexLicenseTexts(rootFS fs.FS, lg *LicenseGraph, rs ResolutionSet) (*Notic
if err != nil {
return false
}
- link(ni.getLibName(tn), hashes, installPaths)
+ link(tn, hashes, installPaths)
if tn.IsContainer() {
return true
}
@@ -170,7 +173,7 @@ func IndexLicenseTexts(rootFS fs.FS, lg *LicenseGraph, rs ResolutionSet) (*Notic
if err != nil {
return false
}
- link(ni.getLibName(r.actsOn), hashes, installPaths)
+ link(r.actsOn, hashes, installPaths)
}
return false
})
@@ -305,7 +308,31 @@ func (ni *NoticeIndex) HashText(h hash) []byte {
}
// getLibName returns the name of the library associated with `noticeFor`.
-func (ni *NoticeIndex) getLibName(noticeFor *TargetNode) string {
+func (ni *NoticeIndex) getLibName(noticeFor *TargetNode, h hash) string {
+ for _, text := range noticeFor.LicenseTexts() {
+ if !strings.Contains(text, ":") {
+ if ni.hash[text].key != h.key {
+ continue
+ }
+ ln := ni.checkMetadataForLicenseText(noticeFor, text)
+ if len(ln) > 0 {
+ return ln
+ }
+ continue
+ }
+
+ fields := strings.SplitN(text, ":", 2)
+ fname, pname := fields[0], fields[1]
+ if ni.hash[fname].key != h.key {
+ continue
+ }
+
+ ln, err := url.QueryUnescape(pname)
+ if err != nil {
+ continue
+ }
+ return ln
+ }
// use name from METADATA if available
ln := ni.checkMetadata(noticeFor)
if len(ln) > 0 {
@@ -322,6 +349,17 @@ func (ni *NoticeIndex) getLibName(noticeFor *TargetNode) string {
if !strings.HasPrefix(licenseText, "prebuilts/") {
continue
}
+ if !strings.Contains(licenseText, ":") {
+ if ni.hash[licenseText].key != h.key {
+ continue
+ }
+ } else {
+ fields := strings.SplitN(licenseText, ":", 2)
+ fname := fields[0]
+ if ni.hash[fname].key != h.key {
+ continue
+ }
+ }
for r, prefix := range SafePrebuiltPrefixes {
match := r.FindString(licenseText)
if len(match) == 0 {
@@ -337,14 +375,14 @@ func (ni *NoticeIndex) getLibName(noticeFor *TargetNode) string {
}
// remove LICENSE or NOTICE or other filename
li := strings.LastIndex(match, "/")
- if 0 < li {
+ if li > 0 {
match = match[:li]
}
// remove *licenses/ path segment and subdirectory if in path
- if offsets := licensesPathRegexp.FindAllStringIndex(match, -1); offsets != nil && 0 < offsets[len(offsets)-1][0] {
+ if offsets := licensesPathRegexp.FindAllStringIndex(match, -1); offsets != nil && offsets[len(offsets)-1][0] > 0 {
match = match[:offsets[len(offsets)-1][0]]
li = strings.LastIndex(match, "/")
- if 0 < li {
+ if li > 0 {
match = match[:li]
}
}
@@ -366,9 +404,13 @@ func (ni *NoticeIndex) getLibName(noticeFor *TargetNode) string {
// strip off [./]meta_lic from license metadata path and extract base name
n := noticeFor.name[:len(noticeFor.name)-9]
li := strings.LastIndex(n, "/")
- if 0 < li {
+ if li > 0 {
n = n[li+1:]
}
+ fi := strings.Index(n, "@")
+ if fi > 0 {
+ n = n[:fi]
+ }
return n
}
@@ -381,65 +423,113 @@ func (ni *NoticeIndex) checkMetadata(noticeFor *TargetNode) string {
}
return name
}
- f, err := ni.rootFS.Open(filepath.Join(p, "METADATA"))
+ name, err := ni.checkMetadataFile(filepath.Join(p, "METADATA"))
if err != nil {
ni.projectName[p] = noProjectName
continue
}
- name := ""
- description := ""
- version := ""
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- m := nameRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- name = m[1]
- }
- if version != "" {
- break
- }
- continue
+ if len(name) == 0 {
+ ni.projectName[p] = noProjectName
+ continue
+ }
+ ni.projectName[p] = name
+ return name
+ }
+ return ""
+}
+
+// checkMetadataForLicenseText returns the library name from the METADATA
+// file of the project that contains `licenseText`, or "" if none is found.
+func (ni *NoticeIndex) checkMetadataForLicenseText(noticeFor *TargetNode, licenseText string) string {
+ p := ""
+ for _, proj := range noticeFor.Projects() {
+ if strings.HasPrefix(licenseText, proj) {
+ p = proj
+ }
+ }
+ if len(p) == 0 {
+ p = filepath.Dir(licenseText)
+ for {
+ fi, err := fs.Stat(ni.rootFS, filepath.Join(p, ".git"))
+ if err == nil && fi.IsDir() {
+ break
}
- m = versionRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- version = m[1]
- }
- if name != "" {
- break
- }
+ if strings.Contains(p, "/") && p != "/" {
+ p = filepath.Dir(p)
continue
}
- m = descRegexp.FindStringSubmatch(line)
- if m != nil {
- if 1 < len(m) && m[1] != "" {
- description = m[1]
- }
- }
+ return ""
+ }
+ }
+ if name, ok := ni.projectName[p]; ok {
+ if name == noProjectName {
+ return ""
}
- _ = s.Err()
- _ = f.Close()
- if name != "" {
+ return name
+ }
+ name, err := ni.checkMetadataFile(filepath.Join(p, "METADATA"))
+ if err == nil && len(name) > 0 {
+ ni.projectName[p] = name
+ return name
+ }
+ ni.projectName[p] = noProjectName
+ return ""
+}
+
+// checkMetadataFile tries to look up a library name from a METADATA file at `path`.
+func (ni *NoticeIndex) checkMetadataFile(path string) (string, error) {
+ f, err := ni.rootFS.Open(path)
+ if err != nil {
+ return "", err
+ }
+ name := ""
+ description := ""
+ version := ""
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ m := nameRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ name = m[1]
+ }
if version != "" {
- if version[0] == 'v' || version[0] == 'V' {
- ni.projectName[p] = name + "_" + version
- } else {
- ni.projectName[p] = name + "_v_" + version
- }
- } else {
- ni.projectName[p] = name
+ break
}
- return ni.projectName[p]
+ continue
}
- if description != "" {
- ni.projectName[p] = description
- return ni.projectName[p]
+ m = versionRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ version = m[1]
+ }
+ if name != "" {
+ break
+ }
+ continue
+ }
+ m = descRegexp.FindStringSubmatch(line)
+ if m != nil {
+ if 1 < len(m) && m[1] != "" {
+ description = m[1]
+ }
}
- ni.projectName[p] = noProjectName
}
- return ""
+ _ = s.Err()
+ _ = f.Close()
+ if name != "" {
+ if version != "" {
+ if version[0] == 'v' || version[0] == 'V' {
+ return name + "_" + version, nil
+ } else {
+ return name + "_v_" + version, nil
+ }
+ }
+ return name, nil
+ }
+ if description != "" {
+ return description, nil
+ }
+ return "", nil
}
// addText reads and indexes the content of a license text file.
@@ -580,7 +670,7 @@ func (l hashList) Swap(i, j int) { (*l.hashes)[i], (*l.hashes)[j] = (*l.hashes)[
// the `j`th element.
func (l hashList) Less(i, j int) bool {
var insti, instj int
- if 0 < len(l.libName) {
+ if len(l.libName) > 0 {
insti = len(l.ni.hashLibInstall[(*l.hashes)[i]][l.libName])
instj = len(l.ni.hashLibInstall[(*l.hashes)[j]][l.libName])
} else {
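
Editorial note: the `getLibName` rework above introduces a two-part license-text form, `path[:name]`, where the optional display name is URL-query-escaped. An illustrative Python parse of that convention (the sample path and name are invented; `unquote_plus` mirrors Go's `url.QueryUnescape`):

```python
# Illustrates the "fname:pname" license-text convention; values invented.
from urllib.parse import unquote_plus

def split_license_text(text):
    if ':' not in text:
        return text, None  # plain file path with no attached display name
    fname, pname = text.split(':', 1)
    return fname, unquote_plus(pname)  # pname is URL-query-escaped

print(split_license_text('prebuilts/foo/LICENSE:Foo%20Library'))
# -> ('prebuilts/foo/LICENSE', 'Foo Library')
```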
diff --git a/tools/compliance/readgraph_test.go b/tools/compliance/readgraph_test.go
index db52fb193b..bcf9f39603 100644
--- a/tools/compliance/readgraph_test.go
+++ b/tools/compliance/readgraph_test.go
@@ -94,7 +94,7 @@ func TestReadLicenseGraph(t *testing.T) {
}
return
}
- if 0 < len(tt.expectedError) {
+ if len(tt.expectedError) > 0 {
t.Errorf("unexpected success: got no error, want %q err", tt.expectedError)
return
}
diff --git a/tools/product_debug.py b/tools/product_debug.py
deleted file mode 100755
index ff2657c6d8..0000000000
--- a/tools/product_debug.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-import sys
-
-def break_lines(key, val):
- # these don't get split
- if key in ("PRODUCT_MODEL"):
- return (key,val)
- return (key, "\n".join(val.split()))
-
-def split_line(line):
- words = line.split("=", 1)
- if len(words) == 1:
- return (words[0], "")
- else:
- return (words[0], words[1])
-
-def sort_lines(text):
- lines = text.split()
- lines.sort()
- return "\n".join(lines)
-
-def parse_variables(lines):
- return [split_line(line) for line in lines if line.strip()]
-
-def render_variables(variables):
- variables = dict(variables)
- del variables["FILE"]
- variables = list(variables.iteritems())
- variables.sort(lambda a, b: cmp(a[0], b[0]))
- return ("<table id='variables'>"
- + "\n".join([ "<tr><th>%(key)s</th><td>%(val)s</td></tr>" % { "key": key, "val": val }
- for key,val in variables])
- +"</table>")
-
-def linkify_inherit(variables, text, func_name):
- groups = re.split("(\\$\\(call " + func_name + ",.*\\))", text)
- result = ""
- for i in range(0,len(groups)/2):
- i = i * 2
- result = result + groups[i]
- s = groups[i+1]
- href = s.split(",", 1)[1].strip()[:-1]
- href = href.replace("$(SRC_TARGET_DIR)", "build/target")
- href = ("../" * variables["FILE"].count("/")) + href + ".html"
- result = result + "<a href=\"%s\">%s</a>" % (href,s)
- result = result + groups[-1]
- return result
-
-def render_original(variables, text):
- text = linkify_inherit(variables, text, "inherit-product")
- text = linkify_inherit(variables, text, "inherit-product-if-exists")
- return text
-
-def read_file(fn):
- f = file(fn)
- text = f.read()
- f.close()
- return text
-
-def main(argv):
- # read the variables
- lines = sys.stdin.readlines()
- variables = parse_variables(lines)
-
- # format the variables
- variables = [break_lines(key,val) for key,val in variables]
-
- # now it's a dict
- variables = dict(variables)
-
- sorted_vars = (
- "PRODUCT_COPY_FILES",
- "PRODUCT_PACKAGES",
- "PRODUCT_LOCALES",
- "PRODUCT_PROPERTY_OVERRIDES",
- )
-
- for key in sorted_vars:
- variables[key] = sort_lines(variables[key])
-
- # the original file
- original = read_file(variables["FILE"])
-
- # formatting
- values = dict(variables)
- values.update({
- "variables": render_variables(variables),
- "original": render_original(variables, original),
- })
- print """<html>
-
-
-<head>
- <title>%(FILE)s</title>
- <style type="text/css">
- body {
- font-family: Helvetica, Arial, sans-serif;
- padding-bottom: 20px;
- }
- #variables {
- border-collapse: collapse;
- }
- #variables th, #variables td {
- vertical-align: top;
- text-align: left;
- border-top: 1px solid #c5cdde;
- border-bottom: 1px solid #c5cdde;
- padding: 2px 10px 2px 10px;
- }
- #variables th {
- font-size: 10pt;
- background-color: #e2ecff
- }
- #variables td {
- background-color: #ebf2ff;
- white-space: pre;
- font-size: 10pt;
- }
- #original {
- background-color: #ebf2ff;
- border-top: 1px solid #c5cdde;
- border-bottom: 1px solid #c5cdde;
- padding: 2px 10px 2px 10px;
- white-space: pre;
- font-size: 10pt;
- }
- </style>
-</head>
-<body>
-<h1>%(FILE)s</h1>
-<a href="#Original">Original</a>
-<a href="#Variables">Variables</a>
-<h2><a name="Original"></a>Original</h2>
-<div id="original">%(original)s</div>
-<h2><a name="Variables"></a>Variables</h2>
-%(variables)s
-</body>
-</html>
-""" % values
-
-if __name__ == "__main__":
- main(sys.argv)
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 7b2c290ce5..25483f3838 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -558,6 +558,7 @@ python_binary_host {
python_binary_host {
name: "fsverity_manifest_generator",
+ defaults: ["releasetools_binary_defaults"],
srcs: [
"fsverity_manifest_generator.py",
],
@@ -574,6 +575,7 @@ python_binary_host {
python_binary_host {
name: "fsverity_metadata_generator",
+ defaults: ["releasetools_binary_defaults"],
srcs: [
"fsverity_metadata_generator.py",
],
diff --git a/tools/releasetools/OWNERS b/tools/releasetools/OWNERS
index d0b862762d..4ceb6ff07f 100644
--- a/tools/releasetools/OWNERS
+++ b/tools/releasetools/OWNERS
@@ -1,7 +1,6 @@
elsk@google.com
nhdo@google.com
-xunchang@google.com
zhangkelvin@google.com
-per-file merge_*.py = danielnorman@google.com
+per-file *merge_*.py = danielnorman@google.com, jgalmes@google.com, rseymour@google.com
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 7143775333..da7e11a93f 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -64,7 +64,7 @@ import verity_utils
import ota_metadata_pb2
from apex_utils import GetApexInfoFromTargetFiles
-from common import AddCareMapForAbOta
+from common import AddCareMapForAbOta, ZipDelete
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -1034,9 +1034,10 @@ def OptimizeCompressedEntries(zipfile_path):
if zinfo.compress_size > zinfo.file_size * 0.80 and zinfo.compress_type != zipfile.ZIP_STORED:
entries_to_store.append(zinfo)
zfp.extract(zinfo, tmpdir)
+ if len(entries_to_store) == 0:
+ return
# Remove these entries, then re-add them as ZIP_STORED
- common.RunAndCheckOutput(
- ["zip", "-d", zipfile_path] + [entry.filename for entry in entries_to_store])
+ ZipDelete(zipfile_path, [entry.filename for entry in entries_to_store])
with zipfile.ZipFile(zipfile_path, "a", allowZip64=True) as zfp:
for entry in entries_to_store:
zfp.write(os.path.join(tmpdir, entry.filename), entry.filename, compress_type=zipfile.ZIP_STORED)
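
Editorial note: `OptimizeCompressedEntries` now returns early when nothing qualifies; the entries it targets are those where deflate saves less than 20%. A self-contained sketch of just that test:

```python
# Sketch of the poor-compression test used by OptimizeCompressedEntries:
# re-storing is worthwhile when deflate saves less than 20% of the size.
import zipfile

def poorly_compressed(zinfo: zipfile.ZipInfo) -> bool:
    return (zinfo.compress_type != zipfile.ZIP_STORED
            and zinfo.compress_size > zinfo.file_size * 0.80)
```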
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 2a39f656df..3f13a4a5de 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -54,7 +54,7 @@ class ApexSigningError(Exception):
class ApexApkSigner(object):
"""Class to sign the apk files and other files in an apex payload image and repack the apex"""
- def __init__(self, apex_path, key_passwords, codename_to_api_level_map, avbtool=None, sign_tool=None):
+ def __init__(self, apex_path, key_passwords, codename_to_api_level_map, avbtool=None, sign_tool=None, fsverity_tool=None):
self.apex_path = apex_path
if not key_passwords:
self.key_passwords = dict()
@@ -65,8 +65,9 @@ class ApexApkSigner(object):
OPTIONS.search_path, "bin", "debugfs_static")
self.avbtool = avbtool if avbtool else "avbtool"
self.sign_tool = sign_tool
+ self.fsverity_tool = fsverity_tool if fsverity_tool else "fsverity"
- def ProcessApexFile(self, apk_keys, payload_key, signing_args=None):
+ def ProcessApexFile(self, apk_keys, payload_key, signing_args=None, is_sepolicy=False, sepolicy_key=None, sepolicy_cert=None):
"""Scans and signs the payload files and repack the apex
Args:
@@ -84,10 +85,14 @@ class ApexApkSigner(object):
self.debugfs_path, 'list', self.apex_path]
entries_names = common.RunAndCheckOutput(list_cmd).split()
apk_entries = [name for name in entries_names if name.endswith('.apk')]
+ sepolicy_entries = []
+ if is_sepolicy:
+ sepolicy_entries = [name for name in entries_names if
+ name.startswith('./etc/SEPolicy') and name.endswith('.zip')]
# No need to sign and repack, return the original apex path.
- if not apk_entries and self.sign_tool is None:
- logger.info('No apk file to sign in %s', self.apex_path)
+ if not apk_entries and not sepolicy_entries and self.sign_tool is None:
+ logger.info('No payload (apk or zip) file to sign in %s', self.apex_path)
return self.apex_path
for entry in apk_entries:
@@ -101,15 +106,16 @@ class ApexApkSigner(object):
logger.warning('Apk path does not contain the intended directory name:'
' %s', entry)
- payload_dir, has_signed_content = self.ExtractApexPayloadAndSignContents(
- apk_entries, apk_keys, payload_key, signing_args)
+ payload_dir, has_signed_content = self.ExtractApexPayloadAndSignContents(apk_entries,
+ apk_keys, payload_key, sepolicy_entries, sepolicy_key, sepolicy_cert, signing_args)
if not has_signed_content:
logger.info('No contents has been signed in %s', self.apex_path)
return self.apex_path
return self.RepackApexPayload(payload_dir, payload_key, signing_args)
- def ExtractApexPayloadAndSignContents(self, apk_entries, apk_keys, payload_key, signing_args):
+ def ExtractApexPayloadAndSignContents(self, apk_entries, apk_keys, payload_key,
+ sepolicy_entries, sepolicy_key, sepolicy_cert, signing_args):
"""Extracts the payload image and signs the containing apk files."""
if not os.path.exists(self.debugfs_path):
raise ApexSigningError(
@@ -141,6 +147,11 @@ class ApexApkSigner(object):
codename_to_api_level_map=self.codename_to_api_level_map)
has_signed_content = True
+ for entry in sepolicy_entries:
+ sepolicy_key = sepolicy_key if sepolicy_key else payload_key
+ self.SignSePolicy(payload_dir, entry, sepolicy_key, sepolicy_cert)
+ has_signed_content = True
+
if self.sign_tool:
logger.info('Signing payload contents in apex %s with %s', self.apex_path, self.sign_tool)
# Pass avbtool to the custom signing tool
@@ -154,6 +165,36 @@ class ApexApkSigner(object):
return payload_dir, has_signed_content
+ def SignSePolicy(self, payload_dir, sepolicy_zip, sepolicy_key, sepolicy_cert):
+ sepolicy_sig = sepolicy_zip + '.sig'
+ sepolicy_fsv_sig = sepolicy_zip + '.fsv_sig'
+
+ policy_zip_path = os.path.join(payload_dir, sepolicy_zip)
+ sig_out_path = os.path.join(payload_dir, sepolicy_sig)
+ sig_old = sig_out_path + '.old'
+ if os.path.exists(sig_out_path):
+ os.rename(sig_out_path, sig_old)
+ sign_cmd = ['openssl', 'dgst', '-sign', sepolicy_key, '-keyform', 'PEM', '-sha256',
+ '-out', sig_out_path, '-binary', policy_zip_path]
+ common.RunAndCheckOutput(sign_cmd)
+ if os.path.exists(sig_old):
+ os.remove(sig_old)
+
+ if not sepolicy_cert:
+ logger.info('No cert provided for SEPolicy, skipping fsverity sign')
+ return
+
+ fsv_sig_out_path = os.path.join(payload_dir, sepolicy_fsv_sig)
+ fsv_sig_old = fsv_sig_out_path + '.old'
+ if os.path.exists(fsv_sig_out_path):
+ os.rename(fsv_sig_out_path, fsv_sig_old)
+
+ fsverity_cmd = [self.fsverity_tool, 'sign', policy_zip_path, fsv_sig_out_path,
+ '--key=' + sepolicy_key, '--cert=' + sepolicy_cert]
+ common.RunAndCheckOutput(fsverity_cmd)
+ if os.path.exists(fsv_sig_old):
+ os.remove(fsv_sig_old)
+
def RepackApexPayload(self, payload_dir, payload_key, signing_args=None):
"""Rebuilds the apex file with the updated payload directory."""
apex_dir = common.MakeTempDir()
@@ -173,7 +214,7 @@ class ApexApkSigner(object):
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
- shutil.rmtree(path)
+ shutil.rmtree(path, ignore_errors=True)
# TODO(xunchang) the signing process can be improved by using
# '--unsigned_payload_only'. But we need to parse the vbmeta earlier for
@@ -324,7 +365,9 @@ def ParseApexPayloadInfo(avbtool, payload_path):
def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False, sepolicy_key=None, sepolicy_cert=None,
+ fsverity_tool=None):
"""Signs the current uncompressed APEX with the given payload/container keys.
Args:
@@ -337,6 +380,10 @@ def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
sign_tool: A tool to sign the contents of the APEX.
+      is_sepolicy: Indicates if the apex is a sepolicy.apex.
+      sepolicy_key: Key to sign a sepolicy zip.
+      sepolicy_cert: Cert to sign a sepolicy zip.
+      fsverity_tool: Path to the fsverity tool used to sign the sepolicy zip.
Returns:
The path to the signed APEX file.
@@ -345,8 +392,9 @@ def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
# the apex file after signing.
apk_signer = ApexApkSigner(apex_file, container_pw,
codename_to_api_level_map,
- avbtool, sign_tool)
- apex_file = apk_signer.ProcessApexFile(apk_keys, payload_key, signing_args)
+ avbtool, sign_tool, fsverity_tool)
+ apex_file = apk_signer.ProcessApexFile(
+ apk_keys, payload_key, signing_args, is_sepolicy, sepolicy_key, sepolicy_cert)
# 2a. Extract and sign the APEX_PAYLOAD_IMAGE entry with the given
# payload_key.
@@ -400,7 +448,9 @@ def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
container_pw, apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False, sepolicy_key=None, sepolicy_cert=None,
+ fsverity_tool=None):
"""Signs the current compressed APEX with the given payload/container keys.
Args:
@@ -412,6 +462,10 @@ def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+      is_sepolicy: Indicates if the apex is a sepolicy.apex.
+      sepolicy_key: Key to sign a sepolicy zip.
+      sepolicy_cert: Cert to sign a sepolicy zip.
+      fsverity_tool: Path to the fsverity tool used to sign the sepolicy zip.
Returns:
The path to the signed APEX file.
@@ -438,7 +492,11 @@ def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
codename_to_api_level_map,
no_hashtree,
signing_args,
- sign_tool)
+ sign_tool,
+ is_sepolicy,
+ sepolicy_key,
+ sepolicy_cert,
+ fsverity_tool)
# 3. Compress signed original apex.
compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
@@ -466,7 +524,8 @@ def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None, sign_tool=None):
+ no_hashtree, signing_args=None, sign_tool=None,
+ is_sepolicy=False, sepolicy_key=None, sepolicy_cert=None, fsverity_tool=None):
"""Signs the current APEX with the given payload/container keys.
Args:
@@ -478,6 +537,9 @@ def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
codename_to_api_level_map: A dict that maps from codename to API level.
no_hashtree: Don't include hashtree in the signed APEX.
signing_args: Additional args to be passed to the payload signer.
+      is_sepolicy: Indicates if the apex is a sepolicy.apex.
+      sepolicy_key: Key to sign a sepolicy zip.
+      sepolicy_cert: Cert to sign a sepolicy zip.
+      fsverity_tool: Path to the fsverity tool used to sign the sepolicy zip.
Returns:
The path to the signed APEX file.
@@ -503,7 +565,11 @@ def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy,
+ sepolicy_key=sepolicy_key,
+ sepolicy_cert=sepolicy_cert,
+ fsverity_tool=fsverity_tool)
elif apex_type == 'COMPRESSED':
return SignCompressedApex(
avbtool,
@@ -515,7 +581,11 @@ def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=is_sepolicy,
+ sepolicy_key=sepolicy_key,
+ sepolicy_cert=sepolicy_cert,
+ fsverity_tool=fsverity_tool)
else:
# TODO(b/172912232): support signing compressed apex
raise ApexInfoError('Unsupported apex type {}'.format(apex_type))
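
Editorial note: the new keyword arguments thread from `SignApex` down through `SignUncompressedApex`/`SignCompressedApex` to `SignSePolicy`. A hypothetical call showing how a sepolicy APEX might be signed with the extended API (every path below is a placeholder, not a real artifact):

```python
# Hypothetical invocation of the extended SignApex() API; all file and key
# names are placeholders.
import apex_utils

with open('sepolicy.apex', 'rb') as f:
    apex_data = f.read()

signed_apex = apex_utils.SignApex(
    avbtool='avbtool',
    apex_data=apex_data,
    payload_key='payload_key.pem',
    container_key='container_key',
    container_pw=None,
    apk_keys={},
    codename_to_api_level_map={},
    no_hashtree=False,
    is_sepolicy=True,                   # sign ./etc/SEPolicy*.zip entries
    sepolicy_key='sepolicy_key.pem',    # openssl key for the .sig file
    sepolicy_cert='sepolicy.x509.pem',  # also enables the fsverity .fsv_sig
    fsverity_tool='fsverity')
```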
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 4b5846d2f7..dbd2c6f201 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -705,7 +705,7 @@ def ImagePropFromGlobalDict(glob_dict, mount_point):
if mount_point not in allowed_partitions:
continue
- if mount_point == "system_other":
+ if (mount_point == "system_other") and (dest_prop != "partition_size"):
# Propagate system properties to system_other. They'll get overridden
# after as needed.
copy_prop(src_prop.format("system"), dest_prop)
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 58510a52d2..b395c196d0 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -181,8 +181,5 @@ def main():
if __name__ == '__main__':
try:
main()
- except AssertionError as err:
- print('\n ERROR: %s\n' % (err,))
- sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/check_partition_sizes.py b/tools/releasetools/check_partition_sizes.py
index eaed07e877..738d77d63e 100644
--- a/tools/releasetools/check_partition_sizes.py
+++ b/tools/releasetools/check_partition_sizes.py
@@ -300,8 +300,5 @@ if __name__ == "__main__":
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError:
- logger.exception("\n ERROR:\n")
- sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index 6fc79d2b3a..4a2a905849 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -164,7 +164,7 @@ def GetVintfFileList():
"""
def PathToPatterns(path):
if path[-1] == '/':
- path += '*'
+ path += '**'
# Loop over all the entries in DIR_SEARCH_PATHS and find one where the key
  # is a prefix of path. In order to find the correct prefix, sort the
@@ -286,8 +286,5 @@ if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError:
- logger.exception('\n ERROR:\n')
- sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 686102a58d..9feb8af700 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -498,8 +498,9 @@ class BuildInfo(object):
def GetPartitionBuildProp(self, prop, partition):
"""Returns the inquired build property for the provided partition."""
- # Boot image uses ro.[product.]bootimage instead of boot.
- prop_partition = "bootimage" if partition == "boot" else partition
+    # The boot and init_boot images use ro.[product.]bootimage instead of boot.
+    # This comes from the generic ramdisk.
+ prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition
# If provided a partition for this property, only look within that
# partition's build.prop.
@@ -1025,7 +1026,8 @@ class PartitionBuildProps(object):
import_path = tokens[1]
if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
- raise ValueError('Unrecognized import path {}'.format(line))
+      logger.warning('Unrecognized import path {}'.format(line))
+ return {}
# We only recognize a subset of import statement that the init process
  # supports. And we can loosen the restriction based on how the dynamic
@@ -1403,7 +1405,7 @@ def _HasGkiCertificationArgs():
"gki_signing_algorithm" in OPTIONS.info_dict)
-def _GenerateGkiCertificate(image, image_name, partition_name):
+def _GenerateGkiCertificate(image, image_name):
key_path = OPTIONS.info_dict.get("gki_signing_key_path")
algorithm = OPTIONS.info_dict.get("gki_signing_algorithm")
@@ -1432,8 +1434,7 @@ def _GenerateGkiCertificate(image, image_name, partition_name):
if signature_args:
cmd.extend(["--additional_avb_args", signature_args])
- args = OPTIONS.info_dict.get(
- "avb_" + partition_name + "_add_hash_footer_args", "")
+ args = OPTIONS.info_dict.get("avb_boot_add_hash_footer_args", "")
args = args.strip()
if args:
cmd.extend(["--additional_avb_args", args])
@@ -1626,27 +1627,9 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
if args and args.strip():
cmd.extend(shlex.split(args))
- boot_signature = None
- if _HasGkiCertificationArgs():
- # Certify GKI images.
- boot_signature_bytes = b''
- if kernel_path is not None:
- boot_signature_bytes += _GenerateGkiCertificate(
- kernel_path, "generic_kernel", "boot")
- if has_ramdisk:
- boot_signature_bytes += _GenerateGkiCertificate(
- ramdisk_img.name, "generic_ramdisk", "init_boot")
-
- if len(boot_signature_bytes) > 0:
- boot_signature = tempfile.NamedTemporaryFile()
- boot_signature.write(boot_signature_bytes)
- boot_signature.flush()
- cmd.extend(["--boot_signature", boot_signature.name])
- else:
- # Certified GKI boot/init_boot image mustn't set 'mkbootimg_version_args'.
- args = info_dict.get("mkbootimg_version_args")
- if args and args.strip():
- cmd.extend(shlex.split(args))
+ args = info_dict.get("mkbootimg_version_args")
+ if args and args.strip():
+ cmd.extend(shlex.split(args))
if has_ramdisk:
cmd.extend(["--ramdisk", ramdisk_img.name])
@@ -1668,6 +1651,29 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
RunAndCheckOutput(cmd)
+ if _HasGkiCertificationArgs():
+ if not os.path.exists(img.name):
+ raise ValueError("Cannot find GKI boot.img")
+ if kernel_path is None or not os.path.exists(kernel_path):
+ raise ValueError("Cannot find GKI kernel.img")
+
+ # Certify GKI images.
+ boot_signature_bytes = b''
+ boot_signature_bytes += _GenerateGkiCertificate(img.name, "boot")
+ boot_signature_bytes += _GenerateGkiCertificate(
+ kernel_path, "generic_kernel")
+
+ BOOT_SIGNATURE_SIZE = 16 * 1024
+ if len(boot_signature_bytes) > BOOT_SIGNATURE_SIZE:
+ raise ValueError(
+ f"GKI boot_signature size must be <= {BOOT_SIGNATURE_SIZE}")
+ boot_signature_bytes += (
+ b'\0' * (BOOT_SIGNATURE_SIZE - len(boot_signature_bytes)))
+ assert len(boot_signature_bytes) == BOOT_SIGNATURE_SIZE
+
+ with open(img.name, 'ab') as f:
+ f.write(boot_signature_bytes)
+
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
# Hard-code the path as "/boot" for two-step special recovery image (which
@@ -1728,9 +1734,6 @@ def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
ramdisk_img.close()
img.close()
- if boot_signature is not None:
- boot_signature.close()
-
return data
@@ -2244,8 +2247,8 @@ def GetMinSdkVersion(apk_name):
stdoutdata, stderrdata = proc.communicate()
if proc.returncode != 0:
raise ExternalError(
- "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
- proc.returncode, stdoutdata, stderrdata))
+ "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
+ apk_name, proc.returncode, stdoutdata, stderrdata))
for line in stdoutdata.split("\n"):
# Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
@@ -2818,6 +2821,9 @@ def ZipDelete(zip_filename, entries):
"""
if isinstance(entries, str):
entries = [entries]
+ # If list is empty, nothing to do
+ if not entries:
+ return
cmd = ["zip", "-d", zip_filename] + entries
RunAndCheckOutput(cmd)
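
Editorial note: the relocated GKI certification step fixes the signature footprint at 16 KiB; the concatenated boot and kernel certificates are zero-padded to exactly that size and appended to boot.img. A standalone sketch of the padding rule:

```python
# Standalone sketch of the boot-signature padding above: NUL-pad the
# concatenated certificates to exactly 16 KiB, then append to the image.
BOOT_SIGNATURE_SIZE = 16 * 1024

def append_boot_signature(image_path: str, signature: bytes) -> None:
    if len(signature) > BOOT_SIGNATURE_SIZE:
        raise ValueError(
            'GKI boot_signature size must be <= %d' % BOOT_SIGNATURE_SIZE)
    padded = signature + b'\0' * (BOOT_SIGNATURE_SIZE - len(signature))
    assert len(padded) == BOOT_SIGNATURE_SIZE
    with open(image_path, 'ab') as f:  # image_path is a placeholder
        f.write(padded)
```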
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 0b2b1870c1..76da89c865 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -251,8 +251,5 @@ if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- logger.exception('\n ERROR:\n')
- sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index da5e93fb49..6d3ee3f658 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
#
-# Copyright (C) 2019 The Android Open Source Project
+# Copyright (C) 2022 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -72,7 +72,8 @@ Usage: merge_target_files [args]
files package and saves it at this path.
--rebuild_recovery
- Deprecated; does nothing.
+      Copy the recovery image used by non-A/B devices; applies when
+      regenerating vendor images with --rebuild-sepolicy.
--allow-duplicate-apkapex-keys
If provided, duplicate APK/APEX keys are ignored and the value from the
@@ -101,8 +102,6 @@ Usage: merge_target_files [args]
If provided, the location of vendor's dexpreopt_config.zip.
"""
-from __future__ import print_function
-
import fnmatch
import glob
import json
@@ -127,7 +126,7 @@ import ota_from_target_files
import sparse_img
import verity_utils
-from common import AddCareMapForAbOta, ExternalError, PARTITIONS_WITH_CARE_MAP
+from common import ExternalError
logger = logging.getLogger(__name__)
@@ -145,7 +144,6 @@ OPTIONS.output_item_list = None
OPTIONS.output_ota = None
OPTIONS.output_img = None
OPTIONS.output_super_empty = None
-# TODO(b/132730255): Remove this option.
OPTIONS.rebuild_recovery = False
# TODO(b/150582573): Remove this option.
OPTIONS.allow_duplicate_apkapex_keys = False
@@ -277,40 +275,26 @@ def write_sorted_data(data, path):
output.write(out_str)
-def extract_items(target_files, target_files_temp_dir, extract_item_list):
- """Extracts items from target files to temporary directory.
-
- This function extracts from the specified target files zip archive into the
- specified temporary directory, the items specified in the extract item list.
+def extract_items(input_zip, output_dir, extract_item_list):
+  """Extracts items in extract_item_list from a zip to a dir."""
- Args:
- target_files: The target files zip archive from which to extract items.
- target_files_temp_dir: The temporary directory where the extracted items
- will land.
- extract_item_list: A list of items to extract.
- """
-
- logger.info('extracting from %s', target_files)
+ logger.info('extracting from %s', input_zip)
# Filter the extract_item_list to remove any items that do not exist in the
# zip file. Otherwise, the extraction step will fail.
- with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zipfile:
- target_files_namelist = target_files_zipfile.namelist()
+ with zipfile.ZipFile(input_zip, allowZip64=True) as input_zipfile:
+ input_namelist = input_zipfile.namelist()
filtered_extract_item_list = []
for pattern in extract_item_list:
- matching_namelist = fnmatch.filter(target_files_namelist, pattern)
+ matching_namelist = fnmatch.filter(input_namelist, pattern)
if not matching_namelist:
logger.warning('no match for %s', pattern)
else:
filtered_extract_item_list.append(pattern)
- # Extract from target_files into target_files_temp_dir the
- # filtered_extract_item_list.
-
- common.UnzipToDir(target_files, target_files_temp_dir,
- filtered_extract_item_list)
+ common.UnzipToDir(input_zip, output_dir, filtered_extract_item_list)
def copy_items(from_dir, to_dir, patterns):
@@ -337,19 +321,9 @@ def copy_items(from_dir, to_dir, patterns):
shutil.copyfile(original_file_path, copied_file_path)
-def validate_config_lists(framework_item_list, framework_misc_info_keys,
- vendor_item_list):
+def validate_config_lists():
"""Performs validations on the merge config lists.
- Args:
- framework_item_list: The list of items to extract from the partial framework
- target files package as is.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is.
-
Returns:
False if a validation fails, otherwise true.
"""
@@ -358,8 +332,8 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys,
default_combined_item_set = set(DEFAULT_FRAMEWORK_ITEM_LIST)
default_combined_item_set.update(DEFAULT_VENDOR_ITEM_LIST)
- combined_item_set = set(framework_item_list)
- combined_item_set.update(vendor_item_list)
+ combined_item_set = set(OPTIONS.framework_item_list)
+ combined_item_set.update(OPTIONS.vendor_item_list)
# Check that the merge config lists are not missing any item specified
# by the default config lists.
@@ -375,11 +349,11 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys,
for partition in SINGLE_BUILD_PARTITIONS:
image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
in_framework = (
- any(item.startswith(partition) for item in framework_item_list) or
- image_path in framework_item_list)
+ any(item.startswith(partition) for item in OPTIONS.framework_item_list)
+ or image_path in OPTIONS.framework_item_list)
in_vendor = (
- any(item.startswith(partition) for item in vendor_item_list) or
- image_path in vendor_item_list)
+ any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
+ image_path in OPTIONS.vendor_item_list)
if in_framework and in_vendor:
logger.error(
'Cannot extract items from %s for both the framework and vendor'
@@ -387,9 +361,8 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys,
' includes %s.', partition, partition)
has_error = True
- if ('dynamic_partition_list'
- in framework_misc_info_keys) or ('super_partition_groups'
- in framework_misc_info_keys):
+ if ('dynamic_partition_list' in OPTIONS.framework_misc_info_keys) or (
+ 'super_partition_groups' in OPTIONS.framework_misc_info_keys):
logger.error('Dynamic partition misc info keys should come from '
'the vendor instance of META/misc_info.txt.')
has_error = True
@@ -397,98 +370,42 @@ def validate_config_lists(framework_item_list, framework_misc_info_keys,
return not has_error
-def process_ab_partitions_txt(framework_target_files_temp_dir,
- vendor_target_files_temp_dir,
- output_target_files_temp_dir):
- """Performs special processing for META/ab_partitions.txt.
-
- This function merges the contents of the META/ab_partitions.txt files from the
- framework directory and the vendor directory, placing the merged result in the
- output directory. The precondition in that the files are already extracted.
- The post condition is that the output META/ab_partitions.txt contains the
- merged content. The format for each ab_partitions.txt is one partition name
- per line. The output file contains the union of the partition names.
+def merge_ab_partitions_txt(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Merges META/ab_partitions.txt.
- Args:
- framework_target_files_temp_dir: The name of a directory containing the
- special items extracted from the framework target files package.
- vendor_target_files_temp_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
+ The output contains the union of the partition names.
"""
-
- framework_ab_partitions_txt = os.path.join(framework_target_files_temp_dir,
- 'META', 'ab_partitions.txt')
-
- vendor_ab_partitions_txt = os.path.join(vendor_target_files_temp_dir, 'META',
- 'ab_partitions.txt')
-
- with open(framework_ab_partitions_txt) as f:
+ with open(os.path.join(framework_meta_dir, 'ab_partitions.txt')) as f:
framework_ab_partitions = f.read().splitlines()
- with open(vendor_ab_partitions_txt) as f:
+ with open(os.path.join(vendor_meta_dir, 'ab_partitions.txt')) as f:
vendor_ab_partitions = f.read().splitlines()
- output_ab_partitions = set(framework_ab_partitions + vendor_ab_partitions)
-
- output_ab_partitions_txt = os.path.join(output_target_files_temp_dir, 'META',
- 'ab_partitions.txt')
-
- write_sorted_data(data=output_ab_partitions, path=output_ab_partitions_txt)
-
+ write_sorted_data(
+ data=set(framework_ab_partitions + vendor_ab_partitions),
+ path=os.path.join(merged_meta_dir, 'ab_partitions.txt'))
-def process_misc_info_txt(framework_target_files_temp_dir,
- vendor_target_files_temp_dir,
- output_target_files_temp_dir,
- framework_misc_info_keys):
- """Performs special processing for META/misc_info.txt.
- This function merges the contents of the META/misc_info.txt files from the
- framework directory and the vendor directory, placing the merged result in the
- output directory. The precondition in that the files are already extracted.
- The post condition is that the output META/misc_info.txt contains the merged
- content.
+def merge_misc_info_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir):
+ """Merges META/misc_info.txt.
- Args:
- framework_target_files_temp_dir: The name of a directory containing the
- special items extracted from the framework target files package.
- vendor_target_files_temp_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
+ The output contains a combination of key=value pairs from both inputs.
+ Most pairs are taken from the vendor input, while some are taken from
+ the framework input.
"""
- misc_info_path = ['META', 'misc_info.txt']
- framework_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_target_files_temp_dir, *misc_info_path))
-
- # We take most of the misc info from the vendor target files.
-
- merged_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_target_files_temp_dir, *misc_info_path))
-
- # Replace certain values in merged_dict with values from
- # framework_dict.
+ OPTIONS.framework_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(framework_meta_dir, 'misc_info.txt'))
+ OPTIONS.vendor_misc_info = common.LoadDictionaryFromFile(
+ os.path.join(vendor_meta_dir, 'misc_info.txt'))
- for key in framework_misc_info_keys:
- merged_dict[key] = framework_dict[key]
+ # Merged misc info is a combination of vendor misc info plus certain values
+ # from the framework misc info.
- # Merge misc info keys used for Dynamic Partitions.
- if (merged_dict.get('use_dynamic_partitions')
- == 'true') and (framework_dict.get('use_dynamic_partitions') == 'true'):
- merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
- framework_dict=framework_dict, vendor_dict=merged_dict)
- merged_dict.update(merged_dynamic_partitions_dict)
- # Ensure that add_img_to_target_files rebuilds super split images for
- # devices that retrofit dynamic partitions. This flag may have been set to
- # false in the partial builds to prevent duplicate building of super.img.
- merged_dict['build_super_partition'] = 'true'
+ merged_dict = OPTIONS.vendor_misc_info
+ for key in OPTIONS.framework_misc_info_keys:
+ merged_dict[key] = OPTIONS.framework_misc_info[key]
# If AVB is enabled then ensure that we build vbmeta.img.
# Partial builds with AVB enabled may set PRODUCT_BUILD_VBMETA_IMAGE=false to
@@ -496,65 +413,31 @@ def process_misc_info_txt(framework_target_files_temp_dir,
if merged_dict.get('avb_enable') == 'true':
merged_dict['avb_building_vbmeta_image'] = 'true'
- # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
- # depending on which dictionary the key came from.
- # Only the file basename is required because all selinux_fc properties are
- # replaced with the full path to the file under META/ when misc_info.txt is
- # loaded from target files for repacking. See common.py LoadInfoDict().
- for key in merged_dict:
- if key.endswith('_selinux_fc'):
- merged_dict[key] = 'vendor_file_contexts.bin'
- for key in framework_dict:
- if key.endswith('_selinux_fc'):
- merged_dict[key] = 'framework_file_contexts.bin'
-
- output_misc_info_txt = os.path.join(output_target_files_temp_dir, 'META',
- 'misc_info.txt')
- write_sorted_data(data=merged_dict, path=output_misc_info_txt)
-
-
-def process_dynamic_partitions_info_txt(framework_target_files_dir,
- vendor_target_files_dir,
- output_target_files_dir):
- """Performs special processing for META/dynamic_partitions_info.txt.
-
- This function merges the contents of the META/dynamic_partitions_info.txt
- files from the framework directory and the vendor directory, placing the
- merged result in the output directory.
-
- This function does nothing if META/dynamic_partitions_info.txt from the vendor
- directory does not exist.
-
- Args:
- framework_target_files_dir: The name of a directory containing the special
- items extracted from the framework target files package.
- vendor_target_files_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
- """
+ return merged_dict
- if not os.path.exists(
- os.path.join(vendor_target_files_dir, 'META',
- 'dynamic_partitions_info.txt')):
- return
-
- dynamic_partitions_info_path = ['META', 'dynamic_partitions_info.txt']
+def merge_dynamic_partitions_info_txt(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+  """Merges META/dynamic_partitions_info.txt."""
framework_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_target_files_dir, *dynamic_partitions_info_path))
+ os.path.join(framework_meta_dir, 'dynamic_partitions_info.txt'))
vendor_dynamic_partitions_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_target_files_dir, *dynamic_partitions_info_path))
+ os.path.join(vendor_meta_dir, 'dynamic_partitions_info.txt'))
merged_dynamic_partitions_dict = common.MergeDynamicPartitionInfoDicts(
framework_dict=framework_dynamic_partitions_dict,
vendor_dict=vendor_dynamic_partitions_dict)
- output_dynamic_partitions_info_txt = os.path.join(
- output_target_files_dir, 'META', 'dynamic_partitions_info.txt')
write_sorted_data(
data=merged_dynamic_partitions_dict,
- path=output_dynamic_partitions_info_txt)
+ path=os.path.join(merged_meta_dir, 'dynamic_partitions_info.txt'))
+
+ # Merge misc info keys used for Dynamic Partitions.
+ OPTIONS.merged_misc_info.update(merged_dynamic_partitions_dict)
+ # Ensure that add_img_to_target_files rebuilds super split images for
+ # devices that retrofit dynamic partitions. This flag may have been set to
+ # false in the partial builds to prevent duplicate building of super.img.
+ OPTIONS.merged_misc_info['build_super_partition'] = 'true'
def item_list_to_partition_set(item_list):
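
Editorial note: at its core, the misc_info merge above is "start from the vendor dict, then overwrite the framework-owned keys". A tiny illustration with invented keys and values:

```python
# Minimal illustration of the misc_info merge rule; keys/values invented.
framework = {'ab_update': 'true', 'system_root_image': 'false'}
vendor = {'ab_update': 'false', 'board_uses_vendorimage': 'true'}
framework_misc_info_keys = ['ab_update']  # keys owned by the framework build

merged = dict(vendor)
for key in framework_misc_info_keys:
    merged[key] = framework[key]

assert merged == {'ab_update': 'true', 'board_uses_vendorimage': 'true'}
```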
@@ -586,57 +469,37 @@ def item_list_to_partition_set(item_list):
return partition_set
-def process_apex_keys_apk_certs_common(framework_target_files_dir,
- vendor_target_files_dir,
- output_target_files_dir,
- framework_partition_set,
- vendor_partition_set, file_name):
- """Performs special processing for META/apexkeys.txt or META/apkcerts.txt.
+def merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, merged_meta_dir,
+ file_name):
+ """Merges APK/APEX key list files."""
- This function merges the contents of the META/apexkeys.txt or
- META/apkcerts.txt files from the framework directory and the vendor directory,
- placing the merged result in the output directory. The precondition in that
- the files are already extracted. The post condition is that the output
- META/apexkeys.txt or META/apkcerts.txt contains the merged content.
-
- Args:
- framework_target_files_dir: The name of a directory containing the special
- items extracted from the framework target files package.
- vendor_target_files_dir: The name of a directory containing the special
- items extracted from the vendor target files package.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
- framework_partition_set: Partitions that are considered framework
- partitions. Used to filter apexkeys.txt and apkcerts.txt.
- vendor_partition_set: Partitions that are considered vendor partitions. Used
- to filter apexkeys.txt and apkcerts.txt.
- file_name: The name of the file to merge. One of apkcerts.txt or
- apexkeys.txt.
- """
+ if file_name not in ('apkcerts.txt', 'apexkeys.txt'):
+    raise ExternalError(
+        'Unexpected file_name provided to merge_package_keys_txt: %s' %
+        file_name)
def read_helper(d):
temp = {}
- file_path = os.path.join(d, 'META', file_name)
- with open(file_path) as f:
- for line in f:
- if line.strip():
- name = line.split()[0]
- match = MODULE_KEY_PATTERN.search(name)
- temp[match.group(1)] = line.strip()
+ with open(os.path.join(d, file_name)) as f:
+ for line in f.read().splitlines():
+ line = line.strip()
+ if line:
+ name_search = MODULE_KEY_PATTERN.search(line.split()[0])
+ temp[name_search.group(1)] = line
return temp
- framework_dict = read_helper(framework_target_files_dir)
- vendor_dict = read_helper(vendor_target_files_dir)
+ framework_dict = read_helper(framework_meta_dir)
+ vendor_dict = read_helper(vendor_meta_dir)
merged_dict = {}
def filter_into_merged_dict(item_dict, partition_set):
for key, value in item_dict.items():
- match = PARTITION_TAG_PATTERN.search(value)
+ tag_search = PARTITION_TAG_PATTERN.search(value)
- if match is None:
+ if tag_search is None:
raise ValueError('Entry missing partition tag: %s' % value)
- partition_tag = match.group(1)
+ partition_tag = tag_search.group(1)
if partition_tag in partition_set:
if key in merged_dict:
@@ -649,57 +512,63 @@ def process_apex_keys_apk_certs_common(framework_target_files_dir,
merged_dict[key] = value
- filter_into_merged_dict(framework_dict, framework_partition_set)
- filter_into_merged_dict(vendor_dict, vendor_partition_set)
-
- output_file = os.path.join(output_target_files_dir, 'META', file_name)
+  # Prioritize framework keys first. Duplicate keys from the vendor input
+  # are an error unless --allow-duplicate-apkapex-keys is set, in which
+  # case the framework value is kept and the vendor duplicate is ignored.
+ filter_into_merged_dict(framework_dict, OPTIONS.framework_partition_set)
+ filter_into_merged_dict(vendor_dict, OPTIONS.vendor_partition_set)
# The following code is similar to write_sorted_data, but different enough
# that we couldn't use that function. We need the output to be sorted by the
# basename of the apex/apk (without the ".apex" or ".apk" suffix). This
# allows the sort to be consistent with the framework/vendor input data and
# eases comparison of input data with merged data.
- with open(output_file, 'w') as output:
- for key in sorted(merged_dict.keys()):
- out_str = merged_dict[key] + '\n'
- output.write(out_str)
+ with open(os.path.join(merged_meta_dir, file_name), 'w') as output:
+ for key, value in sorted(merged_dict.items()):
+ output.write(value + '\n')
+
+
+def create_file_contexts_copies(framework_meta_dir, vendor_meta_dir,
+ merged_meta_dir):
+ """Creates named copies of each partial build's file_contexts.bin.
+ Used when regenerating images from the partial build.
+ """
+
+ def copy_fc_file(source_dir, file_name):
+ for name in (file_name, 'file_contexts.bin'):
+ fc_path = os.path.join(source_dir, name)
+ if os.path.exists(fc_path):
+ shutil.copyfile(fc_path, os.path.join(merged_meta_dir, file_name))
+ return
+    raise ValueError('Missing file_contexts file from %s: %s' %
+                     (source_dir, file_name))
-def copy_file_contexts(framework_target_files_dir, vendor_target_files_dir,
- output_target_files_dir):
- """Creates named copies of each build's file_contexts.bin in output META/."""
- framework_fc_path = os.path.join(framework_target_files_dir, 'META',
- 'framework_file_contexts.bin')
- if not os.path.exists(framework_fc_path):
- framework_fc_path = os.path.join(framework_target_files_dir, 'META',
- 'file_contexts.bin')
- if not os.path.exists(framework_fc_path):
- raise ValueError('Missing framework file_contexts.bin.')
- shutil.copyfile(
- framework_fc_path,
- os.path.join(output_target_files_dir, 'META',
- 'framework_file_contexts.bin'))
-
- vendor_fc_path = os.path.join(vendor_target_files_dir, 'META',
- 'vendor_file_contexts.bin')
- if not os.path.exists(vendor_fc_path):
- vendor_fc_path = os.path.join(vendor_target_files_dir, 'META',
- 'file_contexts.bin')
- if not os.path.exists(vendor_fc_path):
- raise ValueError('Missing vendor file_contexts.bin.')
- shutil.copyfile(
- vendor_fc_path,
- os.path.join(output_target_files_dir, 'META', 'vendor_file_contexts.bin'))
-
-
-def compile_split_sepolicy(product_out, partition_map):
+ copy_fc_file(framework_meta_dir, 'framework_file_contexts.bin')
+ copy_fc_file(vendor_meta_dir, 'vendor_file_contexts.bin')
+
+ # Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
+ # depending on which dictionary the key came from.
+ # Only the file basename is required because all selinux_fc properties are
+ # replaced with the full path to the file under META/ when misc_info.txt is
+ # loaded from target files for repacking. See common.py LoadInfoDict().
+ for key in OPTIONS.vendor_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'vendor_file_contexts.bin'
+ for key in OPTIONS.framework_misc_info:
+ if key.endswith('_selinux_fc'):
+ OPTIONS.merged_misc_info[key] = 'framework_file_contexts.bin'
+
+
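A minimal sketch of the *_selinux_fc rewrite described in the comment above, using illustrative key names; any misc_info key ending in '_selinux_fc' is rewritten:
```
vendor_misc_info = {'vendor_selinux_fc': 'out/intermediates/file_contexts.bin'}
framework_misc_info = {'system_selinux_fc': 'out/intermediates/file_contexts.bin'}
merged_misc_info = {}

for key in vendor_misc_info:
  if key.endswith('_selinux_fc'):
    merged_misc_info[key] = 'vendor_file_contexts.bin'
for key in framework_misc_info:
  if key.endswith('_selinux_fc'):
    merged_misc_info[key] = 'framework_file_contexts.bin'

assert merged_misc_info == {
    'vendor_selinux_fc': 'vendor_file_contexts.bin',
    'system_selinux_fc': 'framework_file_contexts.bin',
}
```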
+def compile_split_sepolicy(target_files_dir, partition_map):
"""Uses secilc to compile a split sepolicy file.
Depends on various */etc/selinux/* and */etc/vintf/* files within partitions.
Args:
- product_out: PRODUCT_OUT directory, containing partition directories.
- partition_map: A map of partition name -> relative path within product_out.
+ target_files_dir: Extracted directory of target_files, containing partition
+ directories.
+ partition_map: A map of partition name -> relative path within
+ target_files_dir.
Returns:
A command list that can be executed to create the compiled sepolicy.
@@ -710,7 +579,7 @@ def compile_split_sepolicy(product_out, partition_map):
logger.warning('Cannot load SEPolicy files for missing partition %s',
partition)
return None
- return os.path.join(product_out, partition_map[partition], path)
+ return os.path.join(target_files_dir, partition_map[partition], path)
# Load the kernel sepolicy version from the FCM. This is normally provided
# directly to selinux.cpp as a build flag, but is also available in this file.
@@ -734,7 +603,7 @@ def compile_split_sepolicy(product_out, partition_map):
# Use the same flags and arguments as selinux.cpp OpenSplitPolicy().
cmd = ['secilc', '-m', '-M', 'true', '-G', '-N']
cmd.extend(['-c', kernel_sepolicy_version])
- cmd.extend(['-o', os.path.join(product_out, 'META/combined_sepolicy')])
+ cmd.extend(['-o', os.path.join(target_files_dir, 'META/combined_sepolicy')])
cmd.extend(['-f', '/dev/null'])
required_policy_files = (
@@ -765,14 +634,14 @@ def compile_split_sepolicy(product_out, partition_map):
return cmd
-def validate_merged_apex_info(output_target_files_dir, partitions):
+def validate_merged_apex_info(target_files_dir, partitions):
"""Validates the APEX files in the merged target files directory.
Checks the APEX files in all possible preinstalled APEX directories.
Depends on the <partition>/apex/* APEX files within partitions.
Args:
- output_target_files_dir: Output directory containing merged partition
+ target_files_dir: Extracted directory of target_files, containing partition
directories.
partitions: A list of all the partitions in the output directory.
@@ -782,10 +651,10 @@ def validate_merged_apex_info(output_target_files_dir, partitions):
"""
apex_packages = set()
- apex_partitions = ('system', 'system_ext', 'product', 'vendor')
+ apex_partitions = ('system', 'system_ext', 'product', 'vendor', 'odm')
for partition in filter(lambda p: p in apex_partitions, partitions):
apex_info = apex_utils.GetApexInfoFromTargetFiles(
- output_target_files_dir, partition, compressed_only=False)
+ target_files_dir, partition, compressed_only=False)
partition_apex_packages = set([info.package_name for info in apex_info])
duplicates = apex_packages.intersection(partition_apex_packages)
if duplicates:
@@ -795,21 +664,21 @@ def validate_merged_apex_info(output_target_files_dir, partitions):
apex_packages.update(partition_apex_packages)
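The duplicate check above reduces to a set intersection; a small sketch with hypothetical package names:
```
# Hypothetical package names, used only to illustrate the check.
apex_packages = {'com.android.adbd', 'com.android.art'}
partition_apex_packages = {'com.android.art', 'com.example.vendor'}

duplicates = apex_packages.intersection(partition_apex_packages)
assert duplicates == {'com.android.art'}  # the tool raises an error here
apex_packages.update(partition_apex_packages)
```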
-def generate_care_map(partitions, output_target_files_dir):
- """Generates a merged META/care_map.pb file in the output target files dir.
+def generate_care_map(partitions, target_files_dir):
+ """Generates a merged META/care_map.pb file in the target files dir.
Depends on the info dict from META/misc_info.txt, as well as built images
within IMAGES/.
Args:
partitions: A list of partitions to potentially include in the care map.
- output_target_files_dir: The name of a directory that will be used to create
- the output target files package after all the special cases are processed.
+ target_files_dir: Extracted directory of target_files, containing partition
+ directories.
"""
- OPTIONS.info_dict = common.LoadInfoDict(output_target_files_dir)
+ OPTIONS.info_dict = common.LoadInfoDict(target_files_dir)
partition_image_map = {}
for partition in partitions:
- image_path = os.path.join(output_target_files_dir, 'IMAGES',
+ image_path = os.path.join(target_files_dir, 'IMAGES',
'{}.img'.format(partition))
if os.path.exists(image_path):
partition_image_map[partition] = image_path
@@ -826,123 +695,77 @@ def generate_care_map(partitions, output_target_files_dir):
image_size = verity_image_builder.CalculateMaxImageSize(partition_size)
OPTIONS.info_dict[image_size_prop] = image_size
- AddCareMapForAbOta(
- os.path.join(output_target_files_dir, 'META', 'care_map.pb'),
- PARTITIONS_WITH_CARE_MAP, partition_image_map)
+def merge_meta_files(temp_dir, merged_dir):
+ """Merges various files in META/*."""
-def process_special_cases(temp_dir, framework_meta, vendor_meta,
- output_target_files_temp_dir,
- framework_misc_info_keys, framework_partition_set,
- vendor_partition_set, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
- """Performs special-case processing for certain target files items.
+ framework_meta_dir = os.path.join(temp_dir, 'framework_meta', 'META')
+ extract_items(
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=os.path.dirname(framework_meta_dir),
+ extract_item_list=('META/*',))
- Certain files in the output target files package require special-case
- processing. This function performs all that special-case processing.
+ vendor_meta_dir = os.path.join(temp_dir, 'vendor_meta', 'META')
+ extract_items(
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=os.path.dirname(vendor_meta_dir),
+ extract_item_list=('META/*',))
- Args:
- temp_dir: Location containing an 'output' directory where target files have
- been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES, etc.
- framework_meta: The name of a directory containing the special items
- extracted from the framework target files package.
- vendor_meta: The name of a directory containing the special items
- extracted from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- framework_partition_set: Partitions that are considered framework
- partitions. Used to filter apexkeys.txt and apkcerts.txt.
- vendor_partition_set: Partitions that are considered vendor partitions. Used
- to filter apexkeys.txt and apkcerts.txt.
-
- The following are only used if dexpreopt is applied:
-
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
- """
+ merged_meta_dir = os.path.join(merged_dir, 'META')
+
+ # Merge META/misc_info.txt into OPTIONS.merged_misc_info,
+ # but do not write it yet. The following functions may further
+ # modify this dict.
+ OPTIONS.merged_misc_info = merge_misc_info_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ create_file_contexts_copies(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ if OPTIONS.merged_misc_info.get('use_dynamic_partitions') == 'true':
+ merge_dynamic_partitions_info_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ if OPTIONS.merged_misc_info.get('ab_update') == 'true':
+ merge_ab_partitions_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir)
+
+ for file_name in ('apkcerts.txt', 'apexkeys.txt'):
+ merge_package_keys_txt(
+ framework_meta_dir=framework_meta_dir,
+ vendor_meta_dir=vendor_meta_dir,
+ merged_meta_dir=merged_meta_dir,
+ file_name=file_name)
+
+ # Write the now-finalized OPTIONS.merged_misc_info.
+ write_sorted_data(
+ data=OPTIONS.merged_misc_info,
+ path=os.path.join(merged_meta_dir, 'misc_info.txt'))
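Summarizing the calls above, a sketch of what merged_meta_dir should contain once merge_meta_files() returns; the file names for the dynamic-partitions and A/B steps are inferred from the helper names, so treat them as assumptions:
```
expected_meta_files = [
    'misc_info.txt',                # merged dict, written last
    'framework_file_contexts.bin',  # copied from the framework build
    'vendor_file_contexts.bin',     # copied from the vendor build
    'dynamic_partitions_info.txt',  # only if use_dynamic_partitions == 'true'
    'ab_partitions.txt',            # only if ab_update == 'true'
    'apkcerts.txt',                 # merged per partition set
    'apexkeys.txt',                 # merged per partition set
]
```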
- if 'ab_update' in framework_misc_info_keys:
- process_ab_partitions_txt(
- framework_target_files_temp_dir=framework_meta,
- vendor_target_files_temp_dir=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir)
-
- copy_file_contexts(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir)
-
- process_misc_info_txt(
- framework_target_files_temp_dir=framework_meta,
- vendor_target_files_temp_dir=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_misc_info_keys=framework_misc_info_keys)
-
- process_dynamic_partitions_info_txt(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir)
-
- process_apex_keys_apk_certs_common(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir,
- framework_partition_set=framework_partition_set,
- vendor_partition_set=vendor_partition_set,
- file_name='apkcerts.txt')
-
- process_apex_keys_apk_certs_common(
- framework_target_files_dir=framework_meta,
- vendor_target_files_dir=vendor_meta,
- output_target_files_dir=output_target_files_temp_dir,
- framework_partition_set=framework_partition_set,
- vendor_partition_set=vendor_partition_set,
- file_name='apexkeys.txt')
- process_dexopt(
- temp_dir=temp_dir,
- framework_meta=framework_meta,
- vendor_meta=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_dexpreopt_tools=framework_dexpreopt_tools,
- framework_dexpreopt_config=framework_dexpreopt_config,
- vendor_dexpreopt_config=vendor_dexpreopt_config)
-
-
-def process_dexopt(temp_dir, framework_meta, vendor_meta,
- output_target_files_temp_dir,
- framework_dexpreopt_tools, framework_dexpreopt_config,
- vendor_dexpreopt_config):
+def process_dexopt(temp_dir, output_target_files_dir):
"""If needed, generates dexopt files for vendor apps.
Args:
temp_dir: Location containing an 'output' directory where target files have
- been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES, etc.
- framework_meta: The name of a directory containing the special items
- extracted from the framework target files package.
- vendor_meta: The name of a directory containing the special items extracted
- from the vendor target files package.
- output_target_files_temp_dir: The name of a directory that will be used to
- create the output target files package after all the special cases are
- processed.
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
+ been extracted, e.g. <temp_dir>/output/SYSTEM, <temp_dir>/output/IMAGES,
+ etc.
+ output_target_files_dir: The name of a directory that will be used to create
+ the output target files package after all the special cases are processed.
"""
# Check the vendor build's vsdk flag and the dexpreopt inputs.
- misc_info_path = ['META', 'misc_info.txt']
- vendor_misc_info_dict = common.LoadDictionaryFromFile(
- os.path.join(vendor_meta, *misc_info_path))
-
- if (vendor_misc_info_dict.get('building_with_vsdk') != 'true' or
- framework_dexpreopt_tools is None or
- framework_dexpreopt_config is None or
- vendor_dexpreopt_config is None):
+ if (OPTIONS.vendor_misc_info.get('building_with_vsdk') != 'true' or
+ OPTIONS.framework_dexpreopt_tools is None or
+ OPTIONS.framework_dexpreopt_config is None or
+ OPTIONS.vendor_dexpreopt_config is None):
return
logger.info('applying dexpreopt')
@@ -984,26 +807,30 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
# package.vdex
# package.odex
dexpreopt_tools_files_temp_dir = os.path.join(temp_dir, 'tools')
- dexpreopt_framework_config_files_temp_dir = os.path.join(temp_dir, 'system_config')
- dexpreopt_vendor_config_files_temp_dir = os.path.join(temp_dir, 'vendor_config')
+ dexpreopt_framework_config_files_temp_dir = os.path.join(
+ temp_dir, 'system_config')
+ dexpreopt_vendor_config_files_temp_dir = os.path.join(temp_dir,
+ 'vendor_config')
extract_items(
- target_files=OPTIONS.framework_dexpreopt_tools,
- target_files_temp_dir=dexpreopt_tools_files_temp_dir,
+ input_zip=OPTIONS.framework_dexpreopt_tools,
+ output_dir=dexpreopt_tools_files_temp_dir,
extract_item_list=('*',))
extract_items(
- target_files=OPTIONS.framework_dexpreopt_config,
- target_files_temp_dir=dexpreopt_framework_config_files_temp_dir,
+ input_zip=OPTIONS.framework_dexpreopt_config,
+ output_dir=dexpreopt_framework_config_files_temp_dir,
extract_item_list=('*',))
extract_items(
- target_files=OPTIONS.vendor_dexpreopt_config,
- target_files_temp_dir=dexpreopt_vendor_config_files_temp_dir,
+ input_zip=OPTIONS.vendor_dexpreopt_config,
+ output_dir=dexpreopt_vendor_config_files_temp_dir,
extract_item_list=('*',))
- os.symlink(os.path.join(output_target_files_temp_dir, "SYSTEM"),
- os.path.join(temp_dir, "system"))
- os.symlink(os.path.join(output_target_files_temp_dir, "VENDOR"),
- os.path.join(temp_dir, "vendor"))
+ os.symlink(
+ os.path.join(output_target_files_dir, 'SYSTEM'),
+ os.path.join(temp_dir, 'system'))
+ os.symlink(
+ os.path.join(output_target_files_dir, 'VENDOR'),
+ os.path.join(temp_dir, 'vendor'))
# The directory structure for flattened APEXes is:
#
@@ -1026,12 +853,10 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
# com.android.appsearch.apex
# com.android.art.apex
# ...
- apex_root = os.path.join(output_target_files_temp_dir, "SYSTEM", "apex")
- framework_misc_info_dict = common.LoadDictionaryFromFile(
- os.path.join(framework_meta, *misc_info_path))
+ apex_root = os.path.join(output_target_files_dir, 'SYSTEM', 'apex')
# Check for flattened versus updatable APEX.
- if framework_misc_info_dict.get('target_flatten_apex') == 'false':
+ if OPTIONS.framework_misc_info.get('target_flatten_apex') == 'false':
# Extract APEX.
logging.info('extracting APEX')
@@ -1094,13 +919,14 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
dex_img = 'VENDOR'
# Open vendor_filesystem_config to append the items generated by dexopt.
vendor_file_system_config = open(
- os.path.join(temp_dir, 'output', 'META', 'vendor_filesystem_config.txt'),
- 'a')
+ os.path.join(temp_dir, 'output', 'META',
+ 'vendor_filesystem_config.txt'), 'a')
# Dexpreopt vendor apps.
dexpreopt_config_suffix = '_dexpreopt.config'
- for config in glob.glob(os.path.join(
- dexpreopt_vendor_config_files_temp_dir, '*' + dexpreopt_config_suffix)):
+ for config in glob.glob(
+ os.path.join(dexpreopt_vendor_config_files_temp_dir,
+ '*' + dexpreopt_config_suffix)):
app = os.path.basename(config)[:-len(dexpreopt_config_suffix)]
logging.info('dexpreopt config: %s %s', config, app)
@@ -1110,8 +936,9 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
apk_dir = 'priv-app'
apk_path = os.path.join(temp_dir, 'vendor', apk_dir, app, app + '.apk')
if not os.path.exists(apk_path):
- logging.warning('skipping dexpreopt for %s, no apk found in vendor/app '
- 'or vendor/priv-app', app)
+ logging.warning(
+ 'skipping dexpreopt for %s, no apk found in vendor/app '
+ 'or vendor/priv-app', app)
continue
# Generate dexpreopting script. Note 'out_dir' is not the output directory
@@ -1121,10 +948,11 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
command = [
os.path.join(dexpreopt_tools_files_temp_dir, 'dexpreopt_gen'),
'-global',
- os.path.join(dexpreopt_framework_config_files_temp_dir, 'dexpreopt.config'),
+ os.path.join(dexpreopt_framework_config_files_temp_dir,
+ 'dexpreopt.config'),
'-global_soong',
- os.path.join(
- dexpreopt_framework_config_files_temp_dir, 'dexpreopt_soong.config'),
+ os.path.join(dexpreopt_framework_config_files_temp_dir,
+ 'dexpreopt_soong.config'),
'-module',
config,
'-dexpreopt_script',
@@ -1137,13 +965,13 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
]
# Run the command from temp_dir so all tool paths are its descendants.
- logging.info("running %s", command)
- subprocess.check_call(command, cwd = temp_dir)
+ logging.info('running %s', command)
+ subprocess.check_call(command, cwd=temp_dir)
# Call the generated script.
command = ['sh', 'dexpreopt_app.sh', apk_path]
- logging.info("running %s", command)
- subprocess.check_call(command, cwd = temp_dir)
+ logging.info('running %s', command)
+ subprocess.check_call(command, cwd=temp_dir)
# Output files are in:
#
@@ -1171,14 +999,17 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
# TODO(b/188179859): Support for other architectures.
arch = 'arm64'
- dex_destination = os.path.join(temp_dir, 'output', dex_img, apk_dir, app, 'oat', arch)
+ dex_destination = os.path.join(temp_dir, 'output', dex_img, apk_dir, app,
+ 'oat', arch)
os.makedirs(dex_destination)
- dex2oat_path = os.path.join(
- temp_dir, 'out', 'dex2oat_result', 'vendor', apk_dir, app, 'oat', arch)
- shutil.copy(os.path.join(dex2oat_path, 'package.vdex'),
- os.path.join(dex_destination, app + '.vdex'))
- shutil.copy(os.path.join(dex2oat_path, 'package.odex'),
- os.path.join(dex_destination, app + '.odex'))
+ dex2oat_path = os.path.join(temp_dir, 'out', 'dex2oat_result', 'vendor',
+ apk_dir, app, 'oat', arch)
+ shutil.copy(
+ os.path.join(dex2oat_path, 'package.vdex'),
+ os.path.join(dex_destination, app + '.vdex'))
+ shutil.copy(
+ os.path.join(dex2oat_path, 'package.odex'),
+ os.path.join(dex_destination, app + '.odex'))
# Append entries to vendor_file_system_config.txt, such as:
#
@@ -1192,8 +1023,10 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
vendor_file_system_config.writelines([
vendor_app_prefix + ' 0 2000 755 ' + selabel + '\n',
vendor_app_prefix + '/' + arch + ' 0 2000 755 ' + selabel + '\n',
- vendor_app_prefix + '/' + arch + '/' + app + '.odex 0 0 644 ' + selabel + '\n',
- vendor_app_prefix + '/' + arch + '/' + app + '.vdex 0 0 644 ' + selabel + '\n',
+ vendor_app_prefix + '/' + arch + '/' + app + '.odex 0 0 644 ' +
+ selabel + '\n',
+ vendor_app_prefix + '/' + arch + '/' + app + '.vdex 0 0 644 ' +
+ selabel + '\n',
])
if not use_system_other_odex:
@@ -1202,47 +1035,15 @@ def process_dexopt(temp_dir, framework_meta, vendor_meta,
# TODO(b/188179859): Rebuilding a vendor image in GRF mode (e.g., T(framework)
# and S(vendor)) may require logic similar to that in
# rebuild_image_with_sepolicy.
- vendor_img = os.path.join(output_target_files_temp_dir, 'IMAGES', 'vendor.img')
+ vendor_img = os.path.join(output_target_files_dir, 'IMAGES', 'vendor.img')
if os.path.exists(vendor_img):
logging.info('Deleting %s', vendor_img)
os.remove(vendor_img)
-def create_merged_package(temp_dir, framework_target_files, framework_item_list,
- vendor_target_files, vendor_item_list,
- framework_misc_info_keys, rebuild_recovery,
- framework_dexpreopt_tools, framework_dexpreopt_config,
- vendor_dexpreopt_config):
+def create_merged_package(temp_dir):
"""Merges two target files packages into one target files structure.
- Args:
- temp_dir: The name of a directory we use when we extract items from the
- input target files packages, and also a scratch directory that we use for
- temporary files.
- framework_target_files: The name of the zip archive containing the framework
- partial target files package.
- framework_item_list: The list of items to extract from the partial framework
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial framework
- target files package.
- vendor_target_files: The name of the zip archive containing the vendor
- partial target files package.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial vendor
- target files package.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and write it to the system image.
-
- The following are only used if dexpreopt is applied:
-
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
-
Returns:
Path to merged package under temp directory.
"""
@@ -1252,53 +1053,27 @@ def create_merged_package(temp_dir, framework_target_files, framework_item_list,
output_target_files_temp_dir = os.path.join(temp_dir, 'output')
extract_items(
- target_files=framework_target_files,
- target_files_temp_dir=output_target_files_temp_dir,
- extract_item_list=framework_item_list)
+ input_zip=OPTIONS.framework_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.framework_item_list)
extract_items(
- target_files=vendor_target_files,
- target_files_temp_dir=output_target_files_temp_dir,
- extract_item_list=vendor_item_list)
+ input_zip=OPTIONS.vendor_target_files,
+ output_dir=output_target_files_temp_dir,
+ extract_item_list=OPTIONS.vendor_item_list)
# Perform special case processing on META/* items.
# After this function completes successfully, all the files we need to create
# the output target files package are in place.
- framework_meta = os.path.join(temp_dir, 'framework_meta')
- vendor_meta = os.path.join(temp_dir, 'vendor_meta')
- extract_items(
- target_files=framework_target_files,
- target_files_temp_dir=framework_meta,
- extract_item_list=('META/*',))
- extract_items(
- target_files=vendor_target_files,
- target_files_temp_dir=vendor_meta,
- extract_item_list=('META/*',))
- process_special_cases(
- temp_dir=temp_dir,
- framework_meta=framework_meta,
- vendor_meta=vendor_meta,
- output_target_files_temp_dir=output_target_files_temp_dir,
- framework_misc_info_keys=framework_misc_info_keys,
- framework_partition_set=item_list_to_partition_set(framework_item_list),
- vendor_partition_set=item_list_to_partition_set(vendor_item_list),
- framework_dexpreopt_tools=framework_dexpreopt_tools,
- framework_dexpreopt_config=framework_dexpreopt_config,
- vendor_dexpreopt_config=vendor_dexpreopt_config)
+ merge_meta_files(temp_dir=temp_dir, merged_dir=output_target_files_temp_dir)
- return output_target_files_temp_dir
+ process_dexopt(
+ temp_dir=temp_dir, output_target_files_dir=output_target_files_temp_dir)
+ return output_target_files_temp_dir
-def generate_images(target_files_dir, rebuild_recovery):
- """Generate images from target files.
- This function takes merged output temporary directory and create images
- from it.
-
- Args:
- target_files_dir: Path to merged temp directory.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and write it to the system image.
- """
+def generate_missing_images(target_files_dir):
+ """Generate any missing images from target files."""
# Regenerate IMAGES in the target directory.
@@ -1306,33 +1081,24 @@ def generate_images(target_files_dir, rebuild_recovery):
'--verbose',
'--add_missing',
]
- # TODO(b/132730255): Remove this if statement.
- if rebuild_recovery:
+ if OPTIONS.rebuild_recovery:
add_img_args.append('--rebuild_recovery')
add_img_args.append(target_files_dir)
add_img_to_target_files.main(add_img_args)
-def rebuild_image_with_sepolicy(target_files_dir,
- vendor_otatools=None,
- vendor_target_files=None):
+def rebuild_image_with_sepolicy(target_files_dir):
"""Rebuilds odm.img or vendor.img to include merged sepolicy files.
If odm is present then odm is preferred -- otherwise vendor is used.
-
- Args:
- target_files_dir: Path to the extracted merged target-files package.
- vendor_otatools: If not None, path to an otatools.zip from the vendor build
- that is used when recompiling the image.
- vendor_target_files: Expected if vendor_otatools is not None. Path to the
- vendor target-files zip.
"""
partition = 'vendor'
if os.path.exists(os.path.join(target_files_dir, 'ODM')) or os.path.exists(
os.path.join(target_files_dir, 'IMAGES/odm.img')):
partition = 'odm'
partition_img = '{}.img'.format(partition)
+ partition_map = '{}.map'.format(partition)
logger.info('Recompiling %s using the merged sepolicy files.', partition_img)
@@ -1360,54 +1126,74 @@ def rebuild_image_with_sepolicy(target_files_dir,
copy_selinux_file('PRODUCT/etc/selinux/product_sepolicy_and_mapping.sha256',
'precompiled_sepolicy.product_sepolicy_and_mapping.sha256')
- if not vendor_otatools:
+ if not OPTIONS.vendor_otatools:
# Remove the partition from the merged target-files archive. It will be
- # rebuilt later automatically by generate_images().
+ # rebuilt later automatically by generate_missing_images().
os.remove(os.path.join(target_files_dir, 'IMAGES', partition_img))
- else:
- # TODO(b/192253131): Remove the need for vendor_otatools by fixing
- # backwards-compatibility issues when compiling images on R from S+.
- if not vendor_target_files:
- raise ValueError(
- 'Expected vendor_target_files if vendor_otatools is not None.')
- logger.info(
- '%s recompilation will be performed using the vendor otatools.zip',
- partition_img)
-
- # Unzip the vendor build's otatools.zip and target-files archive.
- vendor_otatools_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_otatools_')
- vendor_target_files_dir = common.MakeTempDir(
- prefix='merge_target_files_vendor_target_files_')
- common.UnzipToDir(vendor_otatools, vendor_otatools_dir)
- common.UnzipToDir(vendor_target_files, vendor_target_files_dir)
-
- # Copy the partition contents from the merged target-files archive to the
- # vendor target-files archive.
- shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
- shutil.copytree(
- os.path.join(target_files_dir, partition.upper()),
- os.path.join(vendor_target_files_dir, partition.upper()),
- symlinks=True)
-
- # Delete then rebuild the partition.
- os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
- rebuild_partition_command = [
- os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
- '--verbose',
- '--add_missing',
- vendor_target_files_dir,
- ]
- logger.info('Recompiling %s: %s', partition_img,
- ' '.join(rebuild_partition_command))
- common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
+ return
- # Move the newly-created image to the merged target files dir.
- if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
- os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
- shutil.move(
- os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
- os.path.join(target_files_dir, 'IMAGES', partition_img))
+ # TODO(b/192253131): Remove the need for vendor_otatools by fixing
+ # backwards-compatibility issues when compiling images across releases.
+ if not OPTIONS.vendor_target_files:
+ raise ValueError(
+ 'Expected vendor_target_files if vendor_otatools is not None.')
+ logger.info(
+ '%s recompilation will be performed using the vendor otatools.zip',
+ partition_img)
+
+ # Unzip the vendor build's otatools.zip and target-files archive.
+ vendor_otatools_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_otatools_')
+ vendor_target_files_dir = common.MakeTempDir(
+ prefix='merge_target_files_vendor_target_files_')
+ common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
+ common.UnzipToDir(OPTIONS.vendor_target_files, vendor_target_files_dir)
+
+ # Copy the partition contents from the merged target-files archive to the
+ # vendor target-files archive.
+ shutil.rmtree(os.path.join(vendor_target_files_dir, partition.upper()))
+ shutil.copytree(
+ os.path.join(target_files_dir, partition.upper()),
+ os.path.join(vendor_target_files_dir, partition.upper()),
+ symlinks=True)
+
+ # Delete then rebuild the partition.
+ os.remove(os.path.join(vendor_target_files_dir, 'IMAGES', partition_img))
+ rebuild_partition_command = [
+ os.path.join(vendor_otatools_dir, 'bin', 'add_img_to_target_files'),
+ '--verbose',
+ '--add_missing',
+ ]
+ if OPTIONS.rebuild_recovery:
+ rebuild_partition_command.append('--rebuild_recovery')
+ rebuild_partition_command.append(vendor_target_files_dir)
+ logger.info('Recompiling %s: %s', partition_img,
+ ' '.join(rebuild_partition_command))
+ common.RunAndCheckOutput(rebuild_partition_command, verbose=True)
+
+ # Move the newly-created image to the merged target files dir.
+ if not os.path.exists(os.path.join(target_files_dir, 'IMAGES')):
+ os.makedirs(os.path.join(target_files_dir, 'IMAGES'))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_img),
+ os.path.join(target_files_dir, 'IMAGES', partition_img))
+ shutil.move(
+ os.path.join(vendor_target_files_dir, 'IMAGES', partition_map),
+ os.path.join(target_files_dir, 'IMAGES', partition_map))
+
+ def copy_recovery_file(filename):
+ for subdir in ('VENDOR', 'SYSTEM/vendor'):
+ source = os.path.join(vendor_target_files_dir, subdir, filename)
+ if os.path.exists(source):
+ dest = os.path.join(target_files_dir, subdir, filename)
+ shutil.copy(source, dest)
+ return
+ logger.info('Skipping copy_recovery_file for %s, file not found', filename)
+
+ if OPTIONS.rebuild_recovery:
+ copy_recovery_file('etc/recovery.img')
+ copy_recovery_file('bin/install-recovery.sh')
+ copy_recovery_file('recovery-from-boot.p')
def generate_super_empty_image(target_dir, output_super_empty):
@@ -1442,16 +1228,15 @@ def generate_super_empty_image(target_dir, output_super_empty):
shutil.copyfile(super_empty_img, output_super_empty)
-def create_target_files_archive(output_file, source_dir, temp_dir):
- """Creates archive from target package.
+def create_target_files_archive(output_zip, source_dir, temp_dir):
+ """Creates a target_files zip archive from the input source dir.
Args:
- output_file: The name of the zip archive target files package.
+ output_zip: The name of the zip archive target files package.
source_dir: The directory containing the package to be archived.
temp_dir: Path to temporary directory for any intermediate files.
"""
output_target_files_list = os.path.join(temp_dir, 'output.list')
- output_zip = os.path.abspath(output_file)
output_target_files_meta_dir = os.path.join(source_dir, 'META')
def files_from_path(target_path, extra_args=None):
@@ -1463,6 +1248,9 @@ def create_target_files_archive(output_file, source_dir, temp_dir):
stdin=find_process.stdout,
verbose=False)
+  # META content appears first in the zip. The standard build system orders
+  # it this way so those files can be extracted quickly, and we apply the
+  # same ordering to merged target_files.zips here too.
meta_content = files_from_path(output_target_files_meta_dir)
other_content = files_from_path(
source_dir,
@@ -1476,30 +1264,22 @@ def create_target_files_archive(output_file, source_dir, temp_dir):
'soong_zip',
'-d',
'-o',
- output_zip,
+ os.path.abspath(output_zip),
'-C',
source_dir,
'-r',
output_target_files_list,
]
- logger.info('creating %s', output_file)
+ logger.info('creating %s', output_zip)
common.RunAndCheckOutput(command, verbose=True)
- logger.info('finished creating %s', output_file)
-
- return output_zip
+ logger.info('finished creating %s', output_zip)
-def merge_target_files(temp_dir, framework_target_files, framework_item_list,
- framework_misc_info_keys, vendor_target_files,
- vendor_item_list, output_target_files, output_dir,
- output_item_list, output_ota, output_img,
- output_super_empty, rebuild_recovery, vendor_otatools,
- rebuild_sepolicy, framework_dexpreopt_tools,
- framework_dexpreopt_config, vendor_dexpreopt_config):
+def merge_target_files(temp_dir):
"""Merges two target files packages together.
- This function takes framework and vendor target files packages as input,
+ This function uses framework and vendor target files packages as input,
performs various file extractions, special case processing, and finally
creates a merged zip archive as output.
@@ -1507,50 +1287,13 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
temp_dir: The name of a directory we use when we extract items from the
input target files packages, and also a scratch directory that we use for
temporary files.
- framework_target_files: The name of the zip archive containing the framework
- partial target files package.
- framework_item_list: The list of items to extract from the partial framework
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial framework
- target files package.
- framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys should come from the
- vendor instance.
- vendor_target_files: The name of the zip archive containing the vendor
- partial target files package.
- vendor_item_list: The list of items to extract from the partial vendor
- target files package as is, meaning these items will land in the output
- target files package exactly as they appear in the input partial vendor
- target files package.
- output_target_files: The name of the output zip archive target files package
- created by merging framework and vendor.
- output_dir: The destination directory for saving merged files.
- output_item_list: The list of items to copy into the output_dir.
- output_ota: The name of the output zip archive ota package.
- output_img: The name of the output zip archive img package.
- output_super_empty: If provided, creates a super_empty.img file from the
- merged target files package and saves it at this path.
- rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
- devices and write it to the system image.
- vendor_otatools: Path to an otatools zip used for recompiling vendor images.
- rebuild_sepolicy: If true, rebuild odm.img (if target uses ODM) or
- vendor.img using a merged precompiled_sepolicy file.
-
- The following are only used if dexpreopt is applied:
-
- framework_dexpreopt_tools: Location of dexpreopt_tools.zip.
- framework_dexpreopt_config: Location of framework's dexpreopt_config.zip.
- vendor_dexpreopt_config: Location of vendor's dexpreopt_config.zip.
"""
logger.info('starting: merge framework %s and vendor %s into output %s',
- framework_target_files, vendor_target_files, output_target_files)
+ OPTIONS.framework_target_files, OPTIONS.vendor_target_files,
+ OPTIONS.output_target_files)
- output_target_files_temp_dir = create_merged_package(
- temp_dir, framework_target_files, framework_item_list,
- vendor_target_files, vendor_item_list, framework_misc_info_keys,
- rebuild_recovery, framework_dexpreopt_tools, framework_dexpreopt_config,
- vendor_dexpreopt_config)
+ output_target_files_temp_dir = create_merged_package(temp_dir)
if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
raise RuntimeError('Incompatible VINTF metadata')
@@ -1571,10 +1314,9 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
f.write(violation)
# Check for violations across the input builds' partition groups.
- framework_partitions = item_list_to_partition_set(framework_item_list)
- vendor_partitions = item_list_to_partition_set(vendor_item_list)
shareduid_errors = common.SharedUidPartitionViolations(
- json.loads(violation), [framework_partitions, vendor_partitions])
+ json.loads(violation),
+ [OPTIONS.framework_partition_set, OPTIONS.vendor_partition_set])
if shareduid_errors:
for error in shareduid_errors:
logger.error(error)
@@ -1599,42 +1341,44 @@ def merge_target_files(temp_dir, framework_target_files, framework_item_list,
logger.info('Compiling split sepolicy: %s', ' '.join(split_sepolicy_cmd))
common.RunAndCheckOutput(split_sepolicy_cmd)
# Include the compiled policy in an image if requested.
- if rebuild_sepolicy:
- rebuild_image_with_sepolicy(output_target_files_temp_dir, vendor_otatools,
- vendor_target_files)
+ if OPTIONS.rebuild_sepolicy:
+ rebuild_image_with_sepolicy(output_target_files_temp_dir)
# Run validation checks on the pre-installed APEX files.
validate_merged_apex_info(output_target_files_temp_dir, partition_map.keys())
- generate_images(output_target_files_temp_dir, rebuild_recovery)
+ generate_missing_images(output_target_files_temp_dir)
- generate_super_empty_image(output_target_files_temp_dir, output_super_empty)
+ generate_super_empty_image(output_target_files_temp_dir,
+ OPTIONS.output_super_empty)
# Finally, create the output target files zip archive and/or copy the
# output items to the output target files directory.
- if output_dir:
- copy_items(output_target_files_temp_dir, output_dir, output_item_list)
+ if OPTIONS.output_dir:
+ copy_items(output_target_files_temp_dir, OPTIONS.output_dir,
+ OPTIONS.output_item_list)
- if not output_target_files:
+ if not OPTIONS.output_target_files:
return
- # Create the merged META/care_map.pb if A/B update
- if 'ab_update' in framework_misc_info_keys:
+ # Create the merged META/care_map.pb if the device uses A/B updates.
+ if OPTIONS.merged_misc_info.get('ab_update') == 'true':
generate_care_map(partition_map.keys(), output_target_files_temp_dir)
- output_zip = create_target_files_archive(output_target_files,
- output_target_files_temp_dir,
- temp_dir)
+ create_target_files_archive(OPTIONS.output_target_files,
+ output_target_files_temp_dir, temp_dir)
# Create the IMG package from the merged target files package.
- if output_img:
- img_from_target_files.main([output_zip, output_img])
+ if OPTIONS.output_img:
+ img_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_img])
# Create the OTA package from the merged target files package.
- if output_ota:
- ota_from_target_files.main([output_zip, output_ota])
+ if OPTIONS.output_ota:
+ ota_from_target_files.main(
+ [OPTIONS.output_target_files, OPTIONS.output_ota])
def call_func_with_temp_dir(func, keep_tmp):
@@ -1715,7 +1459,7 @@ def main():
OPTIONS.output_img = a
elif o == '--output-super-empty':
OPTIONS.output_super_empty = a
- elif o == '--rebuild_recovery': # TODO(b/132730255): Warn
+ elif o == '--rebuild_recovery':
OPTIONS.rebuild_recovery = True
elif o == '--allow-duplicate-apkapex-keys':
OPTIONS.allow_duplicate_apkapex_keys = True
@@ -1770,57 +1514,42 @@ def main():
if (args or OPTIONS.framework_target_files is None or
OPTIONS.vendor_target_files is None or
(OPTIONS.output_target_files is None and OPTIONS.output_dir is None) or
- (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None)):
+ (OPTIONS.output_dir is not None and OPTIONS.output_item_list is None) or
+ (OPTIONS.rebuild_recovery and not OPTIONS.rebuild_sepolicy)):
common.Usage(__doc__)
sys.exit(1)
if OPTIONS.framework_item_list:
- framework_item_list = common.LoadListFromFile(OPTIONS.framework_item_list)
+ OPTIONS.framework_item_list = common.LoadListFromFile(
+ OPTIONS.framework_item_list)
else:
- framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ OPTIONS.framework_partition_set = item_list_to_partition_set(
+ OPTIONS.framework_item_list)
if OPTIONS.framework_misc_info_keys:
- framework_misc_info_keys = common.LoadListFromFile(
+ OPTIONS.framework_misc_info_keys = common.LoadListFromFile(
OPTIONS.framework_misc_info_keys)
else:
- framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
+ OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
if OPTIONS.vendor_item_list:
- vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
+ OPTIONS.vendor_item_list = common.LoadListFromFile(OPTIONS.vendor_item_list)
else:
- vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ OPTIONS.vendor_partition_set = item_list_to_partition_set(
+ OPTIONS.vendor_item_list)
if OPTIONS.output_item_list:
- output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
+ OPTIONS.output_item_list = common.LoadListFromFile(OPTIONS.output_item_list)
else:
- output_item_list = None
+ OPTIONS.output_item_list = None
- if not validate_config_lists(
- framework_item_list=framework_item_list,
- framework_misc_info_keys=framework_misc_info_keys,
- vendor_item_list=vendor_item_list):
+ if not validate_config_lists():
sys.exit(1)
- call_func_with_temp_dir(
- lambda temp_dir: merge_target_files(
- temp_dir=temp_dir,
- framework_target_files=OPTIONS.framework_target_files,
- framework_item_list=framework_item_list,
- framework_misc_info_keys=framework_misc_info_keys,
- vendor_target_files=OPTIONS.vendor_target_files,
- vendor_item_list=vendor_item_list,
- output_target_files=OPTIONS.output_target_files,
- output_dir=OPTIONS.output_dir,
- output_item_list=output_item_list,
- output_ota=OPTIONS.output_ota,
- output_img=OPTIONS.output_img,
- output_super_empty=OPTIONS.output_super_empty,
- rebuild_recovery=OPTIONS.rebuild_recovery,
- vendor_otatools=OPTIONS.vendor_otatools,
- rebuild_sepolicy=OPTIONS.rebuild_sepolicy,
- framework_dexpreopt_tools=OPTIONS.framework_dexpreopt_tools,
- framework_dexpreopt_config=OPTIONS.framework_dexpreopt_config,
- vendor_dexpreopt_config=OPTIONS.vendor_dexpreopt_config), OPTIONS.keep_tmp)
+  call_func_with_temp_dir(merge_target_files, OPTIONS.keep_tmp)
if __name__ == '__main__':
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 88b91734db..93e7042259 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -237,6 +237,10 @@ A/B OTA specific options
--enable_lz4diff
Whether to enable lz4diff feature. Will generate smaller OTA for EROFS but
uses more memory.
+
+ --spl_downgrade
+    Force generation of an SPL downgrade OTA. Only needed if the target build
+    has an older SPL.
"""
from __future__ import print_function
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 6896f83c58..5d403dc9f4 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -569,7 +569,8 @@ class PropertyFiles(object):
tokens.append('metadata.pb:' + ' ' * 15)
else:
tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
- tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
+ if METADATA_PROTO_NAME in zip_file.namelist():
+ tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))
return ','.join(tokens)
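The new guard matters because target_files packages built before the proto metadata existed contain no metadata.pb entry. A minimal standalone sketch, assuming METADATA_PROTO_NAME is 'META-INF/com/android/metadata.pb' (the real constants are defined elsewhere in ota_utils.py):
```
import zipfile

METADATA_NAME = 'META-INF/com/android/metadata'           # assumed value
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'  # assumed value

def metadata_entries(zip_path):
  # List only the metadata entries that are actually present, so older
  # packages without metadata.pb do not trigger a failed lookup.
  with zipfile.ZipFile(zip_path) as zip_file:
    entries = [METADATA_NAME]
    if METADATA_PROTO_NAME in zip_file.namelist():
      entries.append(METADATA_PROTO_NAME)
  return entries
```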
diff --git a/tools/releasetools/sign_apex.py b/tools/releasetools/sign_apex.py
index 66f5e0513a..722359b2d2 100755
--- a/tools/releasetools/sign_apex.py
+++ b/tools/releasetools/sign_apex.py
@@ -42,6 +42,15 @@ Usage: sign_apex [flags] input_apex_file output_apex_file
--sign_tool <sign_tool>
Optional flag that specifies a custom signing tool for the contents of the apex.
+
+ --sepolicy_key <key>
+    Optional flag that specifies the sepolicy signing key; defaults to payload_key.
+
+ --sepolicy_cert <cert>
+ Optional flag that specifies the sepolicy signing cert.
+
+ --fsverity_tool <path>
+    Optional flag that specifies the path to the fsverity tool used to sign SEPolicy; defaults to fsverity.
"""
import logging
@@ -55,7 +64,8 @@ logger = logging.getLogger(__name__)
def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
- apk_keys=None, signing_args=None, codename_to_api_level_map=None, sign_tool=None):
+ apk_keys=None, signing_args=None, codename_to_api_level_map=None, sign_tool=None,
+ sepolicy_key=None, sepolicy_cert=None, fsverity_tool=None):
"""Signs the given apex file."""
with open(apex_file, 'rb') as input_fp:
apex_data = input_fp.read()
@@ -70,7 +80,11 @@ def SignApexFile(avbtool, apex_file, payload_key, container_key, no_hashtree,
no_hashtree=no_hashtree,
apk_keys=apk_keys,
signing_args=signing_args,
- sign_tool=sign_tool)
+ sign_tool=sign_tool,
+ is_sepolicy=apex_file.endswith("sepolicy.apex"),
+ sepolicy_key=sepolicy_key,
+ sepolicy_cert=sepolicy_cert,
+ fsverity_tool=fsverity_tool)
def main(argv):
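A hedged usage sketch of the extended SignApexFile() signature; every path and key name below is a placeholder:
```
# The sepolicy arguments only take effect when the input name ends with
# "sepolicy.apex", since is_sepolicy is derived from the file name above.
signed_apex = SignApexFile(
    avbtool='avbtool',
    apex_file='sepolicy.apex',
    payload_key='payload_key.pem',
    container_key='container_key',
    no_hashtree=False,
    sepolicy_key='sepolicy_key.pem',
    sepolicy_cert='sepolicy_cert.x509.pem',
    fsverity_tool='fsverity')
```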
@@ -106,6 +120,12 @@ def main(argv):
options['extra_apks'].update({n: key})
elif o == '--sign_tool':
options['sign_tool'] = a
+ elif o == '--sepolicy_key':
+ options['sepolicy_key'] = a
+ elif o == '--sepolicy_cert':
+ options['sepolicy_cert'] = a
+ elif o == '--fsverity_tool':
+ options['fsverity_tool'] = a
else:
return False
return True
@@ -121,6 +141,9 @@ def main(argv):
'payload_key=',
'extra_apks=',
'sign_tool=',
+ 'sepolicy_key=',
+ 'sepolicy_cert=',
+ 'fsverity_tool='
],
extra_option_handler=option_handler)
@@ -141,7 +164,10 @@ def main(argv):
signing_args=options.get('payload_extra_args'),
codename_to_api_level_map=options.get(
'codename_to_api_level_map', {}),
- sign_tool=options.get('sign_tool', None))
+ sign_tool=options.get('sign_tool', None),
+ sepolicy_key=options.get('sepolicy_key', None),
+ sepolicy_cert=options.get('sepolicy_cert', None),
+ fsverity_tool=options.get('fsverity_tool', None))
shutil.copyfile(signed_apex, args[1])
logger.info("done.")
@@ -149,8 +175,5 @@ def main(argv):
if __name__ == '__main__':
try:
main(sys.argv[1:])
- except common.ExternalError:
- logger.exception("\n ERROR:\n")
- sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 5b16d803cf..054315f4e4 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -688,6 +688,39 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
print(" Rewriting AVB public key of system_other in /product")
common.ZipWrite(output_tf_zip, public_key, filename)
+ # Updates pvmfw embedded public key with the virt APEX payload key.
+ elif filename == "PREBUILT_IMAGES/pvmfw.img":
+ # Find the name of the virt APEX in the target files.
+ namelist = input_tf_zip.namelist()
+ apex_gen = (GetApexFilename(f) for f in namelist if IsApexFile(f))
+      virt_apex_re = re.compile(r"^com\.([^\.]+\.)?android\.virt\.apex$")
+ virt_apex = next((a for a in apex_gen if virt_apex_re.match(a)), None)
+ if not virt_apex:
+ print("Removing %s from ramdisk: virt APEX not found" % filename)
+ else:
+ print("Replacing %s embedded key with %s key" % (filename, virt_apex))
+ # Get the current and new embedded keys.
+ payload_key, container_key, sign_tool = apex_keys[virt_apex]
+ new_pubkey_path = common.ExtractAvbPublicKey(
+ misc_info['avb_avbtool'], payload_key)
+ with open(new_pubkey_path, 'rb') as f:
+ new_pubkey = f.read()
+ pubkey_info = copy.copy(
+ input_tf_zip.getinfo("PREBUILT_IMAGES/pvmfw_embedded.avbpubkey"))
+ old_pubkey = input_tf_zip.read(pubkey_info.filename)
+ # Validate the keys and image.
+ if len(old_pubkey) != len(new_pubkey):
+ raise common.ExternalError("pvmfw embedded public key size mismatch")
+ pos = data.find(old_pubkey)
+ if pos == -1:
+ raise common.ExternalError("pvmfw embedded public key not found")
+ # Replace the key and copy new files.
+ new_data = data[:pos] + new_pubkey + data[pos+len(old_pubkey):]
+ common.ZipWriteStr(output_tf_zip, out_info, new_data)
+ common.ZipWriteStr(output_tf_zip, pubkey_info, new_pubkey)
+ elif filename == "PREBUILT_IMAGES/pvmfw_embedded.avbpubkey":
+ pass
+
# Should NOT sign boot-debug.img.
elif filename in (
"BOOT/RAMDISK/force_debuggable",
@@ -1244,6 +1277,7 @@ def BuildVendorPartitions(output_zip_path):
logger.info("Building vendor partitions using vendor otatools.")
vendor_tempdir = common.UnzipTemp(output_zip_path, [
"META/*",
+ "SYSTEM/build.prop",
] + ["{}/*".format(p.upper()) for p in OPTIONS.vendor_partitions])
# Disable various partitions that build based on misc_info fields.
@@ -1266,9 +1300,25 @@ def BuildVendorPartitions(output_zip_path):
for key in sorted(vendor_misc_info):
output.write("{}={}\n".format(key, vendor_misc_info[key]))
+  # Disable the system partition with a placeholder IMAGES/system.img rather
+  # than removing the SYSTEM folder, because SYSTEM/build.prop is still
+  # needed for:
+ # add_img_to_target_files.CreateImage ->
+ # common.BuildInfo ->
+ # common.BuildInfo.CalculateFingerprint
+ vendor_images_path = os.path.join(vendor_tempdir, "IMAGES")
+ if not os.path.exists(vendor_images_path):
+ os.makedirs(vendor_images_path)
+ with open(os.path.join(vendor_images_path, "system.img"), "w") as output:
+ pass
+
# Disable care_map.pb as not all ab_partitions are available when
# vendor otatools regenerates vendor images.
- os.remove(os.path.join(vendor_tempdir, "META/ab_partitions.txt"))
+ if os.path.exists(os.path.join(vendor_tempdir, "META/ab_partitions.txt")):
+ os.remove(os.path.join(vendor_tempdir, "META/ab_partitions.txt"))
+ # Disable RADIO images
+ if os.path.exists(os.path.join(vendor_tempdir, "META/pack_radioimages.txt")):
+ os.remove(os.path.join(vendor_tempdir, "META/pack_radioimages.txt"))
# Build vendor images using vendor otatools.
vendor_otatools_dir = common.MakeTempDir(prefix="vendor_otatools_")
@@ -1276,6 +1326,7 @@ def BuildVendorPartitions(output_zip_path):
cmd = [
os.path.join(vendor_otatools_dir, "bin", "add_img_to_target_files"),
"--is_signing",
+ "--add_missing",
"--verbose",
vendor_tempdir,
]
@@ -1521,8 +1572,5 @@ def main(argv):
if __name__ == '__main__':
try:
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
- raise
finally:
common.Cleanup()
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 7dd365fc39..f9732632f1 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -1642,7 +1642,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
}
test_file = tempfile.NamedTemporaryFile()
self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel', 'boot')
+ test_file.name, 'generic_kernel')
def test_GenerateGkiCertificate_SearchKeyPathNotFound(self):
pubkey = 'no_testkey_gki.pem'
@@ -1662,7 +1662,7 @@ class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
}
test_file = tempfile.NamedTemporaryFile()
self.assertRaises(common.ExternalError, common._GenerateGkiCertificate,
- test_file.name, 'generic_kernel', 'boot')
+ test_file.name, 'generic_kernel')
class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index 835edab713..088ebeea0c 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -18,18 +18,26 @@ import os.path
import shutil
import common
+import merge_target_files
import test_utils
from merge_target_files import (
validate_config_lists, DEFAULT_FRAMEWORK_ITEM_LIST,
DEFAULT_VENDOR_ITEM_LIST, DEFAULT_FRAMEWORK_MISC_INFO_KEYS, copy_items,
- item_list_to_partition_set, process_apex_keys_apk_certs_common,
- compile_split_sepolicy, validate_merged_apex_info)
+ item_list_to_partition_set, merge_package_keys_txt, compile_split_sepolicy,
+ validate_merged_apex_info)
class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
+ self.OPTIONS = merge_target_files.OPTIONS
+ self.OPTIONS.framework_item_list = DEFAULT_FRAMEWORK_ITEM_LIST
+ self.OPTIONS.framework_misc_info_keys = DEFAULT_FRAMEWORK_MISC_INFO_KEYS
+ self.OPTIONS.vendor_item_list = DEFAULT_VENDOR_ITEM_LIST
+ self.OPTIONS.framework_partition_set = set(
+ ['product', 'system', 'system_ext'])
+ self.OPTIONS.vendor_partition_set = set(['odm', 'vendor'])
def test_copy_items_CopiesItemsMatchingPatterns(self):
@@ -84,76 +92,55 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
os.readlink(os.path.join(output_dir, 'a_link.cpp')), 'a.cpp')
def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.remove('SYSTEM/*')
- self.assertFalse(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- DEFAULT_VENDOR_ITEM_LIST))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.remove('SYSTEM/*')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsTrueIfDefaultItemInDifferentList(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.remove('ROOT/*')
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('ROOT/*')
- self.assertTrue(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.remove('ROOT/*')
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('ROOT/*')
+ self.assertTrue(validate_config_lists())
def test_validate_config_lists_ReturnsTrueIfExtraItem(self):
- framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
- framework_item_list.append('MY_NEW_PARTITION/*')
- self.assertTrue(
- validate_config_lists(framework_item_list,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- DEFAULT_VENDOR_ITEM_LIST))
+ self.OPTIONS.framework_item_list = list(DEFAULT_FRAMEWORK_ITEM_LIST)
+ self.OPTIONS.framework_item_list.append('MY_NEW_PARTITION/*')
+ self.assertTrue(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartition(self):
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('SYSTEM/my_system_file')
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('SYSTEM/my_system_file')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartitionImage(
self):
- vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
- vendor_item_list.append('IMAGES/system.img')
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
- vendor_item_list))
+ self.OPTIONS.vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ self.OPTIONS.vendor_item_list.append('IMAGES/system.img')
+ self.assertFalse(validate_config_lists())
def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
- framework_misc_info_keys = list(DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
- framework_misc_info_keys.append(bad_key)
- self.assertFalse(
- validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
- framework_misc_info_keys,
- DEFAULT_VENDOR_ITEM_LIST))
-
- def test_process_apex_keys_apk_certs_ReturnsTrueIfNoConflicts(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ self.OPTIONS.framework_misc_info_keys = list(
+ DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
+ self.OPTIONS.framework_misc_info_keys.append(bad_key)
+ self.assertFalse(validate_config_lists())
+
+ def test_merge_package_keys_txt_ReturnsTrueIfNoConflicts(self):
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
- vendor_dir = common.MakeTempDir()
- os.makedirs(os.path.join(vendor_dir, 'META'))
+ vendor_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_vendor.txt'),
- os.path.join(vendor_dir, 'META', 'apexkeys.txt'))
+ os.path.join(vendor_meta_dir, 'apexkeys.txt'))
- process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apexkeys.txt')
+ merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
+ 'apexkeys.txt')

merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
@@ -162,7 +149,7 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
merged_entries = f.read().split('\n')
output_entries = []
- output_path = os.path.join(output_dir, 'META', 'apexkeys.txt')
+ output_path = os.path.join(output_meta_dir, 'apexkeys.txt')
with open(output_path) as f:
output_entries = f.read().split('\n')
@@ -170,45 +157,36 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
return self.assertEqual(merged_entries, output_entries)
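This test, and the conflict test below, target the renamed merge_package_keys_txt(), which now takes META directories directly instead of target-files roots plus partition sets. The real implementation lives in merge_target_files.py; the following is only a sketch of the behaviour the tests pin down: union the entries of the two input files, and raise ValueError when both inputs define the same entry differently.

```python
import os

def merge_key_files_sketch(framework_meta_dir, vendor_meta_dir,
                           output_meta_dir, file_name):
    """Sketch of merge_package_keys_txt semantics; not the AOSP code."""
    def read_entries(meta_dir):
        with open(os.path.join(meta_dir, file_name)) as f:
            return [line for line in f.read().split('\n') if line]

    merged = {}
    for entry in read_entries(framework_meta_dir) + read_entries(vendor_meta_dir):
        # Hypothetical keying: the first attr="value" token names the package.
        name = entry.split(' ', 1)[0]
        if merged.get(name, entry) != entry:
            raise ValueError('conflicting entries for %s' % name)
        merged[name] = entry
    with open(os.path.join(output_meta_dir, file_name), 'w') as f:
        f.write('\n'.join(sorted(merged.values())) + '\n')
```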

def test_process_apex_keys_apk_certs_ReturnsFalseIfConflictsPresent(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework.txt'),
- os.path.join(framework_dir, 'META', 'apexkeys.txt'))
+ os.path.join(framework_meta_dir, 'apexkeys.txt'))
- conflict_dir = common.MakeTempDir()
- os.makedirs(os.path.join(conflict_dir, 'META'))
+ conflict_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apexkeys_framework_conflict.txt'),
- os.path.join(conflict_dir, 'META', 'apexkeys.txt'))
+ os.path.join(conflict_meta_dir, 'apexkeys.txt'))
- self.assertRaises(ValueError, process_apex_keys_apk_certs_common,
- framework_dir, conflict_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apexkeys.txt')
+ self.assertRaises(ValueError, merge_package_keys_txt, framework_meta_dir,
+ conflict_meta_dir, output_meta_dir, 'apexkeys.txt')

def test_process_apex_keys_apk_certs_HandlesApkCertsSyntax(self):
- output_dir = common.MakeTempDir()
- os.makedirs(os.path.join(output_dir, 'META'))
+ output_meta_dir = common.MakeTempDir()
- framework_dir = common.MakeTempDir()
- os.makedirs(os.path.join(framework_dir, 'META'))
+ framework_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apkcerts_framework.txt'),
- os.path.join(framework_dir, 'META', 'apkcerts.txt'))
+ os.path.join(framework_meta_dir, 'apkcerts.txt'))
- vendor_dir = common.MakeTempDir()
- os.makedirs(os.path.join(vendor_dir, 'META'))
+ vendor_meta_dir = common.MakeTempDir()
os.symlink(
os.path.join(self.testdata_dir, 'apkcerts_vendor.txt'),
- os.path.join(vendor_dir, 'META', 'apkcerts.txt'))
+ os.path.join(vendor_meta_dir, 'apkcerts.txt'))
- process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
- set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']), 'apkcerts.txt')
+ merge_package_keys_txt(framework_meta_dir, vendor_meta_dir, output_meta_dir,
+ 'apkcerts.txt')

merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apkcerts_merge.txt')
@@ -217,7 +195,7 @@ class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
merged_entries = f.read().split('\n')
output_entries = []
- output_path = os.path.join(output_dir, 'META', 'apkcerts.txt')
+ output_path = os.path.join(output_meta_dir, 'apkcerts.txt')
with open(output_path) as f:
output_entries = f.read().split('\n')
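The apkcerts variant works with the same merge helper because both files share the same line shape: space-separated attr="value" pairs keyed by a name attribute (apexkeys.txt carries signing material for an APEX, apkcerts.txt for an APK). A small sketch of extracting the name for the conflict check; the sample lines are invented for illustration, not taken from the testdata files:

```python
import re

_NAME_RE = re.compile(r'name="([^"]*)"')

def entry_name(line):
    """Return the value of the name attribute, or None if absent."""
    match = _NAME_RE.search(line)
    return match.group(1) if match else None

# Invented examples:
assert entry_name('name="Foo.apk" certificate="cert.pem"') == 'Foo.apk'
assert entry_name('no name attribute here') is None
```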
diff --git a/tools/releasetools/test_sign_apex.py b/tools/releasetools/test_sign_apex.py
index 8470f202c5..c344e22058 100644
--- a/tools/releasetools/test_sign_apex.py
+++ b/tools/releasetools/test_sign_apex.py
@@ -71,3 +71,21 @@ class SignApexTest(test_utils.ReleaseToolsTestCase):
False,
codename_to_api_level_map={'S': 31, 'Tiramisu' : 32})
self.assertTrue(os.path.exists(signed_apex))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_SignApexWithSepolicy(self):
+ test_apex = os.path.join(self.testdata_dir, 'sepolicy.apex')
+ payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
+ container_key = os.path.join(self.testdata_dir, 'testkey')
+ sepolicy_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
+ sepolicy_cert = os.path.join(self.testdata_dir, 'testkey.x509.pem')
+ signed_test_apex = sign_apex.SignApexFile(
+ 'avbtool',
+ test_apex,
+ payload_key,
+ container_key,
+ False,
+ None,
+ sepolicy_key=sepolicy_key,
+ sepolicy_cert=sepolicy_cert)
+ self.assertTrue(os.path.exists(signed_test_apex))
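The new test exercises the sepolicy_key and sepolicy_cert keyword arguments of sign_apex.SignApexFile against the checked-in sepolicy.apex fixture. A hedged usage sketch, reusing the fixture names from the test (the positional False and None simply mirror the call above; this diff does not show what they bind to):

```python
import os
import sign_apex  # as used by the test module

testdata_dir = 'tools/releasetools/testdata'  # path assumed for illustration

signed = sign_apex.SignApexFile(
    'avbtool',
    os.path.join(testdata_dir, 'sepolicy.apex'),
    os.path.join(testdata_dir, 'testkey_RSA4096.key'),  # payload key
    os.path.join(testdata_dir, 'testkey'),              # container key
    False,
    None,
    sepolicy_key=os.path.join(testdata_dir, 'testkey_RSA4096.key'),
    sepolicy_cert=os.path.join(testdata_dir, 'testkey.x509.pem'))
assert os.path.exists(signed)  # the function returns the signed APEX path
```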
diff --git a/tools/releasetools/testdata/sepolicy.apex b/tools/releasetools/testdata/sepolicy.apex
new file mode 100644
index 0000000000..f7d267d08d
--- /dev/null
+++ b/tools/releasetools/testdata/sepolicy.apex
Binary files differ
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index a08ddbede6..d55ad88d8d 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -379,6 +379,11 @@ class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
self.avbtool = avbtool
self.algorithm = algorithm
self.key_path = key_path
+ if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
+ new_key_path = os.path.join(OPTIONS.search_path, key_path)
+ if os.path.exists(new_key_path):
+ self.key_path = new_key_path
+
self.salt = salt
self.signing_args = signing_args
self.image_size = None
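The verity_utils hunk adds a fallback for AVB key resolution: when the configured key path does not exist as given but OPTIONS.search_path is set, the builder retries the path relative to the search path. The same logic as a standalone helper (a sketch; the real code assigns self.key_path inline as shown above):

```python
import os

def resolve_key_path(key_path, search_path):
    """Prefer key_path as given; fall back to search_path/key_path when
    only the latter exists. Mirrors the inline logic in the hunk above."""
    if key_path and not os.path.exists(key_path) and search_path:
        candidate = os.path.join(search_path, key_path)
        if os.path.exists(candidate):
            return candidate
    return key_path
```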