diff --git a/Android.mk b/Android.mk new file mode 100644 index 000000000..bb1334a05 --- /dev/null +++ b/Android.mk @@ -0,0 +1,568 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +LOCAL_PATH := $(call my-dir) + +art_path := $(LOCAL_PATH) + +######################################################################## +# clean-oat rules +# + +include $(art_path)/build/Android.common_path.mk +include $(art_path)/build/Android.oat.mk + +# Following the example of build's dont_bother for clean targets. +art_dont_bother := false +ifneq (,$(filter clean-oat%,$(MAKECMDGOALS))) + art_dont_bother := true +endif + +# Don't bother with tests unless there is a test-art*, build-art*, or related target. +art_test_bother := false +ifneq (,$(filter tests test-art% valgrind-test-art% build-art% checkbuild,$(MAKECMDGOALS))) + art_test_bother := true +endif + +.PHONY: clean-oat +clean-oat: clean-oat-host clean-oat-target + +.PHONY: clean-oat-host +clean-oat-host: + find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" | xargs rm -f +ifneq ($(TMPDIR),) + rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/* + rm -rf $(TMPDIR)/android-data/dalvik-cache/* +else + rm -rf /tmp/$(USER)/test-*/dalvik-cache/* + rm -rf /tmp/android-data/dalvik-cache/* +endif + +.PHONY: clean-oat-target +clean-oat-target: + adb root + adb wait-for-device remount + adb shell rm -rf $(ART_TARGET_NATIVETEST_DIR) + adb shell rm -rf $(ART_TARGET_TEST_DIR) + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/* + adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH) + adb shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH) +ifdef TARGET_2ND_ARCH + adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) + adb shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) +endif + adb shell rm -rf data/run-test/test-*/dalvik-cache/* + +ifneq ($(art_dont_bother),true) + +######################################################################## +# cpplint rules to style check art source files + +include $(art_path)/build/Android.cpplint.mk + +######################################################################## +# product rules + +include $(art_path)/runtime/Android.mk +include $(art_path)/runtime/simulator/Android.mk +include $(art_path)/compiler/Android.mk +include $(art_path)/dexdump/Android.mk +include $(art_path)/dexlist/Android.mk +include $(art_path)/dex2oat/Android.mk +include $(art_path)/disassembler/Android.mk +include $(art_path)/oatdump/Android.mk +include $(art_path)/imgdiag/Android.mk +include $(art_path)/patchoat/Android.mk +include $(art_path)/profman/Android.mk +include $(art_path)/dalvikvm/Android.mk +include $(art_path)/tools/Android.mk +include $(art_path)/tools/ahat/Android.mk +include $(art_path)/tools/dexfuzz/Android.mk +include $(art_path)/tools/dmtracedump/Android.mk +include $(art_path)/sigchainlib/Android.mk +include $(art_path)/libart_fake/Android.mk + + +# ART_HOST_DEPENDENCIES depends on 
Android.executable.mk above for ART_HOST_EXECUTABLES +ART_HOST_DEPENDENCIES := \ + $(ART_HOST_EXECUTABLES) \ + $(HOST_OUT_JAVA_LIBRARIES)/core-libart-hostdex.jar \ + $(HOST_OUT_JAVA_LIBRARIES)/core-oj-hostdex.jar \ + $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \ + $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \ + $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvm$(ART_HOST_SHLIB_EXTENSION) +ART_TARGET_DEPENDENCIES := \ + $(ART_TARGET_EXECUTABLES) \ + $(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \ + $(TARGET_OUT_JAVA_LIBRARIES)/core-oj.jar \ + $(TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \ + $(TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \ + $(TARGET_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so +ifdef TARGET_2ND_ARCH +ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so +ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so +ART_TARGET_DEPENDENCIES += $(2ND_TARGET_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so +endif +ifdef HOST_2ND_ARCH +ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libjavacore.so +ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libopenjdk.so +ART_HOST_DEPENDENCIES += $(2ND_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvm.so +endif + +######################################################################## +# test rules + +ifeq ($(art_test_bother),true) + +# All the dependencies that must be built ahead of sync-ing them onto the target device. +TEST_ART_TARGET_SYNC_DEPS := + +include $(art_path)/build/Android.common_test.mk +include $(art_path)/build/Android.gtest.mk +include $(art_path)/test/Android.run-test.mk +include $(art_path)/benchmark/Android.mk + +TEST_ART_ADB_ROOT_AND_REMOUNT := \ + (adb root && \ + adb wait-for-device remount && \ + ((adb shell touch /system/testfile && \ + (adb shell rm /system/testfile || true)) || \ + (adb disable-verity && \ + adb reboot && \ + adb wait-for-device root && \ + adb wait-for-device remount))) + +# Sync test files to the target, depends upon all things that must be pushed to the target. +.PHONY: test-art-target-sync +# Check if we need to sync. In case ART_TEST_ANDROID_ROOT is not empty, +# the code below uses 'adb push' instead of 'adb sync', which does not +# check if the files on the device have changed. +ifneq ($(ART_TEST_NO_SYNC),true) +ifeq ($(ART_TEST_ANDROID_ROOT),) +test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS) + $(TEST_ART_ADB_ROOT_AND_REMOUNT) + adb sync +else +test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS) + $(TEST_ART_ADB_ROOT_AND_REMOUNT) + adb wait-for-device push $(ANDROID_PRODUCT_OUT)/system $(ART_TEST_ANDROID_ROOT) +# Push the contents of the `data` dir into `/data` on the device. If +# `/data` already exists on the device, it is not overwritten, but its +# contents are updated. 
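+# For illustration (the concrete values here are hypothetical): with
+# ANDROID_PRODUCT_OUT=out/target/product/generic and
+# ART_TEST_ANDROID_ROOT=/system_art, the push above and the push below
+# expand to:
+#   adb wait-for-device push out/target/product/generic/system /system_art
+#   adb push out/target/product/generic/data /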
+ adb push $(ANDROID_PRODUCT_OUT)/data / +endif +endif + +# "mm test-art" to build and run all tests on host and device +.PHONY: test-art +test-art: test-art-host test-art-target + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-gtest +test-art-gtest: test-art-host-gtest test-art-target-gtest + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-run-test +test-art-run-test: test-art-host-run-test test-art-target-run-test + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +######################################################################## +# host test rules + +VIXL_TEST_DEPENDENCY := +# We can only run the vixl tests on 64-bit hosts (vixl testing issue) when its a +# top-level build (to declare the vixl test rule). +ifneq ($(HOST_PREFER_32_BIT),true) +ifeq ($(ONE_SHOT_MAKEFILE),) +VIXL_TEST_DEPENDENCY := run-vixl-tests +endif +endif + +.PHONY: test-art-host-vixl +test-art-host-vixl: $(VIXL_TEST_DEPENDENCY) + +# "mm test-art-host" to build and run all host tests. +.PHONY: test-art-host +test-art-host: test-art-host-gtest test-art-host-run-test \ + test-art-host-vixl test-art-host-dexdump + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely with the default compiler. +.PHONY: test-art-host-default +test-art-host-default: test-art-host-run-test-default + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely with the optimizing compiler. +.PHONY: test-art-host-optimizing +test-art-host-optimizing: test-art-host-run-test-optimizing + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely on the interpreter. +.PHONY: test-art-host-interpreter +test-art-host-interpreter: test-art-host-run-test-interpreter + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All host tests that run solely on the jit. 
+.PHONY: test-art-host-jit +test-art-host-jit: test-art-host-run-test-jit + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Primary host architecture variants: +.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \ + test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Secondary host architecture variants: +ifneq ($(HOST_PREFER_32_BIT),true) +.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \ + test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX) +test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) +endif + +# Dexdump/list regression test. +.PHONY: test-art-host-dexdump +test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist) + ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests + +# Valgrind. +.PHONY: valgrind-test-art-host +valgrind-test-art-host: valgrind-test-art-host-gtest + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: valgrind-test-art-host32 +valgrind-test-art-host32: valgrind-test-art-host-gtest32 + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: valgrind-test-art-host64 +valgrind-test-art-host64: valgrind-test-art-host-gtest64 + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +######################################################################## +# target test rules + +# "mm test-art-target" to build and run all target tests. 
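+# Note that the target test rules below assume a device (or emulator) is
+# reachable over adb; test files are synced onto it first via the
+# test-art-target-sync rule defined above.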
+.PHONY: test-art-target +test-art-target: test-art-target-gtest test-art-target-run-test + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely with the default compiler. +.PHONY: test-art-target-default +test-art-target-default: test-art-target-run-test-default + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely with the optimizing compiler. +.PHONY: test-art-target-optimizing +test-art-target-optimizing: test-art-target-run-test-optimizing + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely on the interpreter. +.PHONY: test-art-target-interpreter +test-art-target-interpreter: test-art-target-run-test-interpreter + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# All target tests that run solely on the jit. +.PHONY: test-art-target-jit +test-art-target-jit: test-art-target-run-test-jit + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Primary target architecture variants: +.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \ + test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +# Secondary target architecture variants: +ifdef TARGET_2ND_ARCH +.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \ + test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) + +.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) +test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) + $(hide) $(call ART_TEST_PREREQ_FINISHED,$@) +endif + +endif # 
art_test_bother + +######################################################################## +# oat-target and oat-target-sync rules + +OAT_TARGET_RULES := + +# $(1): input jar or apk target location +define declare-oat-target-target +OUT_OAT_FILE := $(PRODUCT_OUT)/$(basename $(1)).odex + +ifeq ($(ONE_SHOT_MAKEFILE),) +# ONE_SHOT_MAKEFILE is empty for a top level build and we don't want +# to define the oat-target-* rules there because they will conflict +# with the build/core/dex_preopt.mk defined rules. +.PHONY: oat-target-$(1) +oat-target-$(1): + +else +.PHONY: oat-target-$(1) +oat-target-$(1): $$(OUT_OAT_FILE) + +$$(OUT_OAT_FILE): $(PRODUCT_OUT)/$(1) $(DEFAULT_DEX_PREOPT_BUILT_IMAGE) $(DEX2OAT_DEPENDENCY) + @mkdir -p $$(dir $$@) + $(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \ + --boot-image=$(DEFAULT_DEX_PREOPT_BUILT_IMAGE) --dex-file=$(PRODUCT_OUT)/$(1) \ + --dex-location=/$(1) --oat-file=$$@ \ + --instruction-set=$(DEX2OAT_TARGET_ARCH) \ + --instruction-set-variant=$(DEX2OAT_TARGET_CPU_VARIANT) \ + --instruction-set-features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ + --android-root=$(PRODUCT_OUT)/system --include-patch-information \ + --runtime-arg -Xnorelocate + +endif + +OAT_TARGET_RULES += oat-target-$(1) +endef + +$(foreach file,\ + $(filter-out\ + $(addprefix $(TARGET_OUT_JAVA_LIBRARIES)/,$(addsuffix .jar,$(LIBART_TARGET_BOOT_JARS))),\ + $(wildcard $(TARGET_OUT_APPS)/*.apk) $(wildcard $(TARGET_OUT_JAVA_LIBRARIES)/*.jar)),\ + $(eval $(call declare-oat-target-target,$(subst $(PRODUCT_OUT)/,,$(file))))) + +.PHONY: oat-target +oat-target: $(ART_TARGET_DEPENDENCIES) $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) $(OAT_TARGET_RULES) + +.PHONY: oat-target-sync +oat-target-sync: oat-target + $(TEST_ART_ADB_ROOT_AND_REMOUNT) + adb sync + +#################################################################################################### +# Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma. +# +# The library is required for starting a runtime in debug mode, but libartd does not depend on it +# (dependency cycle otherwise). +# +# Note: * As the package is phony to create a dependency the package name is irrelevant. +# * We make MULTILIB explicit to "both," just to state here that we want both libraries on +# 64-bit systems, even if it is the default. + +# ART on the host. +ifeq ($(ART_BUILD_HOST_DEBUG),true) +include $(CLEAR_VARS) +LOCAL_MODULE := art-libartd-libopenjdkd-host-dependency +LOCAL_MULTILIB := both +LOCAL_REQUIRED_MODULES := libopenjdkd +LOCAL_IS_HOST_MODULE := true +include $(BUILD_PHONY_PACKAGE) +endif + +# ART on the target. +ifeq ($(ART_BUILD_TARGET_DEBUG),true) +include $(CLEAR_VARS) +LOCAL_MODULE := art-libartd-libopenjdkd-target-dependency +LOCAL_MULTILIB := both +LOCAL_REQUIRED_MODULES := libopenjdkd +include $(BUILD_PHONY_PACKAGE) +endif + +######################################################################## +# "m build-art" for quick minimal build +.PHONY: build-art +build-art: build-art-host build-art-target + +.PHONY: build-art-host +build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CORE_IMG_OUTS) + +.PHONY: build-art-target +build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS) + +######################################################################## +# Rules for building all dependencies for tests. 
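+# Note: the prerequisites listed after "|" in the two rules below are GNU Make
+# order-only prerequisites; they are built before the target, but updating
+# them does not by itself mark the target out of date.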
+ +.PHONY: build-art-host-tests +build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES) + +.PHONY: build-art-target-tests +build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES) + +######################################################################## +# targets to switch back and forth from libdvm to libart + +.PHONY: use-art +use-art: + adb root + adb wait-for-device shell stop + adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so + adb shell start + +.PHONY: use-artd +use-artd: + adb root + adb wait-for-device shell stop + adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + adb shell start + +.PHONY: use-dalvik +use-dalvik: + adb root + adb wait-for-device shell stop + adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so + adb shell start + +.PHONY: use-art-full +use-art-full: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter \"\" + adb shell setprop dalvik.vm.image-dex2oat-filter \"\" + adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so + adb shell setprop dalvik.vm.usejit false + adb shell start + +.PHONY: use-artd-full +use-artd-full: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter \"\" + adb shell setprop dalvik.vm.image-dex2oat-filter \"\" + adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + adb shell setprop dalvik.vm.usejit false + adb shell start + +.PHONY: use-art-jit +use-art-jit: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime" + adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime" + adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so + adb shell setprop dalvik.vm.usejit true + adb shell start + +.PHONY: use-art-interpret-only +use-art-interpret-only: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter "interpret-only" + adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" + adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so + adb shell setprop dalvik.vm.usejit false + adb shell start + +.PHONY: use-artd-interpret-only +use-artd-interpret-only: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter "interpret-only" + adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only" + adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so + adb shell setprop dalvik.vm.usejit false + adb shell start + +.PHONY: use-art-verify-none +use-art-verify-none: + adb root + adb wait-for-device shell stop + adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/* + adb shell setprop dalvik.vm.dex2oat-filter "verify-none" + adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none" + adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so + adb shell setprop dalvik.vm.usejit false + adb shell start + +######################################################################## + +endif # !art_dont_bother + +# Clear locally used variables. 
+art_dont_bother := +art_test_bother := +TEST_ART_TARGET_SYNC_DEPS := + +include $(art_path)/runtime/openjdkjvm/Android.mk + +# Helper target that depends on boot image creation. +# +# Can be used, for example, to dump initialization failures: +# m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt +.PHONY: art-boot-image +art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) diff --git a/CleanSpec.mk b/CleanSpec.mk new file mode 100644 index 000000000..341df7840 --- /dev/null +++ b/CleanSpec.mk @@ -0,0 +1,55 @@ +# Copyright (C) 2007 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# If you don't need to do a full clean build but would like to touch +# a file or delete some intermediate files, add a clean step to the end +# of the list. These steps will only be run once, if they haven't been +# run before. +# +# E.g.: +# $(call add-clean-step, touch -c external/sqlite/sqlite3.h) +# $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates) +# +# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with +# files that are missing or have been moved. +# +# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory. +# Use $(OUT_DIR) to refer to the "out" directory. +# +# If you need to re-do something that's already mentioned, just copy +# the command and add it to the bottom of the list. E.g., if a change +# that you made last week required touching a file and a change you +# made today requires touching the same file, just copy the old +# touch step and add it to the end of the list. +# +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ + +# For example: +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates) +#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates) +#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f) +#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*) + +# Switching to jemalloc requires deleting these files. 
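+# (Presumably the libart/libartd intermediates were built against the previous
+# allocator and must not be reused by an incremental build.)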
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libart_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libartd_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libart_*) +$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libartd_*) + +# ************************************************ +# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST +# ************************************************ diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2 new file mode 100644 index 000000000..e69de29bb diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..d79b004b1 --- /dev/null +++ b/NOTICE @@ -0,0 +1,264 @@ + + Copyright (c) 2005-2013, The Android Open Source Project + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +------------------------------------------------------------------- + +For art/runtime/elf.h derived from external/llvm/include/llvm/Support/ELF.h + +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign. +All rights reserved. + +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. 
+ +============================================================================== +Copyrights and Licenses for Third Party Software Distributed with LLVM: +============================================================================== +The LLVM software contains code written by third parties. Such software will +have its own individual LICENSE.TXT file in the directory in which it appears. +This file will describe the copyrights, license, and restrictions which apply +to that code. + +The disclaimer of warranty in the University of Illinois Open Source License +applies to all code in the LLVM Distribution, and nothing in any of the +other licenses gives permission to use the names of the LLVM Team or the +University of Illinois to endorse or promote products derived from this +Software. + +The following pieces of software have additional or alternate copyrights, +licenses, and/or restrictions: + +Program Directory +------- --------- +Autoconf llvm/autoconf + llvm/projects/ModuleMaker/autoconf +Google Test llvm/utils/unittest/googletest +OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex} +pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT} +ARM contributions llvm/lib/Target/ARM/LICENSE.TXT +md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h diff --git a/benchmark/Android.mk b/benchmark/Android.mk new file mode 100644 index 000000000..a4a603ad0 --- /dev/null +++ b/benchmark/Android.mk @@ -0,0 +1,79 @@ +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +LOCAL_PATH := $(call my-dir) + +include art/build/Android.common_build.mk + +LIBARTBENCHMARK_COMMON_SRC_FILES := \ + jobject-benchmark/jobject_benchmark.cc \ + jni-perf/perf_jni.cc \ + scoped-primitive-array/scoped_primitive_array.cc + +# $(1): target or host +define build-libartbenchmark + ifneq ($(1),target) + ifneq ($(1),host) + $$(error expected target or host for argument 1, received $(1)) + endif + endif + + art_target_or_host := $(1) + + include $(CLEAR_VARS) + LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) + LOCAL_MODULE := libartbenchmark + ifeq ($$(art_target_or_host),target) + LOCAL_MODULE_TAGS := tests + endif + LOCAL_SRC_FILES := $(LIBARTBENCHMARK_COMMON_SRC_FILES) + LOCAL_SHARED_LIBRARIES += libart libbacktrace libnativehelper + LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime + LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk + LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk + ifeq ($$(art_target_or_host),target) + $(call set-target-local-clang-vars) + $(call set-target-local-cflags-vars,debug) + LOCAL_SHARED_LIBRARIES += libdl + LOCAL_MULTILIB := both + # LOCAL_MODULE_PATH_32 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_32) + # LOCAL_MODULE_PATH_64 := $(ART_TARGET_OUT)/$(ART_TARGET_ARCH_64) + LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH) + include $(BUILD_SHARED_LIBRARY) + else # host + LOCAL_CLANG := $(ART_HOST_CLANG) + LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS) + LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) + LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread + LOCAL_IS_HOST_MODULE := true + LOCAL_MULTILIB := both + include $(BUILD_HOST_SHARED_LIBRARY) + endif + + # Clear locally used variables. + art_target_or_host := +endef + +ifeq ($(ART_BUILD_TARGET),true) + $(eval $(call build-libartbenchmark,target)) +endif +ifeq ($(ART_BUILD_HOST),true) + $(eval $(call build-libartbenchmark,host)) +endif + +# Clear locally used variables. +LOCAL_PATH := +LIBARTBENCHMARK_COMMON_SRC_FILES := diff --git a/benchmark/jni-perf/info.txt b/benchmark/jni-perf/info.txt new file mode 100644 index 000000000..010b57be2 --- /dev/null +++ b/benchmark/jni-perf/info.txt @@ -0,0 +1 @@ +Tests for measuring performance of JNI state changes. diff --git a/benchmark/jni-perf/perf_jni.cc b/benchmark/jni-perf/perf_jni.cc new file mode 100644 index 000000000..cd8d520f1 --- /dev/null +++ b/benchmark/jni-perf/perf_jni.cc @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "jni.h" +#include "scoped_thread_state_change.h" +#include "thread.h" + +namespace art { + +namespace { + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfJniEmptyCall(JNIEnv*, jobject) {} + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOACall(JNIEnv* env, jobject) { + ScopedObjectAccess soa(env); +} + +extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOAUncheckedCall(JNIEnv*, jobject) { + ScopedObjectAccessUnchecked soa(Thread::Current()); +} + +} // namespace + +} // namespace art diff --git a/benchmark/jni-perf/src/JniPerfBenchmark.java b/benchmark/jni-perf/src/JniPerfBenchmark.java new file mode 100644 index 000000000..b1b21ce0b --- /dev/null +++ b/benchmark/jni-perf/src/JniPerfBenchmark.java @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.google.caliper.SimpleBenchmark; + +public class JniPerfBenchmark extends SimpleBenchmark { + private static final String MSG = "ABCDE"; + + native void perfJniEmptyCall(); + native void perfSOACall(); + native void perfSOAUncheckedCall(); + + public void timeFastJNI(int N) { + // TODO: This might be an intrinsic. + for (long i = 0; i < N; i++) { + char c = MSG.charAt(2); + } + } + + public void timeEmptyCall(int N) { + for (long i = 0; i < N; i++) { + perfJniEmptyCall(); + } + } + + public void timeSOACall(int N) { + for (long i = 0; i < N; i++) { + perfSOACall(); + } + } + + public void timeSOAUncheckedCall(int N) { + for (long i = 0; i < N; i++) { + perfSOAUncheckedCall(); + } + } + + { + System.loadLibrary("artbenchmark"); + } +} diff --git a/benchmark/jobject-benchmark/info.txt b/benchmark/jobject-benchmark/info.txt new file mode 100644 index 000000000..f2a256a3e --- /dev/null +++ b/benchmark/jobject-benchmark/info.txt @@ -0,0 +1,7 @@ +Benchmark for jobject functions + +Measures performance of: +Add/RemoveLocalRef +Add/RemoveGlobalRef +Add/RemoveWeakGlobalRef +Decoding local, weak, global, handle scope jobjects. diff --git a/benchmark/jobject-benchmark/jobject_benchmark.cc b/benchmark/jobject-benchmark/jobject_benchmark.cc new file mode 100644 index 000000000..e7ca9ebc1 --- /dev/null +++ b/benchmark/jobject-benchmark/jobject_benchmark.cc @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jni.h" + +#include "mirror/class-inl.h" +#include "scoped_thread_state_change.h" + +namespace art { +namespace { + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveLocal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Env()->AddLocalReference(obj); + soa.Env()->DeleteLocalRef(ref); + } +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeLocal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Env()->AddLocalReference(obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Env()->DeleteLocalRef(ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); + soa.Vm()->DeleteGlobalRef(soa.Self(), ref); + } +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Vm()->DeleteGlobalRef(soa.Self(), ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveWeakGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + for (jint i = 0; i < reps; ++i) { + jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); + soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); + } +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeWeakGlobal( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + mirror::Object* obj = soa.Decode(jobj); + CHECK(obj != nullptr); + jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); + for (jint i = 0; i < reps; ++i) { + CHECK_EQ(soa.Decode(ref), obj); + } + soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); +} + +extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeHandleScopeRef( + JNIEnv* env, jobject jobj, jint reps) { + ScopedObjectAccess soa(env); + for (jint i = 0; i < reps; ++i) { + soa.Decode(jobj); + } +} + +} // namespace +} // namespace art diff --git a/benchmark/jobject-benchmark/src/JObjectBenchmark.java b/benchmark/jobject-benchmark/src/JObjectBenchmark.java new file mode 100644 index 000000000..f4c059c58 --- /dev/null +++ b/benchmark/jobject-benchmark/src/JObjectBenchmark.java @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import com.google.caliper.SimpleBenchmark; + +public class JObjectBenchmark extends SimpleBenchmark { + public JObjectBenchmark() { + // Make sure to link methods before benchmark starts. + System.loadLibrary("artbenchmark"); + timeAddRemoveLocal(1); + timeDecodeLocal(1); + timeAddRemoveGlobal(1); + timeDecodeGlobal(1); + timeAddRemoveWeakGlobal(1); + timeDecodeWeakGlobal(1); + timeDecodeHandleScopeRef(1); + } + + public native void timeAddRemoveLocal(int reps); + public native void timeDecodeLocal(int reps); + public native void timeAddRemoveGlobal(int reps); + public native void timeDecodeGlobal(int reps); + public native void timeAddRemoveWeakGlobal(int reps); + public native void timeDecodeWeakGlobal(int reps); + public native void timeDecodeHandleScopeRef(int reps); +} diff --git a/benchmark/scoped-primitive-array/info.txt b/benchmark/scoped-primitive-array/info.txt new file mode 100644 index 000000000..93abb7ce8 --- /dev/null +++ b/benchmark/scoped-primitive-array/info.txt @@ -0,0 +1 @@ +Tests for measuring performance of ScopedPrimitiveArray. diff --git a/benchmark/scoped-primitive-array/scoped_primitive_array.cc b/benchmark/scoped-primitive-array/scoped_primitive_array.cc new file mode 100644 index 000000000..166415729 --- /dev/null +++ b/benchmark/scoped-primitive-array/scoped_primitive_array.cc @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "jni.h" +#include "ScopedPrimitiveArray.h" + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureByteArray( + JNIEnv* env, jclass, int reps, jbyteArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedByteArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureShortArray( + JNIEnv* env, jclass, int reps, jshortArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedShortArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureIntArray( + JNIEnv* env, jclass, int reps, jintArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedIntArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} + +extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureLongArray( + JNIEnv* env, jclass, int reps, jlongArray arr) { + jlong ret = 0; + for (jint i = 0; i < reps; ++i) { + ScopedLongArrayRO sc(env, arr); + ret += sc[0] + sc[sc.size() - 1]; + } + return ret; +} diff --git a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java new file mode 100644 index 000000000..be276fe48 --- /dev/null +++ b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import com.google.caliper.SimpleBenchmark; + +public class ScopedPrimitiveArrayBenchmark extends SimpleBenchmark { + // Measure adds the first and last element of the array by using ScopedPrimitiveArray. 
+ static native long measureByteArray(int reps, byte[] arr); + static native long measureShortArray(int reps, short[] arr); + static native long measureIntArray(int reps, int[] arr); + static native long measureLongArray(int reps, long[] arr); + + static final int smallLength = 16; + static final int mediumLength = 256; + static final int largeLength = 8096; + static byte[] smallBytes = new byte[smallLength]; + static byte[] mediumBytes = new byte[mediumLength]; + static byte[] largeBytes = new byte[largeLength]; + static short[] smallShorts = new short[smallLength]; + static short[] mediumShorts = new short[mediumLength]; + static short[] largeShorts = new short[largeLength]; + static int[] smallInts = new int[smallLength]; + static int[] mediumInts = new int[mediumLength]; + static int[] largeInts = new int[largeLength]; + static long[] smallLongs = new long[smallLength]; + static long[] mediumLongs = new long[mediumLength]; + static long[] largeLongs = new long[largeLength]; + + public void timeSmallBytes(int reps) { + measureByteArray(reps, smallBytes); + } + + public void timeMediumBytes(int reps) { + measureByteArray(reps, mediumBytes); + } + + public void timeLargeBytes(int reps) { + measureByteArray(reps, largeBytes); + } + + public void timeSmallShorts(int reps) { + measureShortArray(reps, smallShorts); + } + + public void timeMediumShorts(int reps) { + measureShortArray(reps, mediumShorts); + } + + public void timeLargeShorts(int reps) { + measureShortArray(reps, largeShorts); + } + + public void timeSmallInts(int reps) { + measureIntArray(reps, smallInts); + } + + public void timeMediumInts(int reps) { + measureIntArray(reps, mediumInts); + } + + public void timeLargeInts(int reps) { + measureIntArray(reps, largeInts); + } + + public void timeSmallLongs(int reps) { + measureLongArray(reps, smallLongs); + } + + public void timeMediumLongs(int reps) { + measureLongArray(reps, mediumLongs); + } + + public void timeLargeLongs(int reps) { + measureLongArray(reps, largeLongs); + } + + { + System.loadLibrary("artbenchmark"); + } +} diff --git a/build/Android.common.mk b/build/Android.common.mk new file mode 100644 index 000000000..6befec560 --- /dev/null +++ b/build/Android.common.mk @@ -0,0 +1,112 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_MK +ART_ANDROID_COMMON_MK = true + +ART_TARGET_SUPPORTED_ARCH := arm arm64 mips mips64 x86 x86_64 +ART_HOST_SUPPORTED_ARCH := x86 x86_64 + +ifneq ($(HOST_OS),darwin) + ART_HOST_SUPPORTED_ARCH := x86 x86_64 +else + # Mac OS doesn't support low-4GB allocation in a 64-bit process. So we won't be able to create + # our heaps. 
+ ART_HOST_SUPPORTED_ARCH := x86 + ART_MULTILIB_OVERRIDE_host := 32 +endif + +ART_COVERAGE := false + +ifeq ($(ART_COVERAGE),true) +# https://gcc.gnu.org/onlinedocs/gcc/Cross-profiling.html +GCOV_PREFIX := /data/local/tmp/gcov +# GCOV_PREFIX_STRIP is an integer that defines how many levels should be +# stripped off the beginning of the path. We want the paths in $GCOV_PREFIX to +# be relative to $ANDROID_BUILD_TOP so we can just adb pull from the top and not +# have to worry about placing things ourselves. +GCOV_PREFIX_STRIP := $(shell echo $(ANDROID_BUILD_TOP) | grep -o / | wc -l) +GCOV_ENV := GCOV_PREFIX=$(GCOV_PREFIX) GCOV_PREFIX_STRIP=$(GCOV_PREFIX_STRIP) +else +GCOV_ENV := +endif + +ifeq (,$(filter $(TARGET_ARCH),$(ART_TARGET_SUPPORTED_ARCH))) +$(warning unsupported TARGET_ARCH=$(TARGET_ARCH)) +endif +ifeq (,$(filter $(HOST_ARCH),$(ART_HOST_SUPPORTED_ARCH))) +$(warning unsupported HOST_ARCH=$(HOST_ARCH)) +endif + +# Primary vs. secondary +2ND_TARGET_ARCH := $(TARGET_2ND_ARCH) +TARGET_INSTRUCTION_SET_FEATURES := $(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) +2ND_TARGET_INSTRUCTION_SET_FEATURES := $($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) +ifdef TARGET_2ND_ARCH + ifneq ($(filter %64,$(TARGET_ARCH)),) + ART_PHONY_TEST_TARGET_SUFFIX := 64 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := 32 + ART_TARGET_ARCH_32 := $(TARGET_2ND_ARCH) + ART_TARGET_ARCH_64 := $(TARGET_ARCH) + else + # TODO: ??? + $(warning Do not know what to do with this multi-target configuration!) + ART_PHONY_TEST_TARGET_SUFFIX := 32 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + ART_TARGET_ARCH_32 := $(TARGET_ARCH) + ART_TARGET_ARCH_64 := + endif +else + ifneq ($(filter %64,$(TARGET_ARCH)),) + ART_PHONY_TEST_TARGET_SUFFIX := 64 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + ART_TARGET_ARCH_32 := + ART_TARGET_ARCH_64 := $(TARGET_ARCH) + else + ART_PHONY_TEST_TARGET_SUFFIX := 32 + 2ND_ART_PHONY_TEST_TARGET_SUFFIX := + ART_TARGET_ARCH_32 := $(TARGET_ARCH) + ART_TARGET_ARCH_64 := + endif +endif + +ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX) +ART_HOST_SHLIB_EXTENSION ?= .so +ifeq ($(HOST_PREFER_32_BIT),true) + ART_PHONY_TEST_HOST_SUFFIX := 32 + 2ND_ART_PHONY_TEST_HOST_SUFFIX := + ART_HOST_ARCH_32 := x86 + ART_HOST_ARCH_64 := + ART_HOST_ARCH := x86 + 2ND_ART_HOST_ARCH := + 2ND_HOST_ARCH := + ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH) + ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) + 2ND_ART_HOST_OUT_SHARED_LIBRARIES := +else + ART_PHONY_TEST_HOST_SUFFIX := 64 + 2ND_ART_PHONY_TEST_HOST_SUFFIX := 32 + ART_HOST_ARCH_32 := x86 + ART_HOST_ARCH_64 := x86_64 + ART_HOST_ARCH := x86_64 + 2ND_ART_HOST_ARCH := x86 + 2ND_HOST_ARCH := x86 + ART_HOST_LIBRARY_PATH := $(HOST_LIBRARY_PATH) + ART_HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT_SHARED_LIBRARIES) + 2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) +endif + +endif # ART_ANDROID_COMMON_MK diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk new file mode 100644 index 000000000..2294ddbd5 --- /dev/null +++ b/build/Android.common_build.mk @@ -0,0 +1,458 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifndef ART_ANDROID_COMMON_BUILD_MK
+ART_ANDROID_COMMON_BUILD_MK = true
+
+include art/build/Android.common.mk
+include art/build/Android.common_utils.mk
+
+# These can be overridden via the environment or by editing this file to
+# enable/disable certain build configurations.
+#
+# For example, to disable everything but the host debug build, use:
+#
+# (export ART_BUILD_TARGET_NDEBUG=false && export ART_BUILD_TARGET_DEBUG=false && export ART_BUILD_HOST_NDEBUG=false && ...)
+#
+# Beware that tests may use the non-debug build for performance, notably 055-enum-performance.
+#
+ART_BUILD_TARGET_NDEBUG ?= true
+ART_BUILD_TARGET_DEBUG ?= true
+ART_BUILD_HOST_NDEBUG ?= true
+ART_BUILD_HOST_DEBUG ?= true
+
+# Set these to change the optimization level ART is built at.
+ART_DEBUG_OPT_FLAG ?= -O2
+ART_NDEBUG_OPT_FLAG ?= -O3
+
+# Enable the static builds only for checkbuilds.
+ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
+  ART_BUILD_HOST_STATIC ?= true
+else
+  ART_BUILD_HOST_STATIC ?= false
+endif
+
+# ASan does not support static linkage.
+ifdef SANITIZE_HOST
+  ART_BUILD_HOST_STATIC := false
+endif
+
+ifneq ($(HOST_OS),linux)
+  ART_BUILD_HOST_STATIC := false
+endif
+
+ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
+$(info Disabling ART_BUILD_TARGET_NDEBUG)
+endif
+ifeq ($(ART_BUILD_TARGET_DEBUG),false)
+$(info Disabling ART_BUILD_TARGET_DEBUG)
+endif
+ifeq ($(ART_BUILD_HOST_NDEBUG),false)
+$(info Disabling ART_BUILD_HOST_NDEBUG)
+endif
+ifeq ($(ART_BUILD_HOST_DEBUG),false)
+$(info Disabling ART_BUILD_HOST_DEBUG)
+endif
+ifeq ($(ART_BUILD_HOST_STATIC),true)
+$(info Enabling ART_BUILD_HOST_STATIC)
+endif
+
+ifeq ($(ART_TEST_DEBUG_GC),true)
+  ART_DEFAULT_GC_TYPE := SS
+  ART_USE_TLAB := true
+endif
+
+#
+# Used to enable JIT
+#
+ART_JIT := false
+ifneq ($(wildcard art/JIT_ART),)
+$(info Enabling ART_JIT because of existence of art/JIT_ART)
+ART_JIT := true
+endif
+ifeq ($(WITH_ART_JIT), true)
+ART_JIT := true
+endif
+
+#
+# Used to change the default GC. Valid values are CMS, SS, GSS. The default is CMS.
+#
+ART_DEFAULT_GC_TYPE ?= CMS
+art_default_gc_type_cflags := -DART_DEFAULT_GC_TYPE_IS_$(ART_DEFAULT_GC_TYPE)
+
+ART_HOST_CFLAGS :=
+ART_TARGET_CFLAGS :=
+
+ART_HOST_ASFLAGS :=
+ART_TARGET_ASFLAGS :=
+
+# Clang build support.
+
+# Host.
+ART_HOST_CLANG := false
+ifneq ($(WITHOUT_HOST_CLANG),true)
+  # By default, host builds use clang for better warnings.
+  ART_HOST_CLANG := true
+endif
+
+# Clang on the target. Target builds use GCC by default.
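+# USE_CLANG_PLATFORM_BUILD is honored first; the per-architecture
+# ART_TARGET_CLANG_$(arch) variables below can then override the choice for
+# individual architectures. A sketch with hypothetical values (not a shipped
+# configuration):
+#
+# ART_TARGET_CLANG := false
+# ART_TARGET_CLANG_arm64 := true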
+ifneq ($(USE_CLANG_PLATFORM_BUILD),) +ART_TARGET_CLANG := $(USE_CLANG_PLATFORM_BUILD) +else +ART_TARGET_CLANG := false +endif +ART_TARGET_CLANG_arm := +ART_TARGET_CLANG_arm64 := +ART_TARGET_CLANG_mips := +ART_TARGET_CLANG_mips64 := +ART_TARGET_CLANG_x86 := +ART_TARGET_CLANG_x86_64 := + +define set-target-local-clang-vars + LOCAL_CLANG := $(ART_TARGET_CLANG) + $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH), + ifneq ($$(ART_TARGET_CLANG_$(arch)),) + LOCAL_CLANG_$(arch) := $$(ART_TARGET_CLANG_$(arch)) + endif) +endef + +ART_TARGET_CLANG_CFLAGS := +ART_TARGET_CLANG_CFLAGS_arm := +ART_TARGET_CLANG_CFLAGS_arm64 := +ART_TARGET_CLANG_CFLAGS_mips := +ART_TARGET_CLANG_CFLAGS_mips64 := +ART_TARGET_CLANG_CFLAGS_x86 := +ART_TARGET_CLANG_CFLAGS_x86_64 := + +# Warn about thread safety violations with clang. +art_clang_cflags := -Wthread-safety -Wthread-safety-negative + +# Warn if switch fallthroughs aren't annotated. +art_clang_cflags += -Wimplicit-fallthrough + +# Enable float equality warnings. +art_clang_cflags += -Wfloat-equal + +# Enable warning of converting ints to void*. +art_clang_cflags += -Wint-to-void-pointer-cast + +# Enable warning of wrong unused annotations. +art_clang_cflags += -Wused-but-marked-unused + +# Enable warning for deprecated language features. +art_clang_cflags += -Wdeprecated + +# Enable warning for unreachable break & return. +art_clang_cflags += -Wunreachable-code-break -Wunreachable-code-return + +# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's +# a pain. +ifneq ($(HOST_OS),darwin) + art_clang_cflags += -Wmissing-noreturn +endif + + +# GCC-only warnings. +art_gcc_cflags := -Wunused-but-set-parameter +# Suggest const: too many false positives, but good for a trial run. +# -Wsuggest-attribute=const +# Useless casts: too many, as we need to be 32/64 agnostic, but the compiler knows. +# -Wuseless-cast +# Zero-as-null: Have to convert all NULL and "diagnostic ignore" all includes like libnativehelper +# that are still stuck pre-C++11. +# -Wzero-as-null-pointer-constant \ +# Suggest final: Have to move to a more recent GCC. +# -Wsuggest-final-types + +ART_TARGET_CLANG_CFLAGS := $(art_clang_cflags) +ifeq ($(ART_HOST_CLANG),true) + # Bug: 15446488. We don't omit the frame pointer to work around + # clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress. + ART_HOST_CFLAGS += $(art_clang_cflags) -fno-omit-frame-pointer +else + ART_HOST_CFLAGS += $(art_gcc_cflags) +endif +ifneq ($(ART_TARGET_CLANG),true) + ART_TARGET_CFLAGS += $(art_gcc_cflags) +else + # TODO: if we ever want to support GCC/Clang mix for multi-target products, this needs to be + # split up. + ifeq ($(ART_TARGET_CLANG_$(TARGET_ARCH)),false) + ART_TARGET_CFLAGS += $(art_gcc_cflags) + endif +endif + +# Clear local variables now their use has ended. +art_clang_cflags := +art_gcc_cflags := + +ART_CPP_EXTENSION := .cc + +ART_C_INCLUDES := \ + external/gtest/include \ + external/icu/icu4c/source/common \ + external/lz4/lib \ + external/valgrind/include \ + external/valgrind \ + external/vixl/src \ + external/zlib \ + +# We optimize Thread::Current() with a direct TLS access. This requires access to a private +# Bionic header. +# Note: technically we only need this on device, but this avoids the duplication of the includes. +ART_C_INCLUDES += bionic/libc/private + +# Base set of cflags used by all things ART. 
+art_cflags := \ + -fno-rtti \ + -std=gnu++11 \ + -ggdb3 \ + -Wall \ + -Werror \ + -Wextra \ + -Wstrict-aliasing \ + -fstrict-aliasing \ + -Wunreachable-code \ + -Wredundant-decls \ + -Wshadow \ + -Wunused \ + -fvisibility=protected \ + $(art_default_gc_type_cflags) + +# The architectures the compiled tools are able to run on. Setting this to 'all' will cause all +# architectures to be included. +ART_TARGET_CODEGEN_ARCHS ?= all +ART_HOST_CODEGEN_ARCHS ?= all + +ifeq ($(ART_TARGET_CODEGEN_ARCHS),all) + ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH)) + # We need to handle the fact that some compiler tests mix code from different architectures. + ART_TARGET_COMPILER_TESTS ?= true +else + ART_TARGET_COMPILER_TESTS := false + ifeq ($(ART_TARGET_CODEGEN_ARCHS),svelte) + ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_ARCH_64) $(ART_TARGET_ARCH_32)) + endif +endif +ifeq ($(ART_HOST_CODEGEN_ARCHS),all) + ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_SUPPORTED_ARCH) $(ART_HOST_SUPPORTED_ARCH)) + ART_HOST_COMPILER_TESTS ?= true +else + ART_HOST_COMPILER_TESTS := false + ifeq ($(ART_HOST_CODEGEN_ARCHS),svelte) + ART_HOST_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS) $(ART_HOST_ARCH_64) $(ART_HOST_ARCH_32)) + endif +endif + +ifneq (,$(filter arm64,$(ART_TARGET_CODEGEN_ARCHS))) + ART_TARGET_CODEGEN_ARCHS += arm +endif +ifneq (,$(filter mips64,$(ART_TARGET_CODEGEN_ARCHS))) + ART_TARGET_CODEGEN_ARCHS += mips +endif +ifneq (,$(filter x86_64,$(ART_TARGET_CODEGEN_ARCHS))) + ART_TARGET_CODEGEN_ARCHS += x86 +endif +ART_TARGET_CODEGEN_ARCHS := $(sort $(ART_TARGET_CODEGEN_ARCHS)) +ifneq (,$(filter arm64,$(ART_HOST_CODEGEN_ARCHS))) + ART_HOST_CODEGEN_ARCHS += arm +endif +ifneq (,$(filter mips64,$(ART_HOST_CODEGEN_ARCHS))) + ART_HOST_CODEGEN_ARCHS += mips +endif +ifneq (,$(filter x86_64,$(ART_HOST_CODEGEN_ARCHS))) + ART_HOST_CODEGEN_ARCHS += x86 +endif +ART_HOST_CODEGEN_ARCHS := $(sort $(ART_HOST_CODEGEN_ARCHS)) + +# Base set of cflags used by target build only +art_target_cflags := \ + $(foreach target_arch,$(strip $(ART_TARGET_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(target_arch)) +# Base set of cflags used by host build only +art_host_cflags := \ + $(foreach host_arch,$(strip $(ART_HOST_CODEGEN_ARCHS)), -DART_ENABLE_CODEGEN_$(host_arch)) + +# Base set of asflags used by all things ART. +art_asflags := + +# Missing declarations: too many at the moment, as we use "extern" quite a bit. +# -Wmissing-declarations \ + + + +ifdef ART_IMT_SIZE + art_cflags += -DIMT_SIZE=$(ART_IMT_SIZE) +else + # Default is 64 + art_cflags += -DIMT_SIZE=64 +endif + +ifeq ($(ART_HEAP_POISONING),true) + art_cflags += -DART_HEAP_POISONING=1 + art_asflags += -DART_HEAP_POISONING=1 +endif + +# +# Used to change the read barrier type. Valid values are BAKER, BROOKS, TABLELOOKUP. +# The default is BAKER. +# +ART_READ_BARRIER_TYPE ?= BAKER + +ifeq ($(ART_USE_READ_BARRIER),true) + art_cflags += -DART_USE_READ_BARRIER=1 + art_cflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1 + art_asflags += -DART_USE_READ_BARRIER=1 + art_asflags += -DART_READ_BARRIER_TYPE_IS_$(ART_READ_BARRIER_TYPE)=1 + + # Temporarily override -fstack-protector-strong with -fstack-protector to avoid a major + # slowdown with the read barrier config. b/26744236. + art_cflags += -fstack-protector +endif + +ifeq ($(ART_USE_TLAB),true) + art_cflags += -DART_USE_TLAB=1 +endif + +# Cflags for non-debug ART and ART tools. +art_non_debug_cflags := \ + $(ART_NDEBUG_OPT_FLAG) + +# Cflags for debug ART and ART tools. 
+art_debug_cflags := \ + $(ART_DEBUG_OPT_FLAG) \ + -DDYNAMIC_ANNOTATIONS_ENABLED=1 \ + -UNDEBUG + +art_host_non_debug_cflags := $(art_non_debug_cflags) +art_target_non_debug_cflags := $(art_non_debug_cflags) + +ifeq ($(HOST_OS),linux) + # Larger frame-size for host clang builds today + ifneq ($(ART_COVERAGE),true) + ifneq ($(NATIVE_COVERAGE),true) + art_host_non_debug_cflags += -Wframe-larger-than=2700 + ifdef SANITIZE_TARGET + art_target_non_debug_cflags += -Wframe-larger-than=6400 + else + art_target_non_debug_cflags += -Wframe-larger-than=1728 + endif + endif + endif +endif + +ifndef LIBART_IMG_HOST_BASE_ADDRESS + $(error LIBART_IMG_HOST_BASE_ADDRESS unset) +endif +ART_HOST_CFLAGS += $(art_cflags) -DART_BASE_ADDRESS=$(LIBART_IMG_HOST_BASE_ADDRESS) +ART_HOST_CFLAGS += -DART_DEFAULT_INSTRUCTION_SET_FEATURES=default $(art_host_cflags) +ART_HOST_ASFLAGS += $(art_asflags) + +ifndef LIBART_IMG_TARGET_BASE_ADDRESS + $(error LIBART_IMG_TARGET_BASE_ADDRESS unset) +endif +ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) +ART_TARGET_CFLAGS += $(art_target_cflags) +ART_TARGET_ASFLAGS += $(art_asflags) + +ART_HOST_NON_DEBUG_CFLAGS := $(art_host_non_debug_cflags) +ART_TARGET_NON_DEBUG_CFLAGS := $(art_target_non_debug_cflags) +ART_HOST_DEBUG_CFLAGS := $(art_debug_cflags) +ART_TARGET_DEBUG_CFLAGS := $(art_debug_cflags) + +ifndef LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA + LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA=-0x1000000 +endif +ifndef LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA + LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA=0x1000000 +endif +ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA) +ART_HOST_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA) + +ifndef LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA + LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA=-0x1000000 +endif +ifndef LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA + LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA=0x1000000 +endif +ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA) +ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA) + +# To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16" +# ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs + +# Clear locals now they've served their purpose. +art_cflags := +art_asflags := +art_host_cflags := +art_target_cflags := +art_debug_cflags := +art_non_debug_cflags := +art_host_non_debug_cflags := +art_target_non_debug_cflags := +art_default_gc_type_cflags := + +ART_HOST_LDLIBS := +ifneq ($(ART_HOST_CLANG),true) + # GCC lacks libc++ assumed atomic operations, grab via libatomic. + ART_HOST_LDLIBS += -latomic +endif + +ART_TARGET_LDFLAGS := + +# $(1): ndebug_or_debug +define set-target-local-cflags-vars + LOCAL_CFLAGS += $(ART_TARGET_CFLAGS) + LOCAL_CFLAGS_x86 += $(ART_TARGET_CFLAGS_x86) + LOCAL_ASFLAGS += $(ART_TARGET_ASFLAGS) + LOCAL_LDFLAGS += $(ART_TARGET_LDFLAGS) + art_target_cflags_ndebug_or_debug := $(1) + ifeq ($$(art_target_cflags_ndebug_or_debug),debug) + LOCAL_CFLAGS += $(ART_TARGET_DEBUG_CFLAGS) + else + LOCAL_CFLAGS += $(ART_TARGET_NON_DEBUG_CFLAGS) + endif + + LOCAL_CLANG_CFLAGS := $(ART_TARGET_CLANG_CFLAGS) + $(foreach arch,$(ART_TARGET_SUPPORTED_ARCH), + LOCAL_CLANG_CFLAGS_$(arch) += $$(ART_TARGET_CLANG_CFLAGS_$(arch))) + + # Clear locally used variables. + art_target_cflags_ndebug_or_debug := +endef + +# Support for disabling certain builds. 
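+# As a worked example of the derivation below (a sketch): if only
+# ART_BUILD_HOST_DEBUG is left true, then ART_BUILD_HOST and ART_BUILD_DEBUG
+# become true while ART_BUILD_TARGET and ART_BUILD_NDEBUG stay false.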
+ART_BUILD_TARGET := false +ART_BUILD_HOST := false +ART_BUILD_NDEBUG := false +ART_BUILD_DEBUG := false +ifeq ($(ART_BUILD_TARGET_NDEBUG),true) + ART_BUILD_TARGET := true + ART_BUILD_NDEBUG := true +endif +ifeq ($(ART_BUILD_TARGET_DEBUG),true) + ART_BUILD_TARGET := true + ART_BUILD_DEBUG := true +endif +ifeq ($(ART_BUILD_HOST_NDEBUG),true) + ART_BUILD_HOST := true + ART_BUILD_NDEBUG := true +endif +ifeq ($(ART_BUILD_HOST_DEBUG),true) + ART_BUILD_HOST := true + ART_BUILD_DEBUG := true +endif + +endif # ART_ANDROID_COMMON_BUILD_MK diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk new file mode 100644 index 000000000..7be189468 --- /dev/null +++ b/build/Android.common_path.mk @@ -0,0 +1,102 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +ifndef ART_ANDROID_COMMON_PATH_MK +ART_ANDROID_COMMON_PATH_MK := true + +include art/build/Android.common.mk + +# Directory used for dalvik-cache on device. +ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache + +# Directory used for gtests on device. +# $(TARGET_OUT_DATA_NATIVE_TESTS) will evaluate to the nativetest directory in the target part on +# the host, so we can strip everything but the directory to find out whether it is "nativetest" or +# "nativetest64." +ART_TARGET_NATIVETEST_DIR := /data/$(notdir $(TARGET_OUT_DATA_NATIVE_TESTS))/art + +ART_TARGET_NATIVETEST_OUT := $(TARGET_OUT_DATA_NATIVE_TESTS)/art + +# Directory used for oat tests on device. +ART_TARGET_TEST_DIR := /data/art-test +ART_TARGET_TEST_OUT := $(TARGET_OUT_DATA)/art-test + +# Directory used for temporary test files on the host. +ifneq ($(TMPDIR),) +ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID) +else +ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$PPID) +endif + +# core.oat location on the device. +TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$(DEX2OAT_TARGET_ARCH)/core.oat +ifdef TARGET_2ND_ARCH +2ND_TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.oat +endif + +CORE_OAT_SUFFIX := .oat + +# core.oat locations under the out directory. +HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core +ifneq ($(HOST_PREFER_32_BIT),true) +2ND_HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core +endif +HOST_CORE_OAT_OUTS := +TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core +ifdef TARGET_2ND_ARCH +2ND_TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core +endif +TARGET_CORE_OAT_OUTS := + +CORE_IMG_SUFFIX := .art + +# core.art locations under the out directory. 
+HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core
+ifneq ($(HOST_PREFER_32_BIT),true)
+2ND_HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core
+endif
+HOST_CORE_IMG_OUTS :=
+TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core
+ifdef TARGET_2ND_ARCH
+2ND_TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core
+endif
+TARGET_CORE_IMG_OUTS :=
+
+# Oat location of core.art.
+HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
+TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
+
+# Jar files for core.art.
+TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle apache-xml
+HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS))
+
+HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
+endif
+
+HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Classpath for Jack compilation: we only need core-libart.
+HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
+HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
+TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
+TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
+
+endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
new file mode 100644
index 000000000..df7df261a
--- /dev/null
+++ b/build/Android.common_test.mk
@@ -0,0 +1,226 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifndef ART_ANDROID_COMMON_TEST_MK
+ART_ANDROID_COMMON_TEST_MK = true
+
+include art/build/Android.common_path.mk
+
+# We need to set a define for the nativetest dir so that common_runtime_test will know the right
+# path. (The problem is a 32-bit test running on a 64-bit device, whose files still live in
+# nativetest64.)
+ART_TARGET_CFLAGS += -DART_TARGET_NATIVETEST_DIR=${ART_TARGET_NATIVETEST_DIR}
+
+# List of known broken tests that we won't attempt to execute. The test name must be the full
+# rule name such as test-art-host-oat-optimizing-HelloWorld64.
+ART_TEST_KNOWN_BROKEN :=
+
+# List of run-tests to skip running in any configuration. This needs to be the full name of the
+# run-test such as '457-regs'.
+ART_TEST_RUN_TEST_SKIP ?=
+
+# Failing valgrind tests.
+# Note: *all* 64-bit tests involving the runtime are currently broken. b/15170219.
+
+# List of known failing tests whose failures will not abort the overall test run.
+# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
+ART_TEST_KNOWN_FAILING :=
+
+# Keep going after encountering a test failure?
+ART_TEST_KEEP_GOING ?= true
+
+# Do you want all tests, even those that are time-consuming?
+ART_TEST_FULL ?= false
+
+# Do you want run-test to be quieter? run-tests will only show output if they fail.
+ART_TEST_QUIET ?= true
+
+# Do you want interpreter tests run?
+ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
+ART_TEST_INTERPRETER_ACCESS_CHECKS ?= $(ART_TEST_FULL)
+
+# Do you want JIT tests run?
+ART_TEST_JIT ?= $(ART_TEST_FULL)
+
+# Do you want optimizing compiler tests run?
+ART_TEST_OPTIMIZING ?= true
+
+# Do we want to test a PIC-compiled core image?
+ART_TEST_PIC_IMAGE ?= $(ART_TEST_FULL)
+
+# Do we want to test PIC-compiled tests ("apps")?
+ART_TEST_PIC_TEST ?= $(ART_TEST_FULL)
+
+# Do you want tracing tests run?
+ART_TEST_TRACE ?= $(ART_TEST_FULL)
+
+# Do you want tracing tests (streaming mode) run?
+ART_TEST_TRACE_STREAM ?= $(ART_TEST_FULL)
+
+# Do you want tests with GC verification enabled run?
+ART_TEST_GC_VERIFY ?= $(ART_TEST_FULL)
+
+# Do you want tests with the GC stress mode enabled run?
+ART_TEST_GC_STRESS ?= $(ART_TEST_FULL)
+
+# Do you want tests with the JNI forcecopy mode enabled run?
+ART_TEST_JNI_FORCECOPY ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with relocation disabled run?
+ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with prebuilding?
+ART_TEST_RUN_TEST_PREBUILD ?= true
+
+# Do you want run-tests with no prebuilding enabled run?
+ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
+
+# Do you want run-tests without a pregenerated core.art?
+ART_TEST_RUN_TEST_NO_IMAGE ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with relocation enabled but patchoat failing?
+ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT ?= $(ART_TEST_FULL)
+
+# Do you want run-tests without a dex2oat?
+ART_TEST_RUN_TEST_NO_DEX2OAT ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with libartd.so?
+ART_TEST_RUN_TEST_DEBUG ?= true
+
+# Do you want run-tests with libart.so?
+ART_TEST_RUN_TEST_NDEBUG ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with the host/target's second arch?
+ART_TEST_RUN_TEST_2ND_ARCH ?= true
+
+# Do you want failed tests to have their artifacts cleaned up?
+ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
+
+# Do you want run-tests with the --debuggable flag?
+ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL)
+
+# Do you want to test multi-part boot-image functionality?
+ART_TEST_RUN_TEST_MULTI_IMAGE ?= $(ART_TEST_FULL)
+
+# Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
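+# A hypothetical invocation (sketch): $(call ART_TEST_FAILED,test-art-host-gtest-leb128_test64)
+# records the failure by touching $(ART_HOST_TEST_DIR)/failed/test-art-host-gtest-leb128_test64,
+# then prints the name with FAILED, or with KNOWN FAILURE if the test is listed in
+# ART_TEST_KNOWN_FAILING.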
+define ART_TEST_FAILED
+  ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
+    (mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \
+      echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \
+        && (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \
+        || (echo -e "$(1) \e[91mFAILED\e[0m" >&2 )))
+endef
+
+ifeq ($(ART_TEST_QUIET),true)
+  ART_TEST_ANNOUNCE_PASS := ( true )
+  ART_TEST_ANNOUNCE_RUN := ( true )
+  ART_TEST_ANNOUNCE_SKIP_FAILURE := ( true )
+  ART_TEST_ANNOUNCE_SKIP_BROKEN := ( true )
+else
+  # Note the use of '=' and not ':=' is intentional since these are actually functions.
+  ART_TEST_ANNOUNCE_PASS = ( echo -e "$(1) \e[92mPASSED\e[0m" )
+  ART_TEST_ANNOUNCE_RUN = ( echo -e "$(1) \e[95mRUNNING\e[0m")
+  ART_TEST_ANNOUNCE_SKIP_FAILURE = ( echo -e "$(1) \e[93mSKIPPING DUE TO EARLIER FAILURE\e[0m" )
+  ART_TEST_ANNOUNCE_SKIP_BROKEN = ( echo -e "$(1) \e[93mSKIPPING BROKEN TEST\e[0m" )
+endif
+
+# Define the command run on test success. $(1) is the name of the test. Executed by the shell.
+# The command prints "PASSED", then checks whether this was a top-level make target (e.g.
+# "mm test-art-host-oat-HelloWorld32"); if it was, it does nothing, otherwise it creates a file
+# to be printed in the passing test summary.
+define ART_TEST_PASSED
+  ( $(call ART_TEST_ANNOUNCE_PASS,$(1)) && \
+    (echo $(MAKECMDGOALS) | grep -q $(1) || \
+      (mkdir -p $(ART_HOST_TEST_DIR)/passed/ && touch $(ART_HOST_TEST_DIR)/passed/$(1))))
+endef
+
+# Define the command run on test success of multiple prerequisites. $(1) is the name of the test.
+# When the test is a top-level make target, a summary of the tests that ran is produced. Executed
+# by the shell.
+define ART_TEST_PREREQ_FINISHED
+  (echo -e "$(1) \e[32mCOMPLETE\e[0m" && \
+    (echo $(MAKECMDGOALS) | grep -q -v $(1) || \
+      (([ -d $(ART_HOST_TEST_DIR)/passed/ ] \
+        && (echo -e "\e[92mPASSING TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/passed/) \
+        || (echo -e "\e[91mNO TESTS PASSED\e[0m")) && \
+      ([ -d $(ART_HOST_TEST_DIR)/skipped/ ] \
+        && (echo -e "\e[93mSKIPPED TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/skipped/) \
+        || (echo -e "\e[92mNO TESTS SKIPPED\e[0m")) && \
+      ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \
+        && (echo -e "\e[91mFAILING TESTS\e[0m" >&2 && ls -1 $(ART_HOST_TEST_DIR)/failed/ >&2) \
+        || (echo -e "\e[92mNO TESTS FAILED\e[0m")) \
+      && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] && rm -r $(ART_HOST_TEST_DIR) \
+          || (rm -r $(ART_HOST_TEST_DIR) && false)))))
+endef
+
+# Define the command executed by the shell ahead of running an art test. $(1) is the name of the
+# test.
+define ART_TEST_SKIP
+  ((echo $(ART_TEST_KNOWN_BROKEN) | grep -q -v $(1) \
+     && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] || [ $(ART_TEST_KEEP_GOING) = true ])\
+     && $(call ART_TEST_ANNOUNCE_RUN,$(1)) ) \
+   || ((mkdir -p $(ART_HOST_TEST_DIR)/skipped/ && touch $(ART_HOST_TEST_DIR)/skipped/$(1) \
+     && ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \
+       && $(call ART_TEST_ANNOUNCE_SKIP_FAILURE,$(1)) ) \
+     || $(call ART_TEST_ANNOUNCE_SKIP_BROKEN,$(1)) ) && false))
+endef
+
+# Create a build rule to create the dex file for a test.
+# $(1): module prefix, e.g. art-test-dex
+# $(2): input test directory in art/test, e.g. HelloWorld
+# $(3): target output module path (default module path is used on host)
+# $(4): additional dependencies
+# $(5): a make variable used to collate target dependencies, e.g. ART_TEST_TARGET_OAT_HelloWorld_DEX
+# $(6): a make variable used to collate host dependencies, e.g. ART_TEST_HOST_OAT_HelloWorld_DEX
+#
+# If the input test directory contains files named main.list and main.jpp,
+# then a multi-dex file is created, passing main.list as the --main-dex-list
+# argument to dx and main.jpp to Jack.
+define build-art-test-dex
+  ifeq ($(ART_BUILD_TARGET),true)
+    include $(CLEAR_VARS)
+    LOCAL_MODULE := $(1)-$(2)
+    LOCAL_SRC_FILES := $(call all-java-files-under, $(2))
+    LOCAL_NO_STANDARD_LIBRARIES := true
+    LOCAL_DEX_PREOPT := false
+    LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
+    LOCAL_MODULE_TAGS := tests
+    LOCAL_JAVA_LIBRARIES := $(TARGET_CORE_JARS)
+    LOCAL_MODULE_PATH := $(3)
+    LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
+    ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+    endif
+    include $(BUILD_JAVA_LIBRARY)
+    $(5) := $$(LOCAL_INSTALLED_MODULE)
+  endif
+  ifeq ($(ART_BUILD_HOST),true)
+    include $(CLEAR_VARS)
+    LOCAL_MODULE := $(1)-$(2)
+    LOCAL_SRC_FILES := $(call all-java-files-under, $(2))
+    LOCAL_NO_STANDARD_LIBRARIES := true
+    LOCAL_DEX_PREOPT := false
+    LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4)
+    LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
+    LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
+    ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+      LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
+    endif
+    include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
+    $(6) := $$(LOCAL_INSTALLED_MODULE)
+  endif
+endef
+
+endif # ART_ANDROID_COMMON_TEST_MK
diff --git a/build/Android.common_utils.mk b/build/Android.common_utils.mk
new file mode 100644
index 000000000..8069c3a9b
--- /dev/null
+++ b/build/Android.common_utils.mk
@@ -0,0 +1,26 @@
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifndef ART_ANDROID_COMMON_UTILS_MK
+ART_ANDROID_COMMON_UTILS_MK = true
+
+#
+# Convert a string into an uppercase string.
+#
+# $(1): a string which should be made uppercase
+art-string-to-uppercase = $(shell echo $(1) | tr '[:lower:]' '[:upper:]')
+
+endif # ART_ANDROID_COMMON_UTILS_MK
diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk
new file mode 100644
index 000000000..a06f45a4f
--- /dev/null
+++ b/build/Android.cpplint.mk
@@ -0,0 +1,61 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +include art/build/Android.common_build.mk + +ART_CPPLINT := $(LOCAL_PATH)/tools/cpplint.py +ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf +ART_CPPLINT_FLAGS := --quiet +# This: +# 1) Gets a list of all .h & .cc files in the art directory. +# 2) Prepends 'art/' to each of them to make the full name. +# 3) removes art/runtime/elf.h from the list. +ART_CPPLINT_SRC := $(filter-out $(LOCAL_PATH)/runtime/elf.h, $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,*.h) $(call all-subdir-named-files,*$(ART_CPP_EXTENSION)))) + +# "mm cpplint-art" to verify we aren't regressing +.PHONY: cpplint-art +cpplint-art: + $(ART_CPPLINT) $(ART_CPPLINT_FILTER) $(ART_CPPLINT_SRC) + +# "mm cpplint-art-all" to see all warnings +.PHONY: cpplint-art-all +cpplint-art-all: + $(ART_CPPLINT) $(ART_CPPLINT_SRC) + +OUT_CPPLINT := $(TARGET_COMMON_OUT_ROOT)/cpplint + +ART_CPPLINT_TARGETS := + +define declare-art-cpplint-target +art_cpplint_file := $(1) +art_cpplint_touch := $$(OUT_CPPLINT)/$$(subst /,__,$$(art_cpplint_file)) + +$$(art_cpplint_touch): $$(art_cpplint_file) $(ART_CPPLINT) art/build/Android.cpplint.mk + $(hide) $(ART_CPPLINT) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_FILTER) $$< + $(hide) mkdir -p $$(dir $$@) + $(hide) touch $$@ + +ART_CPPLINT_TARGETS += $$(art_cpplint_touch) +endef + +$(foreach file, $(ART_CPPLINT_SRC), $(eval $(call declare-art-cpplint-target,$(file)))) +#$(info $(call declare-art-cpplint-target,$(firstword $(ART_CPPLINT_SRC)))) + +include $(CLEAR_VARS) +LOCAL_MODULE := cpplint-art-phony +LOCAL_MODULE_TAGS := optional +LOCAL_ADDITIONAL_DEPENDENCIES := $(ART_CPPLINT_TARGETS) +include $(BUILD_PHONY_PACKAGE) diff --git a/build/Android.executable.mk b/build/Android.executable.mk new file mode 100644 index 000000000..cb6d34058 --- /dev/null +++ b/build/Android.executable.mk @@ -0,0 +1,255 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+include art/build/Android.common_build.mk
+
+ART_HOST_EXECUTABLES ?=
+ART_TARGET_EXECUTABLES ?=
+
+ART_EXECUTABLES_CFLAGS :=
+
+# $(1): executable ("d" will be appended for debug version)
+# $(2): source
+# $(3): extra shared libraries
+# $(4): extra include directories
+# $(5): target or host
+# $(6): ndebug or debug
+# $(7): value for LOCAL_MULTILIB (empty means default)
+# $(8): static or shared (empty means shared, applies only for host)
+define build-art-executable
+  ifneq ($(5),target)
+    ifneq ($(5),host)
+      $$(error expected target or host for argument 5, received $(5))
+    endif
+  endif
+  ifneq ($(6),ndebug)
+    ifneq ($(6),debug)
+      $$(error expected ndebug or debug for argument 6, received $(6))
+    endif
+  endif
+
+  art_executable := $(1)
+  art_source := $(2)
+  art_libraries := $(3)
+  art_c_includes := $(4)
+  art_target_or_host := $(5)
+  art_ndebug_or_debug := $(6)
+  art_multilib := $(7)
+  art_static_or_shared := $(8)
+  art_out_binary_name :=
+
+  include $(CLEAR_VARS)
+  LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION)
+  LOCAL_MODULE_TAGS := optional
+  LOCAL_SRC_FILES := $$(art_source)
+  LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/cmdline $$(art_c_includes)
+
+  ifeq ($$(art_static_or_shared),static)
+    LOCAL_STATIC_LIBRARIES += $$(art_libraries)
+  else
+    LOCAL_SHARED_LIBRARIES += $$(art_libraries)
+  endif
+
+  ifeq ($$(art_ndebug_or_debug),ndebug)
+    LOCAL_MODULE := $$(art_executable)
+  else #debug
+    LOCAL_MODULE := $$(art_executable)d
+  endif
+
+  ifeq ($$(art_static_or_shared),static)
+    LOCAL_MODULE := $$(LOCAL_MODULE)s
+  endif
+
+  LOCAL_CFLAGS := $(ART_EXECUTABLES_CFLAGS)
+  # Mac OS linker doesn't understand --export-dynamic.
+  ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host)
+    LOCAL_LDFLAGS := -Wl,--export-dynamic
+  endif
+
+  ifeq ($$(art_target_or_host),target)
+    $(call set-target-local-clang-vars)
+    $(call set-target-local-cflags-vars,$(6))
+    LOCAL_SHARED_LIBRARIES += libdl
+  else # host
+    LOCAL_CLANG := $(ART_HOST_CLANG)
+    LOCAL_LDLIBS := $(ART_HOST_LDLIBS)
+    LOCAL_CFLAGS += $(ART_HOST_CFLAGS)
+    LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS)
+    ifeq ($$(art_ndebug_or_debug),debug)
+      LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS)
+    else
+      LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS)
+    endif
+    LOCAL_LDLIBS += -lpthread -ldl
+    ifeq ($$(art_static_or_shared),static)
+      LOCAL_LDFLAGS += -static
+      # We need this because GC stress mode makes use of _Unwind_GetIP and _Unwind_Backtrace and
+      # the symbols are also defined in libgcc_eh.a(unwind-dw2.o)
+      # TODO: Having this is not ideal as it might obscure errors. Try to get rid of it.
+      LOCAL_LDFLAGS += -z muldefs
+      ifeq ($$(HOST_OS),linux)
+        LOCAL_LDLIBS += -lrt -lncurses -ltinfo
+      endif
+      ifeq ($$(HOST_OS),darwin)
+        LOCAL_LDLIBS += -lncurses -ltinfo
+      endif
+    endif
+
+  endif
+
+  # If dynamically linked, add libart by default. Statically linked executables
+  # need to specify it in art_libraries to ensure proper ordering.
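+  # (For example, a hypothetical statically linked host tool would list libart and
+  # its other runtime libraries explicitly in argument $(3) rather than relying on
+  # this default.)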
+  ifeq ($$(art_ndebug_or_debug),ndebug)
+    ifneq ($$(art_static_or_shared),static)
+      LOCAL_SHARED_LIBRARIES += libart
+    endif
+  else # debug
+    ifneq ($$(art_static_or_shared),static)
+      LOCAL_SHARED_LIBRARIES += libartd
+    endif
+  endif
+
+  LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
+  LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.common_utils.mk
+  LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.executable.mk
+
+  ifeq ($$(art_target_or_host),target)
+    LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
+  endif
+
+  ifdef ART_MULTILIB_OVERRIDE_$$(art_target_or_host)
+    art_multilib := $$(ART_MULTILIB_OVERRIDE_$$(art_target_or_host))
+  endif
+
+  LOCAL_MULTILIB := $$(art_multilib)
+  art_out_binary_name := $$(LOCAL_MODULE)
+
+  # If multilib=both (potentially building both 32-bit and 64-bit), we need to provide a stem.
+  ifeq ($$(art_multilib),both)
+    # Set up a 32-bit/64-bit stem if we are building both binaries.
+    # In this case, the 32-bit binary has an additional 32-bit suffix.
+    LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)32
+    LOCAL_MODULE_STEM_64 := $$(LOCAL_MODULE)
+
+    # Remember the binary names so we can add them to the global art executables list later.
+    art_out_binary_name := $$(LOCAL_MODULE_STEM_32) $$(LOCAL_MODULE_STEM_64)
+
+    # For single-architecture targets, remove any binary name suffixes.
+    ifeq ($$(art_target_or_host),target)
+      ifeq (,$(TARGET_2ND_ARCH))
+        LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)
+        art_out_binary_name := $$(LOCAL_MODULE)
+      endif
+    endif
+
+    # For single-architecture hosts, remove any binary name suffixes.
+    ifeq ($$(art_target_or_host),host)
+      ifeq (,$(HOST_2ND_ARCH))
+        LOCAL_MODULE_STEM_32 := $$(LOCAL_MODULE)
+        art_out_binary_name := $$(LOCAL_MODULE)
+      endif
+    endif
+  endif
+
+  LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE)
+
+  ifeq ($$(art_target_or_host),target)
+    include $(BUILD_EXECUTABLE)
+    ART_TARGET_EXECUTABLES := $(ART_TARGET_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(TARGET_OUT_EXECUTABLES)/$$(name))
+  else # host
+    LOCAL_IS_HOST_MODULE := true
+    include $(BUILD_HOST_EXECUTABLE)
+    ART_HOST_EXECUTABLES := $(ART_HOST_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(HOST_OUT_EXECUTABLES)/$$(name))
+  endif
+
+  # Clear out local variables now that we're done with them.
+  art_executable :=
+  art_source :=
+  art_libraries :=
+  art_c_includes :=
+  art_target_or_host :=
+  art_ndebug_or_debug :=
+  art_multilib :=
+  art_static_or_shared :=
+  art_out_binary_name :=
+
+endef
+
+#
+# Build many art executables from multiple variations (debug/ndebug, host/target, 32/64-bit).
+# By default only either 32-bit or 64-bit is built (but not both -- see multilib arg).
+# All other variations are gated by ART_BUILD_(TARGET|HOST)_[N]DEBUG.
+# The result must be eval-uated.
+#
+# $(1): executable name
+# $(2): source files
+# $(3): library dependencies (common); debug prefix is added on as necessary automatically.
+# $(4): library dependencies (target only)
+# $(5): library dependencies (host only)
+# $(6): extra include directories
+# $(7): multilib (default: empty), valid values: {,32,64,both}
+# $(8): host prefer 32-bit: {true, false} (default: false). If argument
+#       `multilib` is explicitly set to 64, ignore the "host prefer 32-bit"
+#       setting and only build a 64-bit executable on host.
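+#
+# A hypothetical invocation (sketch; the module name, sources and libraries are
+# illustrative only):
+#
+# $(eval $(call build-art-multi-executable,myexec,myexec.cc,libart-compiler libbase,,,art/myexec,both,false))
+#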
+define build-art-multi-executable
+  $(foreach debug_flavor,ndebug debug,
+    $(foreach target_flavor,host target,
+      art-multi-binary-name := $(1)
+      art-multi-source-files := $(2)
+      art-multi-lib-dependencies := $(3)
+      art-multi-lib-dependencies-target := $(4)
+      art-multi-lib-dependencies-host := $(5)
+      art-multi-include-extra := $(6)
+      art-multi-multilib := $(7)
+      art-multi-host-prefer-32-bit := $(8)
+
+      # Add either -host or -target specific lib dependencies to the lib dependencies.
+      art-multi-lib-dependencies += $$(art-multi-lib-dependencies-$(target_flavor))
+
+      # Replace libart- prefix with libartd- for debug flavor.
+      ifeq ($(debug_flavor),debug)
+        art-multi-lib-dependencies := $$(subst libart-,libartd-,$$(art-multi-lib-dependencies))
+      endif
+
+      # Build the env guard var name, e.g. ART_BUILD_HOST_NDEBUG.
+      art-multi-env-guard := $$(call art-string-to-uppercase,ART_BUILD_$(target_flavor)_$(debug_flavor))
+
+      ifeq ($(target_flavor),host)
+        ifeq ($$(art-multi-host-prefer-32-bit),true)
+          ifneq ($$(art-multi-multilib),64)
+            art-multi-multilib := 32
+          endif
+        endif
+      endif
+
+      # Build the art executable only if the corresponding env guard was set.
+      ifeq ($$($$(art-multi-env-guard)),true)
+        $$(eval $$(call build-art-executable,$$(art-multi-binary-name),$$(art-multi-source-files),$$(art-multi-lib-dependencies),$$(art-multi-include-extra),$(target_flavor),$(debug_flavor),$$(art-multi-multilib)))
+      endif
+
+      # Clear locals now they've served their purpose.
+      art-multi-binary-name :=
+      art-multi-source-files :=
+      art-multi-lib-dependencies :=
+      art-multi-lib-dependencies-target :=
+      art-multi-lib-dependencies-host :=
+      art-multi-include-extra :=
+      art-multi-multilib :=
+      art-multi-host-prefer-32-bit :=
+      art-multi-env-guard :=
+    )
+  )
+endef
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
new file mode 100644
index 000000000..19af14de1
--- /dev/null
+++ b/build/Android.gtest.mk
@@ -0,0 +1,786 @@
+#
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# The path to which all the dex files are relative; it is not actually the current directory.
+LOCAL_PATH := art/test
+
+include art/build/Android.common_test.mk
+include art/build/Android.common_path.mk
+include art/build/Android.common_build.mk
+
+# Subdirectories in art/test which contain dex files used as inputs for gtests.
+GTEST_DEX_DIRECTORIES := \
+  AbstractMethod \
+  AllFields \
+  ExceptionHandle \
+  GetMethodSignature \
+  ImageLayoutA \
+  ImageLayoutB \
+  Instrumentation \
+  Interfaces \
+  Lookup \
+  Main \
+  MultiDex \
+  MultiDexModifiedSecondary \
+  MyClass \
+  MyClassNatives \
+  Nested \
+  NonStaticLeafMethods \
+  Packages \
+  ProtoCompare \
+  ProtoCompare2 \
+  ProfileTestMultiDex \
+  StaticLeafMethods \
+  Statics \
+  StaticsFromCode \
+  Transaction \
+  XandY
+
+# Create build rules for each dex file, recording the dependency.
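+# For instance, the Main directory yields a module named art-gtest-Main whose built dex
+# files are recorded in ART_TEST_TARGET_GTEST_Main_DEX and ART_TEST_HOST_GTEST_Main_DEX.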
+$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gtest,$(dir), \ + $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_$(dir)_DEX, \ + ART_TEST_HOST_GTEST_$(dir)_DEX))) + +# Create rules for MainStripped, a copy of Main with the classes.dex stripped +# for the oat file assistant tests. +ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) +ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) + +$(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) + cp $< $@ + $(call dexpreopt-remove-classes.dex,$@) + +$(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) + cp $< $@ + $(call dexpreopt-remove-classes.dex,$@) + +# Dex file dependencies for each gtest. +ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested + +ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode +ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex +ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages +ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested +ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) +ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle +ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB +ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation +ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives +ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods +ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) +ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex +ART_GTEST_oat_test_DEX_DEPS := Main +ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY +ART_GTEST_proxy_test_DEX_DEPS := Interfaces +ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods +ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_profile_compilation_info_test_DEX_DEPS := ProfileTestMultiDex +ART_GTEST_stub_test_DEX_DEPS := AllFields +ART_GTEST_transaction_test_DEX_DEPS := Transaction +ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup + +# The elf writer test has dependencies on core.oat. +ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32) +ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_default_no-pic_64) $(TARGET_CORE_IMAGE_default_no-pic_32) + +ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \ + $(HOST_CORE_IMAGE_default_no-pic_64) \ + $(HOST_CORE_IMAGE_default_no-pic_32) +ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \ + $(TARGET_CORE_IMAGE_default_no-pic_64) \ + $(TARGET_CORE_IMAGE_default_no-pic_32) + +ART_GTEST_oat_file_assistant_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ + $(HOST_OUT_EXECUTABLES)/patchoatd +ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ + $(TARGET_OUT_EXECUTABLES)/patchoatd + + +ART_GTEST_dex2oat_test_HOST_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) +ART_GTEST_dex2oat_test_TARGET_DEPS := \ + $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) + +# TODO: document why this is needed. 
+ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
+
+# The dexdump test requires an image and the dexdump utility.
+# TODO: rename to dexdump when the migration completes.
+ART_GTEST_dexdump_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/dexdump2
+ART_GTEST_dexdump_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  dexdump2
+
+# The dexlist test requires an image and the dexlist utility.
+ART_GTEST_dexlist_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/dexlist
+ART_GTEST_dexlist_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  dexlist
+
+# The imgdiag test has dependencies on core.oat since it needs to load it during the test.
+# For the host, also add the installed tool (in the base size, that should suffice). For the
+# target, just the module is fine; the sync will happen late enough.
+ART_GTEST_imgdiag_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/imgdiagd
+ART_GTEST_imgdiag_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  imgdiagd
+
+# The oatdump test requires an image and an oat file to dump.
+ART_GTEST_oatdump_test_HOST_DEPS := \
+  $(HOST_CORE_IMAGE_default_no-pic_64) \
+  $(HOST_CORE_IMAGE_default_no-pic_32) \
+  $(HOST_OUT_EXECUTABLES)/oatdumpd
+ART_GTEST_oatdump_test_TARGET_DEPS := \
+  $(TARGET_CORE_IMAGE_default_no-pic_64) \
+  $(TARGET_CORE_IMAGE_default_no-pic_32) \
+  oatdump
+
+# The profile assistant tests require the profman utility.
+ART_GTEST_profile_assistant_test_HOST_DEPS := \
+  $(HOST_OUT_EXECUTABLES)/profmand
+ART_GTEST_profile_assistant_test_TARGET_DEPS := \
+  profman
+
+# The path to which all the source files are relative; it is not actually the current directory.
+LOCAL_PATH := art + +RUNTIME_GTEST_COMMON_SRC_FILES := \ + cmdline/cmdline_parser_test.cc \ + dexdump/dexdump_test.cc \ + dexlist/dexlist_test.cc \ + dex2oat/dex2oat_test.cc \ + imgdiag/imgdiag_test.cc \ + oatdump/oatdump_test.cc \ + profman/profile_assistant_test.cc \ + runtime/arch/arch_test.cc \ + runtime/arch/instruction_set_test.cc \ + runtime/arch/instruction_set_features_test.cc \ + runtime/arch/memcmp16_test.cc \ + runtime/arch/stub_test.cc \ + runtime/arch/arm/instruction_set_features_arm_test.cc \ + runtime/arch/arm64/instruction_set_features_arm64_test.cc \ + runtime/arch/mips/instruction_set_features_mips_test.cc \ + runtime/arch/mips64/instruction_set_features_mips64_test.cc \ + runtime/arch/x86/instruction_set_features_x86_test.cc \ + runtime/arch/x86_64/instruction_set_features_x86_64_test.cc \ + runtime/barrier_test.cc \ + runtime/base/arena_allocator_test.cc \ + runtime/base/bit_field_test.cc \ + runtime/base/bit_utils_test.cc \ + runtime/base/bit_vector_test.cc \ + runtime/base/hash_set_test.cc \ + runtime/base/hex_dump_test.cc \ + runtime/base/histogram_test.cc \ + runtime/base/mutex_test.cc \ + runtime/base/scoped_flock_test.cc \ + runtime/base/stringprintf_test.cc \ + runtime/base/time_utils_test.cc \ + runtime/base/timing_logger_test.cc \ + runtime/base/variant_map_test.cc \ + runtime/base/unix_file/fd_file_test.cc \ + runtime/class_linker_test.cc \ + runtime/compiler_filter_test.cc \ + runtime/dex_file_test.cc \ + runtime/dex_file_verifier_test.cc \ + runtime/dex_instruction_test.cc \ + runtime/dex_instruction_visitor_test.cc \ + runtime/dex_method_iterator_test.cc \ + runtime/entrypoints/math_entrypoints_test.cc \ + runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc \ + runtime/entrypoints_order_test.cc \ + runtime/gc/accounting/card_table_test.cc \ + runtime/gc/accounting/mod_union_table_test.cc \ + runtime/gc/accounting/space_bitmap_test.cc \ + runtime/gc/collector/immune_spaces_test.cc \ + runtime/gc/heap_test.cc \ + runtime/gc/reference_queue_test.cc \ + runtime/gc/space/dlmalloc_space_static_test.cc \ + runtime/gc/space/dlmalloc_space_random_test.cc \ + runtime/gc/space/large_object_space_test.cc \ + runtime/gc/space/rosalloc_space_static_test.cc \ + runtime/gc/space/rosalloc_space_random_test.cc \ + runtime/gc/space/space_create_test.cc \ + runtime/gc/task_processor_test.cc \ + runtime/gtest_test.cc \ + runtime/handle_scope_test.cc \ + runtime/indenter_test.cc \ + runtime/indirect_reference_table_test.cc \ + runtime/instrumentation_test.cc \ + runtime/intern_table_test.cc \ + runtime/interpreter/safe_math_test.cc \ + runtime/interpreter/unstarted_runtime_test.cc \ + runtime/java_vm_ext_test.cc \ + runtime/jit/profile_compilation_info_test.cc \ + runtime/lambda/closure_test.cc \ + runtime/lambda/shorty_field_type_test.cc \ + runtime/leb128_test.cc \ + runtime/mem_map_test.cc \ + runtime/memory_region_test.cc \ + runtime/mirror/dex_cache_test.cc \ + runtime/mirror/object_test.cc \ + runtime/monitor_pool_test.cc \ + runtime/monitor_test.cc \ + runtime/oat_file_test.cc \ + runtime/oat_file_assistant_test.cc \ + runtime/parsed_options_test.cc \ + runtime/prebuilt_tools_test.cc \ + runtime/reference_table_test.cc \ + runtime/thread_pool_test.cc \ + runtime/transaction_test.cc \ + runtime/type_lookup_table_test.cc \ + runtime/utf_test.cc \ + runtime/utils_test.cc \ + runtime/verifier/method_verifier_test.cc \ + runtime/verifier/reg_type_test.cc \ + runtime/zip_archive_test.cc + +COMPILER_GTEST_COMMON_SRC_FILES := \ + runtime/jni_internal_test.cc \ + 
runtime/proxy_test.cc \ + runtime/reflection_test.cc \ + compiler/compiled_method_test.cc \ + compiler/debug/dwarf/dwarf_test.cc \ + compiler/driver/compiled_method_storage_test.cc \ + compiler/driver/compiler_driver_test.cc \ + compiler/elf_writer_test.cc \ + compiler/exception_test.cc \ + compiler/image_test.cc \ + compiler/jni/jni_compiler_test.cc \ + compiler/linker/multi_oat_relative_patcher_test.cc \ + compiler/linker/output_stream_test.cc \ + compiler/oat_test.cc \ + compiler/optimizing/bounds_check_elimination_test.cc \ + compiler/optimizing/dominator_test.cc \ + compiler/optimizing/find_loops_test.cc \ + compiler/optimizing/graph_checker_test.cc \ + compiler/optimizing/graph_test.cc \ + compiler/optimizing/gvn_test.cc \ + compiler/optimizing/induction_var_analysis_test.cc \ + compiler/optimizing/induction_var_range_test.cc \ + compiler/optimizing/licm_test.cc \ + compiler/optimizing/live_interval_test.cc \ + compiler/optimizing/nodes_test.cc \ + compiler/optimizing/parallel_move_test.cc \ + compiler/optimizing/pretty_printer_test.cc \ + compiler/optimizing/reference_type_propagation_test.cc \ + compiler/optimizing/side_effects_test.cc \ + compiler/optimizing/ssa_test.cc \ + compiler/optimizing/stack_map_test.cc \ + compiler/optimizing/suspend_check_test.cc \ + compiler/utils/dedupe_set_test.cc \ + compiler/utils/intrusive_forward_list_test.cc \ + compiler/utils/swap_space_test.cc \ + compiler/utils/test_dex_file_builder_test.cc \ + +COMPILER_GTEST_COMMON_SRC_FILES_all := \ + compiler/jni/jni_cfi_test.cc \ + compiler/optimizing/codegen_test.cc \ + compiler/optimizing/constant_folding_test.cc \ + compiler/optimizing/dead_code_elimination_test.cc \ + compiler/optimizing/linearize_test.cc \ + compiler/optimizing/liveness_test.cc \ + compiler/optimizing/live_ranges_test.cc \ + compiler/optimizing/optimizing_cfi_test.cc \ + compiler/optimizing/register_allocator_test.cc \ + +COMPILER_GTEST_COMMON_SRC_FILES_arm := \ + compiler/linker/arm/relative_patcher_thumb2_test.cc \ + compiler/utils/arm/managed_register_arm_test.cc \ + +COMPILER_GTEST_COMMON_SRC_FILES_arm64 := \ + compiler/linker/arm64/relative_patcher_arm64_test.cc \ + compiler/utils/arm64/managed_register_arm64_test.cc \ + +COMPILER_GTEST_COMMON_SRC_FILES_mips := \ + +COMPILER_GTEST_COMMON_SRC_FILES_mips64 := \ + +COMPILER_GTEST_COMMON_SRC_FILES_x86 := \ + compiler/linker/x86/relative_patcher_x86_test.cc \ + compiler/utils/x86/managed_register_x86_test.cc \ + +COMPILER_GTEST_COMMON_SRC_FILES_x86_64 := \ + compiler/linker/x86_64/relative_patcher_x86_64_test.cc \ + +RUNTIME_GTEST_TARGET_SRC_FILES := \ + $(RUNTIME_GTEST_COMMON_SRC_FILES) + +RUNTIME_GTEST_HOST_SRC_FILES := \ + $(RUNTIME_GTEST_COMMON_SRC_FILES) + +COMPILER_GTEST_TARGET_SRC_FILES := \ + $(COMPILER_GTEST_COMMON_SRC_FILES) + +COMPILER_GTEST_TARGET_SRC_FILES_all := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_all) \ + +COMPILER_GTEST_TARGET_SRC_FILES_arm := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \ + +COMPILER_GTEST_TARGET_SRC_FILES_arm64 := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \ + +COMPILER_GTEST_TARGET_SRC_FILES_mips := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \ + +COMPILER_GTEST_TARGET_SRC_FILES_mips64 := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \ + +COMPILER_GTEST_TARGET_SRC_FILES_x86 := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \ + +COMPILER_GTEST_TARGET_SRC_FILES_x86_64 := \ + $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \ + +$(foreach arch,$(ART_TARGET_CODEGEN_ARCHS),$(eval COMPILER_GTEST_TARGET_SRC_FILES += $$(COMPILER_GTEST_TARGET_SRC_FILES_$(arch)))) 
+ifeq (true,$(ART_TARGET_COMPILER_TESTS))
+  COMPILER_GTEST_TARGET_SRC_FILES += $(COMPILER_GTEST_TARGET_SRC_FILES_all)
+endif
+
+COMPILER_GTEST_HOST_SRC_FILES := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES) \
+
+COMPILER_GTEST_HOST_SRC_FILES_all := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_all) \
+
+COMPILER_GTEST_HOST_SRC_FILES_arm := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm) \
+  compiler/utils/arm/assembler_arm32_test.cc \
+  compiler/utils/arm/assembler_thumb2_test.cc \
+  compiler/utils/assembler_thumb_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_arm64) \
+
+COMPILER_GTEST_HOST_SRC_FILES_mips := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
+  compiler/utils/mips/assembler_mips_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
+  compiler/utils/mips64/assembler_mips64_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_x86 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86) \
+  compiler/utils/x86/assembler_x86_test.cc \
+
+COMPILER_GTEST_HOST_SRC_FILES_x86_64 := \
+  $(COMPILER_GTEST_COMMON_SRC_FILES_x86_64) \
+  compiler/utils/x86_64/assembler_x86_64_test.cc
+
+$(foreach arch,$(ART_HOST_CODEGEN_ARCHS),$(eval COMPILER_GTEST_HOST_SRC_FILES += $$(COMPILER_GTEST_HOST_SRC_FILES_$(arch))))
+ifeq (true,$(ART_HOST_COMPILER_TESTS))
+  COMPILER_GTEST_HOST_SRC_FILES += $(COMPILER_GTEST_HOST_SRC_FILES_all)
+endif
+
+ART_TEST_CFLAGS :=
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := libart-gtest
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
+LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/cmdline art/compiler
+LOCAL_SHARED_LIBRARIES := libartd libartd-compiler libdl
+LOCAL_STATIC_LIBRARIES += libgtest
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
+$(eval $(call set-target-local-clang-vars))
+$(eval $(call set-target-local-cflags-vars,debug))
+LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := libart-gtest
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := cc
+LOCAL_CFLAGS := $(ART_HOST_CFLAGS)
+LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS)
+LOCAL_SRC_FILES := runtime/common_runtime_test.cc compiler/common_compiler_test.cc
+LOCAL_C_INCLUDES := $(ART_C_INCLUDES) art/runtime art/cmdline art/compiler
+LOCAL_SHARED_LIBRARIES := libartd libartd-compiler
+LOCAL_STATIC_LIBRARIES := libgtest_host
+LOCAL_LDLIBS += -ldl -lpthread
+LOCAL_MULTILIB := both
+LOCAL_CLANG := $(ART_HOST_CLANG)
+LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue
+LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk
+LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk
+include $(BUILD_HOST_SHARED_LIBRARY)
+
+# Variables holding collections of gtest prerequisites used to run a number of gtests.
+ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_GTEST_RULES :=
+ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+ART_TEST_HOST_VALGRIND_GTEST_RULES :=
+ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
+ART_TEST_TARGET_GTEST_RULES :=
+ART_TEST_HOST_GTEST_DEPENDENCIES :=
+
+ART_GTEST_TARGET_ANDROID_ROOT := '/system'
+ifneq ($(ART_TEST_ANDROID_ROOT),)
+  ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
+endif
+
+# Define a make rule for a target device gtest.
+# $(1): gtest name - the name of the test we're building such as leb128_test.
+# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
+# $(3): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/
+define define-art-gtest-rule-target
+  gtest_rule := test-art-target-gtest-$(1)$$($(2)ART_PHONY_TEST_TARGET_SUFFIX)
+
+  # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
+  # to ensure files are pushed to the device.
+  TEST_ART_TARGET_SYNC_DEPS += \
+    $$(ART_GTEST_$(1)_TARGET_DEPS) \
+    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
+    $$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \
+    $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
+    $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
+    $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
+
+.PHONY: $$(gtest_rule)
+$$(gtest_rule): test-art-target-sync
+	$(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID
+	$(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID
+	$(hide) adb shell chmod 755 $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1)
+	$(hide) $$(call ART_TEST_SKIP,$$@) && \
+	  (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
+	    $(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
+	  && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
+	      && $$(call ART_TEST_PASSED,$$@)) \
+	  || $$(call ART_TEST_FAILED,$$@))
+	$(hide) rm -f /tmp/$$@-$$$$PPID
+
+  ART_TEST_TARGET_GTEST$($(2)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule)
+  ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule)
+  ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule)
+
+  # Clear locally defined variables.
+  gtest_rule :=
+endef  # define-art-gtest-rule-target
+
+ART_VALGRIND_DEPENDENCIES := \
+  $(HOST_OUT_EXECUTABLES)/valgrind \
+  $(HOST_OUT)/lib64/valgrind/memcheck-amd64-linux \
+  $(HOST_OUT)/lib64/valgrind/memcheck-x86-linux \
+  $(HOST_OUT)/lib64/valgrind/default.supp \
+  $(HOST_OUT)/lib64/valgrind/vgpreload_core-amd64-linux.so \
+  $(HOST_OUT)/lib64/valgrind/vgpreload_core-x86-linux.so \
+  $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-amd64-linux.so \
+  $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-x86-linux.so
+
+# Define make rules for a host gtest.
+# $(1): gtest name - the name of the test we're building such as leb128_test.
+# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
+define define-art-gtest-rule-host + gtest_rule := test-art-host-gtest-$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX) + gtest_exe := $$(HOST_OUT_EXECUTABLES)/$(1)$$($(2)ART_PHONY_TEST_HOST_SUFFIX) + # Dependencies for all host gtests. + gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \ + $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \ + $$($(2)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \ + $$(gtest_exe) \ + $$(ART_GTEST_$(1)_HOST_DEPS) \ + $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) + + ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps) + +.PHONY: $$(gtest_rule) +$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) + $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && $$(call ART_TEST_PASSED,$$@)) \ + || $$(call ART_TEST_FAILED,$$@) + + ART_TEST_HOST_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) + ART_TEST_HOST_GTEST_RULES += $$(gtest_rule) + ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule) + + +.PHONY: valgrind-$$(gtest_rule) +valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIES) + $(hide) $$(call ART_TEST_SKIP,$$@) && \ + VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \ + $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \ + --suppressions=art/test/valgrind-suppressions.txt $$< && \ + $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@) + + ART_TEST_HOST_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule) + ART_TEST_HOST_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule) + ART_TEST_HOST_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule) + + # Clear locally defined variables. + valgrind_gtest_rule := + gtest_rule := + gtest_exe := + gtest_deps := +endef # define-art-gtest-rule-host + +# Define the rules to build and run host and target gtests. +# $(1): target or host +# $(2): file name +# $(3): extra C includes +# $(4): extra shared libraries +define define-art-gtest + ifneq ($(1),target) + ifneq ($(1),host) + $$(error expected target or host for argument 1, received $(1)) + endif + endif + + art_target_or_host := $(1) + art_gtest_filename := $(2) + art_gtest_extra_c_includes := $(3) + art_gtest_extra_shared_libraries := $(4) + + include $$(CLEAR_VARS) + art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) + LOCAL_MODULE := $$(art_gtest_name) + ifeq ($$(art_target_or_host),target) + LOCAL_MODULE_TAGS := tests + endif + LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION) + LOCAL_SRC_FILES := $$(art_gtest_filename) + LOCAL_C_INCLUDES += $$(ART_C_INCLUDES) art/runtime art/cmdline $$(art_gtest_extra_c_includes) + LOCAL_SHARED_LIBRARIES += libartd $$(art_gtest_extra_shared_libraries) libart-gtest libartd-disassembler + LOCAL_WHOLE_STATIC_LIBRARIES += libsigchain + + LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk + LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.gtest.mk + + # Mac OS linker doesn't understand --export-dynamic. + ifneq ($$(HOST_OS)-$$(art_target_or_host),darwin-host) + # Allow jni_compiler_test to find Java_MyClassNatives_bar within itself using dlopen(NULL, ...). 
+ LOCAL_LDFLAGS := -Wl,--export-dynamic -Wl,-u,Java_MyClassNatives_bar -Wl,-u,Java_MyClassNatives_sbar + endif + + LOCAL_CFLAGS := $$(ART_TEST_CFLAGS) + ifeq ($$(art_target_or_host),target) + $$(eval $$(call set-target-local-clang-vars)) + $$(eval $$(call set-target-local-cflags-vars,debug)) + LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl + LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32) + LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64) + LOCAL_MULTILIB := both + LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue + include $$(BUILD_EXECUTABLE) + library_path := + 2nd_library_path := + ifneq ($$(ART_TEST_ANDROID_ROOT),) + ifdef TARGET_2ND_ARCH + 2nd_library_path := $$(ART_TEST_ANDROID_ROOT)/lib + library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 + else + ifneq ($(filter %64,$(TARGET_ARCH)),) + library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 + else + library_path := $$(ART_TEST_ANDROID_ROOT)/lib + endif + endif + endif + + ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := + ifdef TARGET_2ND_ARCH + $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),2ND_,$$(2nd_library_path))) + endif + $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),,$$(library_path))) + + # A rule to run the different architecture versions of the gtest. +.PHONY: test-art-target-gtest-$$(art_gtest_name) +test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES) + $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + + # Clear locally defined variables. + ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := + else # host + LOCAL_CLANG := $$(ART_HOST_CLANG) + LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS) + LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) + LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl + LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl + LOCAL_IS_HOST_MODULE := true + LOCAL_MULTILIB := both + LOCAL_MODULE_STEM_32 := $$(art_gtest_name)32 + LOCAL_MODULE_STEM_64 := $$(art_gtest_name)64 + LOCAL_CLANG_CFLAGS += -Wno-used-but-marked-unused -Wno-deprecated -Wno-missing-noreturn # gtest issue + include $$(BUILD_HOST_EXECUTABLE) + + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := + ifneq ($$(HOST_PREFER_32_BIT),true) + $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),2ND_)) + endif + $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),)) + + # Rules to run the different architecture versions of the gtest. +.PHONY: test-art-host-gtest-$$(art_gtest_name) +test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES) + $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + +.PHONY: valgrind-test-art-host-gtest-$$(art_gtest_name) +valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES) + $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + + # Clear locally defined variables. + ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := + ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := + endif # host_or_target + + # Clear locally defined variables. 
+ art_target_or_host := + art_gtest_filename := + art_gtest_extra_c_includes := + art_gtest_extra_shared_libraries := + art_gtest_name := + library_path := + 2nd_library_path := +endef # define-art-gtest + + +ifeq ($(ART_BUILD_TARGET),true) + $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace))) + $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) +endif +ifeq ($(ART_BUILD_HOST),true) + $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace))) + $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) +endif + +# Used outside the art project to get a list of the current tests +RUNTIME_TARGET_GTEST_MAKE_TARGETS := +$(foreach file, $(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) +COMPILER_TARGET_GTEST_MAKE_TARGETS := +$(foreach file, $(COMPILER_GTEST_TARGET_SRC_FILES), $(eval COMPILER_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) + +# Define all the combinations of host/target, valgrind and suffix such as: +# test-art-host-gtest or valgrind-test-art-host-gtest64 +# $(1): host or target +# $(2): HOST or TARGET +# $(3): valgrind- or undefined +# $(4): undefined, 32 or 64 +define define-test-art-gtest-combination + ifeq ($(1),host) + ifneq ($(2),HOST) + $$(error argument mismatch $(1) and ($2)) + endif + else + ifneq ($(1),target) + $$(error found $(1) expected host or target) + endif + ifneq ($(2),TARGET) + $$(error argument mismatch $(1) and ($2)) + endif + endif + + rule_name := $(3)test-art-$(1)-gtest$(4) + ifeq ($(3),valgrind-) + ifneq ($(1),host) + $$(error valgrind tests only wired up for the host) + endif + dependencies := $$(ART_TEST_$(2)_VALGRIND_GTEST$(4)_RULES) + else + dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES) + endif + +.PHONY: $$(rule_name) +$$(rule_name): $$(dependencies) + $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) + + # Clear locally defined variables. + rule_name := + dependencies := +endef # define-test-art-gtest-combination + +$(eval $(call define-test-art-gtest-combination,target,TARGET,,)) +$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(ART_PHONY_TEST_TARGET_SUFFIX))) +ifdef TARGET_2ND_ARCH +$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))) +endif +$(eval $(call define-test-art-gtest-combination,host,HOST,,)) +$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,)) +$(eval $(call define-test-art-gtest-combination,host,HOST,,$(ART_PHONY_TEST_HOST_SUFFIX))) +$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(ART_PHONY_TEST_HOST_SUFFIX))) +ifneq ($(HOST_PREFER_32_BIT),true) +$(eval $(call define-test-art-gtest-combination,host,HOST,,$(2ND_ART_PHONY_TEST_HOST_SUFFIX))) +$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(2ND_ART_PHONY_TEST_HOST_SUFFIX))) +endif + +# Clear locally defined variables. 
+define-art-gtest-rule-target := +define-art-gtest-rule-host := +define-art-gtest := +define-test-art-gtest-combination := +RUNTIME_GTEST_COMMON_SRC_FILES := +COMPILER_GTEST_COMMON_SRC_FILES := +RUNTIME_GTEST_TARGET_SRC_FILES := +RUNTIME_GTEST_HOST_SRC_FILES := +COMPILER_GTEST_TARGET_SRC_FILES := +COMPILER_GTEST_HOST_SRC_FILES := +ART_TEST_CFLAGS := +ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_GTEST_RULES := +ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := +ART_TEST_HOST_VALGRIND_GTEST_RULES := +ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := +ART_TEST_TARGET_GTEST_RULES := +ART_GTEST_TARGET_ANDROID_ROOT := +ART_GTEST_class_linker_test_DEX_DEPS := +ART_GTEST_compiler_driver_test_DEX_DEPS := +ART_GTEST_dex_file_test_DEX_DEPS := +ART_GTEST_exception_test_DEX_DEPS := +ART_GTEST_elf_writer_test_HOST_DEPS := +ART_GTEST_elf_writer_test_TARGET_DEPS := +ART_GTEST_jni_compiler_test_DEX_DEPS := +ART_GTEST_jni_internal_test_DEX_DEPS := +ART_GTEST_oat_file_assistant_test_DEX_DEPS := +ART_GTEST_oat_file_assistant_test_HOST_DEPS := +ART_GTEST_oat_file_assistant_test_TARGET_DEPS := +ART_GTEST_dex2oat_test_DEX_DEPS := +ART_GTEST_dex2oat_test_HOST_DEPS := +ART_GTEST_dex2oat_test_TARGET_DEPS := +ART_GTEST_object_test_DEX_DEPS := +ART_GTEST_proxy_test_DEX_DEPS := +ART_GTEST_reflection_test_DEX_DEPS := +ART_GTEST_stub_test_DEX_DEPS := +ART_GTEST_transaction_test_DEX_DEPS := +ART_GTEST_dex2oat_environment_tests_DEX_DEPS := +ART_VALGRIND_DEPENDENCIES := +$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=)) +$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=)) +ART_TEST_HOST_GTEST_MainStripped_DEX := +ART_TEST_TARGET_GTEST_MainStripped_DEX := +GTEST_DEX_DIRECTORIES := +LOCAL_PATH := diff --git a/build/Android.oat.mk b/build/Android.oat.mk new file mode 100644 index 000000000..884f698cd --- /dev/null +++ b/build/Android.oat.mk @@ -0,0 +1,315 @@ +# +# Copyright (C) 2011 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +######################################################################## +# Rules to build a smaller "core" image to support core libraries +# (that is, non-Android frameworks) testing on the host and target +# +# The main rules to build the default "boot" image are in +# build/core/dex_preopt_libart.mk + +include art/build/Android.common_build.mk + +LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := +ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default +else + LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) +endif +LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := +ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) + LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default +else + LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) +endif + +# Use dex2oat debug version for better error reporting +# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(2): pic/no-pic +# $(3): 2ND_ or undefined, 2ND_ for 32-bit host builds. +# $(4): wrapper, e.g., valgrind. +# $(5): dex2oat suffix, e.g, valgrind requires 32 right now. +# $(6): multi-image. +# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for +# run-test --no-image +define create-core-oat-host-rules + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_pic_infix := + core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) + + ifeq ($(1),default) + core_compile_options += --compiler-backend=Quick + endif + ifeq ($(1),optimizing) + core_compile_options += --compiler-backend=Optimizing + core_dex2oat_dependency := $(DEX2OAT) + core_infix := -optimizing + endif + ifeq ($(1),interpreter) + core_compile_options += --compiler-filter=interpret-only + core_infix := -interpreter + endif + ifeq ($(1),interp-ac) + core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail + core_infix := -interp-ac + endif + ifeq ($(1),jit) + core_compile_options += --compiler-filter=verify-at-runtime + core_infix := -jit + endif + ifeq ($(1),default) + # Default has no infix, no compile options. + endif + ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) + #Technically this test is not precise, but hopefully good enough. + $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) + endif + + ifeq ($(2),pic) + core_compile_options += --compile-pic + core_pic_infix := -pic + endif + ifeq ($(2),no-pic) + # No change for non-pic + endif + ifneq ($(filter-out pic no-pic,$(2)),) + # Technically this test is not precise, but hopefully good enough. + $$(error found $(2) expected pic or no-pic) + endif + + # If $(6) is true, generate a multi-image. 
+ ifeq ($(6),true) + core_multi_infix := -multi + core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar + core_multi_group := _multi + else + core_multi_infix := + core_multi_param := + core_multi_group := + endif + + core_image_name := $($(3)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$$(core_multi_infix)$(4)$(CORE_IMG_SUFFIX) + core_oat_name := $($(3)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$$(core_multi_infix)$(4)$(CORE_OAT_SUFFIX) + + # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. + ifeq ($(3),) + $(4)HOST_CORE_IMAGE_$(1)_$(2)$$(core_multi_group)_64 := $$(core_image_name) + else + $(4)HOST_CORE_IMAGE_$(1)_$(2)$$(core_multi_group)_32 := $$(core_image_name) + endif + $(4)HOST_CORE_IMG_OUTS += $$(core_image_name) + $(4)HOST_CORE_OAT_OUTS += $$(core_oat_name) + + # If we have a wrapper, make the target phony. + ifneq ($(4),) +.PHONY: $$(core_image_name) + endif +$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) +$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) +$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) +$$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param) +$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency) + @echo "host dex2oat: $$@" + @mkdir -p $$(dir $$@) + $$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \ + $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \ + --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(3)ART_HOST_ARCH) \ + $$(LOCAL_$(3)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \ + --host --android-root=$$(HOST_OUT) --include-patch-information --generate-debug-info \ + $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS) + +$$(core_oat_name): $$(core_image_name) + + # Clean up locally used variables. + core_dex2oat_dependency := + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_pic_infix := +endef # create-core-oat-host-rules + +# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(2): wrapper. +# $(3): dex2oat suffix. +# $(4): multi-image. 
+define create-core-oat-host-rule-combination + $(call create-core-oat-host-rules,$(1),no-pic,,$(2),$(3),$(4)) + $(call create-core-oat-host-rules,$(1),pic,,$(2),$(3),$(4)) + + ifneq ($(HOST_PREFER_32_BIT),true) + $(call create-core-oat-host-rules,$(1),no-pic,2ND_,$(2),$(3),$(4)) + $(call create-core-oat-host-rules,$(1),pic,2ND_,$(2),$(3),$(4)) + endif +endef + +$(eval $(call create-core-oat-host-rule-combination,default,,,false)) +$(eval $(call create-core-oat-host-rule-combination,optimizing,,,false)) +$(eval $(call create-core-oat-host-rule-combination,interpreter,,,false)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false)) +$(eval $(call create-core-oat-host-rule-combination,jit,,,false)) +$(eval $(call create-core-oat-host-rule-combination,default,,,true)) +$(eval $(call create-core-oat-host-rule-combination,optimizing,,,true)) +$(eval $(call create-core-oat-host-rule-combination,interpreter,,,true)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true)) +$(eval $(call create-core-oat-host-rule-combination,jit,,,true)) + +valgrindHOST_CORE_IMG_OUTS := +valgrindHOST_CORE_OAT_OUTS := +$(eval $(call create-core-oat-host-rule-combination,default,valgrind,32,false)) +$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false)) +$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false)) +$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false)) +$(eval $(call create-core-oat-host-rule-combination,jit,valgrind,32,false)) + +valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS) + +test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS) + +define create-core-oat-target-rules + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_pic_infix := + core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) + + ifeq ($(1),default) + core_compile_options += --compiler-backend=Quick + endif + ifeq ($(1),optimizing) + core_compile_options += --compiler-backend=Optimizing + # With the optimizing compiler, we want to rerun dex2oat whenever there is + # a dex2oat change to catch regressions early. + core_dex2oat_dependency := $(DEX2OAT) + core_infix := -optimizing + endif + ifeq ($(1),interpreter) + core_compile_options += --compiler-filter=interpret-only + core_infix := -interpreter + endif + ifeq ($(1),interp-ac) + core_compile_options += --compiler-filter=verify-at-runtime --runtime-arg -Xverify:softfail + core_infix := -interp-ac + endif + ifeq ($(1),jit) + core_compile_options += --compiler-filter=verify-at-runtime + core_infix := -jit + endif + ifeq ($(1),default) + # Default has no infix, no compile options. + endif + ifneq ($(filter-out default interpreter interp-ac jit optimizing,$(1)),) + # Technically this test is not precise, but hopefully good enough. + $$(error found $(1) expected default, interpreter, interpreter-access-checks, jit or optimizing) + endif + + ifeq ($(2),pic) + core_compile_options += --compile-pic + core_pic_infix := -pic + endif + ifeq ($(2),no-pic) + # No change for non-pic + endif + ifneq ($(filter-out pic no-pic,$(2)),) + #Technically this test is not precise, but hopefully good enough. 
+ $$(error found $(2) expected pic or no-pic) + endif + + core_image_name := $($(3)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_IMG_SUFFIX) + core_oat_name := $($(3)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_pic_infix)$(4)$(CORE_OAT_SUFFIX) + + # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. + ifeq ($(3),) + ifdef TARGET_2ND_ARCH + $(4)TARGET_CORE_IMAGE_$(1)_$(2)_64 := $$(core_image_name) + else + $(4)TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name) + endif + else + $(4)TARGET_CORE_IMAGE_$(1)_$(2)_32 := $$(core_image_name) + endif + $(4)TARGET_CORE_IMG_OUTS += $$(core_image_name) + $(4)TARGET_CORE_OAT_OUTS += $$(core_oat_name) + + # If we have a wrapper, make the target phony. + ifneq ($(4),) +.PHONY: $$(core_image_name) + endif +$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) +$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) +$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) +$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency) + @echo "target dex2oat: $$@" + @mkdir -p $$(dir $$@) + $$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ + --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ + --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \ + $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ + --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \ + --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(3)TARGET_ARCH) \ + --instruction-set-variant=$$($(3)DEX2OAT_TARGET_CPU_VARIANT) \ + --instruction-set-features=$$($(3)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ + --android-root=$$(PRODUCT_OUT)/system --include-patch-information --generate-debug-info \ + $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1) + +$$(core_oat_name): $$(core_image_name) + + # Clean up locally used variables. + core_dex2oat_dependency := + core_compile_options := + core_image_name := + core_oat_name := + core_infix := + core_pic_infix := +endef # create-core-oat-target-rules + +# $(1): compiler - default, optimizing, jit, interpreter or interpreter-access-checks. +# $(2): wrapper. +# $(3): dex2oat suffix. 
+define create-core-oat-target-rule-combination
+  $(call create-core-oat-target-rules,$(1),no-pic,,$(2),$(3))
+  $(call create-core-oat-target-rules,$(1),pic,,$(2),$(3))
+
+  ifdef TARGET_2ND_ARCH
+    $(call create-core-oat-target-rules,$(1),no-pic,2ND_,$(2),$(3))
+    $(call create-core-oat-target-rules,$(1),pic,2ND_,$(2),$(3))
+  endif
+endef
+
+$(eval $(call create-core-oat-target-rule-combination,default,,))
+$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
+$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
+$(eval $(call create-core-oat-target-rule-combination,jit,,))
+
+valgrindTARGET_CORE_IMG_OUTS :=
+valgrindTARGET_CORE_OAT_OUTS :=
+$(eval $(call create-core-oat-target-rule-combination,default,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
+$(eval $(call create-core-oat-target-rule-combination,jit,valgrind,32))
+
+valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
+
+valgrind-test-art-host-dex2oat: valgrind-test-art-host-dex2oat-host valgrind-test-art-host-dex2oat-target
diff --git a/cmdline/README.md b/cmdline/README.md
new file mode 100644
index 000000000..8cac77f82
--- /dev/null
+++ b/cmdline/README.md
@@ -0,0 +1,245 @@
+Cmdline
+===================
+
+Introduction
+-------------
+This directory contains the classes that do common command line tool initialization and parsing. The
+long term goal is eventually for all `art` command-line tools to be using these helpers.
+
+----------
+
+
+## Cmdline Parser
+-------------
+
+The `CmdlineParser` class provides a fluent interface using a domain-specific language to quickly
+generate a type-safe value parser that processes a user-provided list of strings (`argv`). Currently,
+it can parse a string into a `VariantMap`, although in the future it might be desirable to parse
+into any struct of any field.
+
+To use, create a `CmdlineParser::Builder` and then chain the `Define` methods together with
+`WithType` and `IntoXX` methods.
+
+### Quick Start
+For example, to save the values into a user-defined variant map:
+
+```
+struct FruitVariantMap : VariantMap {
+  static const Key<int> Apple;
+  static const Key<double> Orange;
+  static const Key<Unit> Help;
+};
+// Note that some template boilerplate has been avoided for clarity.
+// See variant_map_test.cc for how to completely define a custom map.
+
+using FruitParser = CmdlineParser<FruitVariantMap, FruitVariantMap::Key>;
+
+FruitParser MakeParser() {
+  auto&& builder = FruitParser::Builder();
+  builder
+   .Define("--help")
+    .IntoKey(FruitVariantMap::Help)
+   .Define("--apple:_")
+    .WithType<int>()
+    .IntoKey(FruitVariantMap::Apple)
+   .Define("--orange:_")
+    .WithType<double>()
+    .WithRange(0.0, 1.0)
+    .IntoKey(FruitVariantMap::Orange);
+
+  return builder.Build();
+}
+
+int main(int argc, char** argv) {
+  auto parser = MakeParser();
+  auto result = parser.parse(argv, argc);
+  if (result.isError()) {
+    std::cerr << result.getMessage() << std::endl;
+    return EXIT_FAILURE;
+  }
+  auto map = parser.GetArgumentsMap();
+  std::cout << "Help? " << map.GetOrDefault(FruitVariantMap::Help) << std::endl;
+  std::cout << "Apple? " << map.GetOrDefault(FruitVariantMap::Apple) << std::endl;
+  std::cout << "Orange? " << map.GetOrDefault(FruitVariantMap::Orange) << std::endl;
" << map.GetOrDefault(FruitVariantMap::Orange) << std::endl; + + return EXIT_SUCCESS; +} +``` + +In the above code sample, we define a parser which is capable of parsing something like `--help +--apple:123 --orange:0.456` . It will error out automatically if invalid flags are given, or if the +appropriate flags are given but of the the wrong type/range. So for example, `--foo` will not parse +(invalid argument), neither will `--apple:fruit` (fruit is not an int) nor `--orange:1234` (1234 is +out of range of [0.0, 1.0]) + +### Argument Definitions in Detail +#### Define method +The 'Define' method takes one or more aliases for the argument. Common examples might be `{"-h", +"--help"}` where both `--help` and `-h` are aliases for the same argument. + +The simplest kind of argument just tests for presence, but we often want to parse out a particular +type of value (such as an int or double as in the above `FruitVariantMap` example). To do that, a +_wildcard_ must be used to denote the location within the token that the type will be parsed out of. + +For example with `-orange:_` the parse would know to check all tokens in an `argv` list for the +`-orange:` prefix and then strip it, leaving only the remains to be parsed. + +#### WithType method (optional) +After an argument definition is provided, the parser builder needs to know what type the argument +will be in order to provide the type safety and make sure the rest of the argument definition is +correct as early as possible (in essence, everything but the parsing of the argument name is done at +compile time). + +Everything that follows a `WithType()` call is thus type checked to only take `T` values. + +If this call is omitted, the parser generator assumes you are building a `Unit` type (i.e. an +argument that only cares about presence). + +#### WithRange method (optional) +Some values will not make sense outside of a `[min, max]` range, so this is an option to quickly add +a range check without writing custom code. The range check is performed after the main parsing +happens and happens for any type implementing the `<=` operators. + +#### WithValueMap (optional) +When parsing an enumeration, it might be very convenient to map a list of possible argument string +values into its runtime value. + +With something like +``` + .Define("-hello:_") + .WithValueMap({"world", kWorld}, + {"galaxy", kGalaxy}) +``` +It will parse either `-hello:world` or `-hello:galaxy` only (and error out on other variations of +`-hello:whatever`), converting it to the type-safe value of `kWorld` or `kGalaxy` respectively. + +This is meant to be another shorthand (like `WithRange`) to avoid writing a custom type parser. In +general it takes a variadic number of `pair`. + +#### WithValues (optional) +When an argument definition has multiple aliases with no wildcards, it might be convenient to +quickly map them into discrete values. + +For example: +``` + .Define({"-xinterpret", "-xnointerpret"}) + .WithValues({true, false} +``` +It will parse `-xinterpret` as `true` and `-xnointerpret` as `false`. + +In general, it uses the position of the argument alias to map into the WithValues position value. + +(Note that this method will not work when the argument definitions have a wildcard because there is +no way to position-ally match that). 
+
+#### AppendValues (optional)
+By default, the argument is assumed to appear exactly once, and if the user specifies it more than
+once, only the latest value is taken into account (and all previous occurrences of the argument are
+ignored).
+
+In some situations, we may want to accumulate the argument values instead of discarding the previous
+ones.
+
+For example:
+```
+  .Define("-D")
+    .WithType<std::vector<std::string>>()
+    .AppendValues()
+```
+Will parse something like `-Dhello -Dworld -Dbar -Dbaz` into `std::vector<std::string>{"hello",
+"world", "bar", "baz"}`.
+
+### Setting an argument parse target (required)
+To complete an argument definition, the parser generator also needs to know where to save values.
+Currently, only `IntoKey` is supported, but that may change in the future.
+
+#### IntoKey (required)
+This specifies that when a value is parsed, it will get saved into a variant map using the specific
+key.
+
+For example,
+```
+  .Define("-help")
+    .IntoKey(Map::Help)
+```
+will save occurrences of the `-help` argument by doing a `Map.Set(Map::Help, ParsedValue("-help"))`
+where `ParsedValue` is an imaginary function that parses the `-help` argument into a specific type
+set by `WithType`.
+
+### Ignoring unknown arguments
+This is highly discouraged, but for compatibility with `JNI` which allows argument ignores, there is
+an option to ignore any argument tokens that are not known to the parser. This is done with the
+`Ignore` function which takes a list of argument definition names.
+
+It's semantically equivalent to making a series of argument definitions that map to `Unit` but don't
+get saved anywhere. Values will still get parsed as normal, so it will *not* ignore known arguments
+with invalid values, only user-arguments for which it could not find a matching argument definition.
+
+### Parsing custom types
+Any type can be parsed from a string by specializing the `CmdlineType<T>` class and implementing the
+static interface provided by `CmdlineTypeParser`. It is recommended to inherit from
+`CmdlineTypeParser<T>` since it already provides default implementations for every method.
+
+The `Parse` method should be implemented for most types. Some types will allow appending (such as an
+`std::vector<std::string>`) and are meant to be used with `AppendValues`, in which case the
+`ParseAndAppend` function should be implemented.
+
+For example:
+```
+template <>
+struct CmdlineType<double> : CmdlineTypeParser<double> {
+  Result Parse(const std::string& str) {
+    char* end = nullptr;
+    errno = 0;
+    double value = strtod(str.c_str(), &end);
+
+    if (*end != '\0') {
+      return Result::Failure("Failed to parse double from " + str);
+    }
+    if (errno == ERANGE) {
+      return Result::OutOfRange(
+          "Failed to parse double from " + str + "; overflow/underflow occurred");
+    }
+
+    return Result::Success(value);
+  }
+
+  static const char* Name() { return "double"; }
+  // note: Name() is just here for more user-friendly errors,
+  // but in the future we will use non-standard ways of getting the type name
+  // at compile-time and this will no longer be required
+};
+```
+Will parse any non-append argument definitions with a type of `double`.
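+
+As a rough usage sketch of the `CmdlineType<double>` specialization above (illustrative only; it
+assumes the `IsSuccess`/`IsError`/`GetValue` accessors of `CmdlineParseResult`, which is added
+later in this change):
+
+```
+CmdlineType<double> parser;
+
+CmdlineParseResult<double> ok = parser.Parse("0.5");
+assert(ok.IsSuccess());
+assert(ok.GetValue() == 0.5);   // 0.5 is exactly representable, so == is safe here.
+
+CmdlineParseResult<double> bad = parser.Parse("fruit");
+assert(bad.IsError());          // Message: "Failed to parse double from fruit".
+```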
+
+For an appending example:
+```
+template <>
+struct CmdlineType<std::vector<std::string>> : CmdlineTypeParser<std::vector<std::string>> {
+  Result ParseAndAppend(const std::string& args,
+                        std::vector<std::string>& existing_value) {
+    existing_value.push_back(args);
+    return Result::SuccessNoValue();
+  }
+  static const char* Name() { return "std::vector<std::string>"; }
+};
+```
+Will parse multiple instances of the same argument repeatedly into the `existing_value` (which will
+be default-constructed to `T{}` for the first occurrence of the argument).
+
+#### What is a `Result`?
+`Result` is a typedef for `CmdlineParseResult<T>` and it acts similar to a poor version of
+`Either<Left, Right>` in Haskell. In particular, it would be similar to `Either<int ErrorCode,
+Maybe<T Value>>`.
+
+There are helpers like `Result::Success(value)`, `Result::Failure(string message)` and so on to
+quickly construct these without caring about the type.
+
+When successfully parsing a single value, `Result::Success(value)` should be used, and when
+successfully parsing an appended value, use `Result::SuccessNoValue()` and write back the new value
+into `existing_value` as an out-parameter.
+
+When many arguments are parsed, the result is collapsed down to a `CmdlineResult` which acts as an
+`Either<int ErrorCode, Unit>` where the right side simply indicates success. When values are
+successfully stored, the parser will automatically save them into the target destination as a side
+effect.
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
new file mode 100644
index 000000000..4dcaf804d
--- /dev/null
+++ b/cmdline/cmdline.h
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_CMDLINE_CMDLINE_H_
+#define ART_CMDLINE_CMDLINE_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include "runtime.h"
+#include "base/stringpiece.h"
+#include "noop_compiler_callbacks.h"
+#include "base/logging.h"
+
+#if !defined(NDEBUG)
+#define DBG_LOG LOG(INFO)
+#else
+#define DBG_LOG LOG(DEBUG)
+#endif
+
+namespace art {
+
+// TODO: Move to <runtime/utils.h> and remove all copies of this function.
+static bool LocationToFilename(const std::string& location, InstructionSet isa,
+                               std::string* filename) {
+  bool has_system = false;
+  bool has_cache = false;
+  // image_location = /system/framework/boot.art
+  // system_image_filename = /system/framework/<image_isa>/boot.art
+  std::string system_filename(GetSystemImageFilename(location.c_str(), isa));
+  if (OS::FileExists(system_filename.c_str())) {
+    has_system = true;
+  }
+
+  bool have_android_data = false;
+  bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
+  std::string dalvik_cache;
+  GetDalvikCache(GetInstructionSetString(isa), false, &dalvik_cache,
+                 &have_android_data, &dalvik_cache_exists, &is_global_cache);
+
+  std::string cache_filename;
+  if (have_android_data && dalvik_cache_exists) {
+    // Always set output location even if it does not exist,
+    // so that the caller knows where to create the image.
+    //
+    // image_location = /system/framework/boot.art
+    // *image_filename = /data/dalvik-cache/<isa>/boot.art
+    std::string error_msg;
+    if (GetDalvikCacheFilename(location.c_str(), dalvik_cache.c_str(),
+                               &cache_filename, &error_msg)) {
+      has_cache = true;
+    }
+  }
+  if (has_system) {
+    *filename = system_filename;
+    return true;
+  } else if (has_cache) {
+    *filename = cache_filename;
+    return true;
+  } else {
+    return false;
+  }
+}
+
+static Runtime* StartRuntime(const char* boot_image_location, InstructionSet instruction_set) {
+  CHECK(boot_image_location != nullptr);
+
+  RuntimeOptions options;
+
+  // We are more like a compiler than a run-time. We don't want to execute code.
+  {
+    static NoopCompilerCallbacks callbacks;
+    options.push_back(std::make_pair("compilercallbacks", &callbacks));
+  }
+
+  // Boot image location.
+  {
+    std::string boot_image_option;
+    boot_image_option += "-Ximage:";
+    boot_image_option += boot_image_location;
+    options.push_back(std::make_pair(boot_image_option.c_str(), nullptr));
+  }
+
+  // Instruction set.
+  options.push_back(
+      std::make_pair("imageinstructionset",
+                     reinterpret_cast<const void*>(GetInstructionSetString(instruction_set))));
+  // None of the command line tools need sig chain. If this changes we'll need
+  // to upgrade this option to a proper parameter.
+  options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
+  if (!Runtime::Create(options, false)) {
+    fprintf(stderr, "Failed to create runtime\n");
+    return nullptr;
+  }
+
+  // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
+  // give it away now and then switch to a more manageable ScopedObjectAccess.
+  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+
+  return Runtime::Current();
+}
+
+struct CmdlineArgs {
+  enum ParseStatus {
+    kParseOk,               // Parse successful. Do not set the error message.
+    kParseUnknownArgument,  // Unknown argument. Do not set the error message.
+    kParseError,            // Parse ok, but failed elsewhere. Print the set error message.
+  };
+
+  bool Parse(int argc, char** argv) {
+    // Skip over argv[0].
+    argv++;
+    argc--;
+
+    if (argc == 0) {
+      fprintf(stderr, "No arguments specified\n");
+      PrintUsage();
+      return false;
+    }
+
+    std::string error_msg;
+    for (int i = 0; i < argc; i++) {
+      const StringPiece option(argv[i]);
+      if (option.starts_with("--boot-image=")) {
+        boot_image_location_ = option.substr(strlen("--boot-image=")).data();
+      } else if (option.starts_with("--instruction-set=")) {
+        StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data();
+        instruction_set_ = GetInstructionSetFromString(instruction_set_str.data());
+        if (instruction_set_ == kNone) {
+          fprintf(stderr, "Unsupported instruction set %s\n", instruction_set_str.data());
+          PrintUsage();
+          return false;
+        }
+      } else if (option.starts_with("--output=")) {
+        output_name_ = option.substr(strlen("--output=")).ToString();
+        const char* filename = output_name_.c_str();
+        out_.reset(new std::ofstream(filename));
+        if (!out_->good()) {
+          fprintf(stderr, "Failed to open output filename %s\n", filename);
+          PrintUsage();
+          return false;
+        }
+        os_ = out_.get();
+      } else {
+        ParseStatus parse_status = ParseCustom(option, &error_msg);
+
+        if (parse_status == kParseUnknownArgument) {
+          fprintf(stderr, "Unknown argument %s\n", option.data());
+        }
+
+        if (parse_status != kParseOk) {
+          fprintf(stderr, "%s\n", error_msg.c_str());
+          PrintUsage();
+          return false;
+        }
+      }
+    }
+
+    DBG_LOG << "will call parse checks";
+
+    {
+      ParseStatus checks_status = ParseChecks(&error_msg);
+      if (checks_status != kParseOk) {
+        fprintf(stderr, "%s\n", error_msg.c_str());
+        PrintUsage();
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  virtual std::string GetUsage() const {
+    std::string usage;
+
+    usage +=  // Required.
+        "  --boot-image=<file.art>: provide the image location for the boot class path.\n"
+        "      Do not include the arch as part of the name, it is added automatically.\n"
+        "      Example: --boot-image=/system/framework/boot.art\n"
+        "               (specifies /system/framework/<arch>/boot.art as the image file)\n"
+        "\n";
+    usage += StringPrintf(  // Optional.
+        "  --instruction-set=(arm|arm64|mips|mips64|x86|x86_64): for locating the image\n"
+        "      file based on the image location set.\n"
+        "      Example: --instruction-set=x86\n"
+        "      Default: %s\n"
+        "\n",
+        GetInstructionSetString(kRuntimeISA));
+    usage +=  // Optional.
+        "  --output=<file> may be used to send the output to a file.\n"
+        "      Example: --output=/tmp/oatdump.txt\n"
+        "\n";
+
+    return usage;
+  }
+
+  // Specified by --boot-image.
+  const char* boot_image_location_ = nullptr;
+  // Specified by --instruction-set.
+  InstructionSet instruction_set_ = kRuntimeISA;
+  // Specified by --output.
+  std::ostream* os_ = &std::cout;
+  std::unique_ptr<std::ofstream> out_;  // If something besides cout is used
+  std::string output_name_;
+
+  virtual ~CmdlineArgs() {}
+
+  bool ParseCheckBootImage(std::string* error_msg) {
+    if (boot_image_location_ == nullptr) {
+      *error_msg = "--boot-image must be specified";
+      return false;
+    }
+
+    DBG_LOG << "boot image location: " << boot_image_location_;
+
+    // Checks for --boot-image location.
+    {
+      std::string boot_image_location = boot_image_location_;
+      size_t file_name_idx = boot_image_location.rfind("/");
+      if (file_name_idx == std::string::npos) {  // Prevent an InsertIsaDirectory check failure.
+        *error_msg = "Boot image location must have a / in it";
+        return false;
+      }
+
+      // Don't let image locations with the 'arch' in it through, since it's not a location.
+      // This prevents a common error "Could not create an image space..." when initializing the Runtime.
+      if (file_name_idx != std::string::npos) {
+        std::string no_file_name = boot_image_location.substr(0, file_name_idx);
+        size_t ancestor_dirs_idx = no_file_name.rfind("/");
+
+        std::string parent_dir_name;
+        if (ancestor_dirs_idx != std::string::npos) {
+          parent_dir_name = no_file_name.substr(ancestor_dirs_idx + 1);
+        } else {
+          parent_dir_name = no_file_name;
+        }
+
+        DBG_LOG << "boot_image_location parent_dir_name was " << parent_dir_name;
+
+        if (GetInstructionSetFromString(parent_dir_name.c_str()) != kNone) {
+          *error_msg = "Do not specify the architecture as part of the boot image location";
+          return false;
+        }
+      }
+
+      // Check that the boot image location points to a valid file name.
+      std::string file_name;
+      if (!LocationToFilename(boot_image_location, instruction_set_, &file_name)) {
+        *error_msg = StringPrintf("No corresponding file for location '%s' exists",
+                                  file_name.c_str());
+        return false;
+      }
+
+      DBG_LOG << "boot_image_filename does exist: " << file_name;
+    }
+
+    return true;
+  }
+
+  void PrintUsage() {
+    fprintf(stderr, "%s", GetUsage().c_str());
+  }
+
+ protected:
+  virtual ParseStatus ParseCustom(const StringPiece& option ATTRIBUTE_UNUSED,
+                                  std::string* error_msg ATTRIBUTE_UNUSED) {
+    return kParseUnknownArgument;
+  }
+
+  virtual ParseStatus ParseChecks(std::string* error_msg ATTRIBUTE_UNUSED) {
+    return kParseOk;
+  }
+};
+
+template <typename Args = CmdlineArgs>
+struct CmdlineMain {
+  int Main(int argc, char** argv) {
+    InitLogging(argv);
+    std::unique_ptr<Args> args = std::unique_ptr<Args>(CreateArguments());
+    args_ = args.get();
+
+    DBG_LOG << "Try to parse";
+
+    if (args_ == nullptr || !args_->Parse(argc, argv)) {
+      return EXIT_FAILURE;
+    }
+
+    bool needs_runtime = NeedsRuntime();
+    std::unique_ptr<Runtime> runtime;
+
+    if (needs_runtime) {
+      std::string error_msg;
+      if (!args_->ParseCheckBootImage(&error_msg)) {
+        fprintf(stderr, "%s\n", error_msg.c_str());
+        args_->PrintUsage();
+        return EXIT_FAILURE;
+      }
+      runtime.reset(CreateRuntime(args.get()));
+      if (runtime == nullptr) {
+        return EXIT_FAILURE;
+      }
+      if (!ExecuteWithRuntime(runtime.get())) {
+        return EXIT_FAILURE;
+      }
+    } else {
+      if (!ExecuteWithoutRuntime()) {
+        return EXIT_FAILURE;
+      }
+    }
+
+    if (!ExecuteCommon()) {
+      return EXIT_FAILURE;
+    }
+
+    return EXIT_SUCCESS;
+  }
+
+  // Override this function to create your own arguments.
+  // Usually will want to return a subtype of CmdlineArgs.
+  virtual Args* CreateArguments() {
+    return new Args();
+  }
+
+  // Override this function to do something else with the runtime.
+  virtual bool ExecuteWithRuntime(Runtime* runtime) {
+    CHECK(runtime != nullptr);
+    // Do nothing
+    return true;
+  }
+
+  // Does the code execution need a runtime? Sometimes it doesn't.
+  virtual bool NeedsRuntime() {
+    return true;
+  }
+
+  // Do execution without having created a runtime.
+  virtual bool ExecuteWithoutRuntime() {
+    return true;
+  }
+
+  // Continue execution after ExecuteWith[out]Runtime
+  virtual bool ExecuteCommon() {
+    return true;
+  }
+
+  virtual ~CmdlineMain() {}
+
+ protected:
+  Args* args_ = nullptr;
+
+ private:
+  Runtime* CreateRuntime(CmdlineArgs* args) {
+    CHECK(args != nullptr);
+
+    return StartRuntime(args->boot_image_location_, args->instruction_set_);
+  }
+};
+}  // namespace art
+
+#endif  // ART_CMDLINE_CMDLINE_H_
diff --git a/cmdline/cmdline_parse_result.h b/cmdline/cmdline_parse_result.h
new file mode 100644
index 000000000..982f17866
--- /dev/null
+++ b/cmdline/cmdline_parse_result.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_CMDLINE_CMDLINE_PARSE_RESULT_H_
+#define ART_CMDLINE_CMDLINE_PARSE_RESULT_H_
+
+#include "cmdline_result.h"
+#include "detail/cmdline_parser_detail.h"
+
+namespace art {
+// Result of a type-parsing attempt. If successful holds the strongly-typed value,
+// otherwise it holds either a usage or a failure string message that should be displayed back
+// to the user.
+//
+// CmdlineType<T>::Parse/CmdlineType<T>::ParseAndAppend must return this type.
+template <typename T>
+struct CmdlineParseResult : CmdlineResult {
+  using CmdlineResult::CmdlineResult;
+
+  // Create an error result with the usage error code and the specified message.
+  static CmdlineParseResult Usage(const std::string& message) {
+    return CmdlineParseResult(kUsage, message);
+  }
+
+  // Create an error result with the failure error code and no message.
+  static CmdlineParseResult Failure() {
+    return CmdlineParseResult(kFailure);
+  }
+
+  // Create an error result with the failure error code and the specified message.
+  static CmdlineParseResult Failure(const std::string& message) {
+    return CmdlineParseResult(kFailure, message);
+  }
+
+  // Create a successful result which holds the specified value.
+  static CmdlineParseResult Success(const T& value) {
+    return CmdlineParseResult(value);
+  }
+
+  // Create a successful result, taking over the value.
+  static CmdlineParseResult Success(T&& value) {
+    return CmdlineParseResult(std::forward<T>(value));
+  }
+
+  // Create a successful result, without any values. Used when a value was successfully appended
+  // into an existing object.
+  static CmdlineParseResult SuccessNoValue() {
+    return CmdlineParseResult(T {});
+  }
+
+  // Create an error result with the OutOfRange error and the specified message.
+  static CmdlineParseResult OutOfRange(const std::string& message) {
+    return CmdlineParseResult(kOutOfRange, message);
+  }
+
+  // Create an error result with the OutOfRange code and a custom message
+  // which is printed from the actual/min/max values.
+  // Values are converted to string using the ostream<< operator.
+  static CmdlineParseResult OutOfRange(const T& value,
+                                       const T& min,
+                                       const T& max) {
+    return CmdlineParseResult(kOutOfRange,
+                              "actual: " + art::detail::ToStringAny(value) +
+                              ", min: " + art::detail::ToStringAny(min) +
+                              ", max: " + art::detail::ToStringAny(max));
+  }
+
+  // Get a read-only reference to the underlying value.
+  // The result must have been successful and must have a value.
+  const T& GetValue() const {
+    assert(IsSuccess());
+    assert(has_value_);
+    return value_;
+  }
+
+  // Get a mutable reference to the underlying value.
+  // The result must have been successful and must have a value.
+  T& GetValue() {
+    assert(IsSuccess());
+    assert(has_value_);
+    return value_;
+  }
+
+  // Take over the value.
+  // The result must have been successful and must have a value.
+  T&& ReleaseValue() {
+    assert(IsSuccess());
+    assert(has_value_);
+    return std::move(value_);
+  }
+
+  // Whether or not the result has a value (e.g. created with Result::Success).
+  // Error results never have values, success results commonly, but not always, have values.
+  bool HasValue() const {
+    return has_value_;
+  }
+
+  // Cast an error-result from type T2 to T.
+  // Safe since error-results don't store a typed value.
+  template <typename T2>
+  static CmdlineParseResult<T> CastError(const CmdlineParseResult<T2>& other) {
+    assert(other.IsError());
+    return CmdlineParseResult(other.GetStatus());
+  }
+
+  // Make sure copying is allowed
+  CmdlineParseResult(const CmdlineParseResult&) = default;
+  // Make sure moving is cheap
+  CmdlineParseResult(CmdlineParseResult&&) = default;
+
+ private:
+  explicit CmdlineParseResult(const T& value)
+      : CmdlineResult(kSuccess), value_(value), has_value_(true) {}
+  explicit CmdlineParseResult(T&& value)
+      : CmdlineResult(kSuccess), value_(std::forward<T>(value)), has_value_(true) {}
+  CmdlineParseResult()
+      : CmdlineResult(kSuccess), value_(), has_value_(false) {}
+
+  T value_;
+  bool has_value_ = false;
+};
+
+}  // namespace art
+
+#endif  // ART_CMDLINE_CMDLINE_PARSE_RESULT_H_
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
new file mode 100644
index 000000000..cfc096728
--- /dev/null
+++ b/cmdline/cmdline_parser.h
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_CMDLINE_CMDLINE_PARSER_H_
+#define ART_CMDLINE_CMDLINE_PARSER_H_
+
+#define CMDLINE_NDEBUG 1  // Do not output any debugging information for parsing.
+
+#include "cmdline/detail/cmdline_parser_detail.h"
+#include "cmdline/detail/cmdline_parse_argument_detail.h"
+#include "cmdline/detail/cmdline_debug_detail.h"
+
+#include "cmdline_type_parser.h"
+#include "token_range.h"
+#include "cmdline_types.h"
+#include "cmdline_result.h"
+#include "cmdline_parse_result.h"
+
+#include "runtime/base/variant_map.h"
+
+#include <memory>
+#include <vector>
+
+namespace art {
+// Build a parser for command line arguments with a small domain specific language.
+// Each parsed type must have a specialized CmdlineType<T> in order to do the string->T parsing.
+// Each argument must also have a VariantMap::Key in order to do the T storage. +template class TVariantMapKey> +struct CmdlineParser { + template + struct ArgumentBuilder; + + struct Builder; // Build the parser. + struct UntypedArgumentBuilder; // Build arguments which weren't yet given a type. + + private: + // Forward declare some functions that we need to use before fully-defining structs. + template + static ArgumentBuilder CreateArgumentBuilder(Builder& parent); + static void AppendCompletedArgument(Builder& builder, detail::CmdlineParseArgumentAny* arg); + + // Allow argument definitions to save their values when they are parsed, + // without having a dependency on CmdlineParser or any of the builders. + // + // A shared pointer to the save destination is saved into the load/save argument callbacks. + // + // This also allows the underlying storage (i.e. a variant map) to be released + // to the user, without having to recreate all of the callbacks. + struct SaveDestination { + SaveDestination() : variant_map_(new TVariantMap()) {} + + // Save value to the variant map. + template + void SaveToMap(const TVariantMapKey& key, TArg& value) { + variant_map_->Set(key, value); + } + + // Get the existing value from a map, creating the value if it did not already exist. + template + TArg& GetOrCreateFromMap(const TVariantMapKey& key) { + auto* ptr = variant_map_->Get(key); + if (ptr == nullptr) { + variant_map_->Set(key, TArg()); + ptr = variant_map_->Get(key); + assert(ptr != nullptr); + } + + return *ptr; + } + + protected: + // Release the map, clearing it as a side-effect. + // Future saves will be distinct from previous saves. + TVariantMap&& ReleaseMap() { + return std::move(*variant_map_); + } + + // Get a read-only reference to the variant map. + const TVariantMap& GetMap() { + return *variant_map_; + } + + // Clear all potential save targets. + void Clear() { + variant_map_->Clear(); + } + + private: + // Don't try to copy or move this. Just don't. + SaveDestination(const SaveDestination&) = delete; + SaveDestination(SaveDestination&&) = delete; + SaveDestination& operator=(const SaveDestination&) = delete; + SaveDestination& operator=(SaveDestination&&) = delete; + + std::shared_ptr variant_map_; + + // Allow the parser to change the underlying pointers when we release the underlying storage. + friend struct CmdlineParser; + }; + + public: + // Builder for the argument definition of type TArg. Do not use this type directly, + // it is only a separate type to provide compile-time enforcement against doing + // illegal builds. + template + struct ArgumentBuilder { + // Add a range check to this argument. + ArgumentBuilder& WithRange(const TArg& min, const TArg& max) { + argument_info_.has_range_ = true; + argument_info_.min_ = min; + argument_info_.max_ = max; + + return *this; + } + + // Map the list of names into the list of values. List of names must not have + // any wildcards '_' in it. + // + // Do not use if a value map has already been set. + ArgumentBuilder& WithValues(std::initializer_list value_list) { + SetValuesInternal(value_list); + return *this; + } + + // When used with a single alias, map the alias into this value. + // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. + ArgumentBuilder WithValue(const TArg& value) { + return WithValues({ value }); + } + + // Map the parsed string values (from _) onto a concrete value. If no wildcard + // has been specified, then map the value directly from the arg name (i.e. 
+ // if there are multiple aliases, then use the alias to do the mapping). + // + // Do not use if a values list has already been set. + ArgumentBuilder& WithValueMap( + std::initializer_list> key_value_list) { + assert(!argument_info_.has_value_list_); + + argument_info_.has_value_map_ = true; + argument_info_.value_map_ = key_value_list; + + return *this; + } + + // If this argument is seen multiple times, successive arguments mutate the same value + // instead of replacing it with a new value. + ArgumentBuilder& AppendValues() { + argument_info_.appending_values_ = true; + + return *this; + } + + // Convenience type alias for the variant map key type definition. + using MapKey = TVariantMapKey; + + // Write the results of this argument into the key. + // To look up the parsed arguments, get the map and then use this key with VariantMap::Get + CmdlineParser::Builder& IntoKey(const MapKey& key) { + // Only capture save destination as a pointer. + // This allows the parser to later on change the specific save targets. + auto save_destination = save_destination_; + save_value_ = [save_destination, &key](TArg& value) { + save_destination->SaveToMap(key, value); + CMDLINE_DEBUG_LOG << "Saved value into map '" + << detail::ToStringAny(value) << "'" << std::endl; + }; + + load_value_ = [save_destination, &key]() -> TArg& { + TArg& value = save_destination->GetOrCreateFromMap(key); + CMDLINE_DEBUG_LOG << "Loaded value from map '" << detail::ToStringAny(value) << "'" + << std::endl; + + return value; + }; + + save_value_specified_ = true; + load_value_specified_ = true; + + CompleteArgument(); + return parent_; + } + + // Ensure we always move this when returning a new builder. + ArgumentBuilder(ArgumentBuilder&&) = default; + + protected: + // Used by builder to internally ignore arguments by dropping them on the floor after parsing. + CmdlineParser::Builder& IntoIgnore() { + save_value_ = [](TArg& value) { + CMDLINE_DEBUG_LOG << "Ignored value '" << detail::ToStringAny(value) << "'" << std::endl; + }; + load_value_ = []() -> TArg& { + assert(false && "Should not be appending values to ignored arguments"); + return *reinterpret_cast(0); // Blow up. + }; + + save_value_specified_ = true; + load_value_specified_ = true; + + CompleteArgument(); + return parent_; + } + + void SetValuesInternal(const std::vector&& value_list) { + assert(!argument_info_.has_value_map_); + + argument_info_.has_value_list_ = true; + argument_info_.value_list_ = value_list; + } + + void SetNames(std::vector&& names) { + argument_info_.names_ = names; + } + + void SetNames(std::initializer_list names) { + argument_info_.names_ = names; + } + + private: + // Copying is bad. Move only. + ArgumentBuilder(const ArgumentBuilder&) = delete; + + // Called by any function that doesn't chain back into this builder. + // Completes the argument builder and save the information into the main builder. + void CompleteArgument() { + assert(save_value_specified_ && + "No Into... function called, nowhere to save parsed values to"); + assert(load_value_specified_ && + "No Into... function called, nowhere to load parsed values from"); + + argument_info_.CompleteArgument(); + + // Appending the completed argument is destructive. The object is no longer + // usable since all the useful information got moved out of it. 
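+      // Ownership of the heap-allocated argument transfers to the parent builder,
+      // which wraps the raw pointer in a std::unique_ptr (see Builder::AppendCompletedArgument).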
+ AppendCompletedArgument(parent_, + new detail::CmdlineParseArgument( + std::move(argument_info_), + std::move(save_value_), + std::move(load_value_))); + } + + friend struct CmdlineParser; + friend struct CmdlineParser::Builder; + friend struct CmdlineParser::UntypedArgumentBuilder; + + ArgumentBuilder(CmdlineParser::Builder& parser, + std::shared_ptr save_destination) + : parent_(parser), + save_value_specified_(false), + load_value_specified_(false), + save_destination_(save_destination) { + save_value_ = [](TArg&) { + assert(false && "No save value function defined"); + }; + + load_value_ = []() -> TArg& { + assert(false && "No load value function defined"); + return *reinterpret_cast(0); // Blow up. + }; + } + + CmdlineParser::Builder& parent_; + std::function save_value_; + std::function load_value_; + bool save_value_specified_; + bool load_value_specified_; + detail::CmdlineParserArgumentInfo argument_info_; + + std::shared_ptr save_destination_; + }; + + struct UntypedArgumentBuilder { + // Set a type for this argument. The specific subcommand parser is looked up by the type. + template + ArgumentBuilder WithType() { + return CreateTypedBuilder(); + } + + // When used with multiple aliases, map the position of the alias to the value position. + template + ArgumentBuilder WithValues(std::initializer_list values) { + auto&& a = CreateTypedBuilder(); + a.WithValues(values); + return std::move(a); + } + + // When used with a single alias, map the alias into this value. + // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. + template + ArgumentBuilder WithValue(const TArg& value) { + return WithValues({ value }); + } + + // Set the current building argument to target this key. + // When this command line argument is parsed, it can be fetched with this key. + Builder& IntoKey(const TVariantMapKey& key) { + return CreateTypedBuilder().IntoKey(key); + } + + // Ensure we always move this when returning a new builder. + UntypedArgumentBuilder(UntypedArgumentBuilder&&) = default; + + protected: + void SetNames(std::vector&& names) { + names_ = std::move(names); + } + + void SetNames(std::initializer_list names) { + names_ = names; + } + + private: + // No copying. Move instead. + UntypedArgumentBuilder(const UntypedArgumentBuilder&) = delete; + + template + ArgumentBuilder CreateTypedBuilder() { + auto&& b = CreateArgumentBuilder(parent_); + InitializeTypedBuilder(&b); // Type-specific initialization + b.SetNames(std::move(names_)); + return std::move(b); + } + + template + typename std::enable_if::value>::type + InitializeTypedBuilder(ArgumentBuilder* arg_builder) { + // Every Unit argument implicitly maps to a runtime value of Unit{} + std::vector values(names_.size(), Unit{}); // NOLINT [whitespace/braces] [5] + arg_builder->SetValuesInternal(std::move(values)); + } + + // No extra work for all other types + void InitializeTypedBuilder(void*) {} + + template + friend struct ArgumentBuilder; + friend struct Builder; + + explicit UntypedArgumentBuilder(CmdlineParser::Builder& parent) : parent_(parent) {} + // UntypedArgumentBuilder(UntypedArgumentBuilder&& other) = default; + + CmdlineParser::Builder& parent_; + std::vector names_; + }; + + // Build a new parser given a chain of calls to define arguments. + struct Builder { + Builder() : save_destination_(new SaveDestination()) {} + + // Define a single argument. The default type is Unit. 
+ UntypedArgumentBuilder Define(const char* name) { + return Define({name}); + } + + // Define a single argument with multiple aliases. + UntypedArgumentBuilder Define(std::initializer_list names) { + auto&& b = UntypedArgumentBuilder(*this); + b.SetNames(names); + return std::move(b); + } + + // Whether the parser should give up on unrecognized arguments. Not recommended. + Builder& IgnoreUnrecognized(bool ignore_unrecognized) { + ignore_unrecognized_ = ignore_unrecognized; + return *this; + } + + // Provide a list of arguments to ignore for backwards compatibility. + Builder& Ignore(std::initializer_list ignore_list) { + for (auto&& ignore_name : ignore_list) { + std::string ign = ignore_name; + + // Ignored arguments are just like a regular definition which have very + // liberal parsing requirements (no range checks, no value checks). + // Unlike regular argument definitions, when a value gets parsed into its + // stronger type, we just throw it away. + + if (ign.find("_") != std::string::npos) { // Does the arg-def have a wildcard? + // pretend this is a string, e.g. -Xjitconfig: + auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); + assert(&builder == this); + (void)builder; // Ignore pointless unused warning, it's used in the assert. + } else { + // pretend this is a unit, e.g. -Xjitblocking + auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); + assert(&builder == this); + (void)builder; // Ignore pointless unused warning, it's used in the assert. + } + } + ignore_list_ = ignore_list; + return *this; + } + + // Finish building the parser; performs sanity checks. Return value is moved, not copied. + // Do not call this more than once. + CmdlineParser Build() { + assert(!built_); + built_ = true; + + auto&& p = CmdlineParser(ignore_unrecognized_, + std::move(ignore_list_), + save_destination_, + std::move(completed_arguments_)); + + return std::move(p); + } + + protected: + void AppendCompletedArgument(detail::CmdlineParseArgumentAny* arg) { + auto smart_ptr = std::unique_ptr(arg); + completed_arguments_.push_back(std::move(smart_ptr)); + } + + private: + // No copying now! + Builder(const Builder& other) = delete; + + template + friend struct ArgumentBuilder; + friend struct UntypedArgumentBuilder; + friend struct CmdlineParser; + + bool built_ = false; + bool ignore_unrecognized_ = false; + std::vector ignore_list_; + std::shared_ptr save_destination_; + + std::vector> completed_arguments_; + }; + + CmdlineResult Parse(const std::string& argv) { + std::vector tokenized; + Split(argv, ' ', &tokenized); + + return Parse(TokenRange(std::move(tokenized))); + } + + // Parse the arguments; storing results into the arguments map. Returns success value. + CmdlineResult Parse(const char* argv) { + return Parse(std::string(argv)); + } + + // Parse the arguments; storing the results into the arguments map. Returns success value. + // Assumes that argv[0] is a valid argument (i.e. not the program name). + CmdlineResult Parse(const std::vector& argv) { + return Parse(TokenRange(argv.begin(), argv.end())); + } + + // Parse the arguments; storing the results into the arguments map. Returns success value. + // Assumes that argv[0] is a valid argument (i.e. not the program name). + CmdlineResult Parse(const std::vector& argv) { + return Parse(TokenRange(argv.begin(), argv.end())); + } + + // Parse the arguments (directly from an int main(argv,argc)). Returns success value. + // Assumes that argv[0] is the program name, and ignores it. 
+ CmdlineResult Parse(const char* argv[], int argc) { + return Parse(TokenRange(&argv[1], argc - 1)); // ignore argv[0] because it's the program name + } + + // Look up the arguments that have been parsed; use the target keys to lookup individual args. + const TVariantMap& GetArgumentsMap() const { + return save_destination_->GetMap(); + } + + // Release the arguments map that has been parsed; useful for move semantics. + TVariantMap&& ReleaseArgumentsMap() { + return save_destination_->ReleaseMap(); + } + + // How many arguments were defined? + size_t CountDefinedArguments() const { + return completed_arguments_.size(); + } + + // Ensure we have a default move constructor. + CmdlineParser(CmdlineParser&&) = default; + // Ensure we have a default move assignment operator. + CmdlineParser& operator=(CmdlineParser&&) = default; + + private: + friend struct Builder; + + // Construct a new parser from the builder. Move all the arguments. + CmdlineParser(bool ignore_unrecognized, + std::vector&& ignore_list, + std::shared_ptr save_destination, + std::vector>&& completed_arguments) + : ignore_unrecognized_(ignore_unrecognized), + ignore_list_(std::move(ignore_list)), + save_destination_(save_destination), + completed_arguments_(std::move(completed_arguments)) { + assert(save_destination != nullptr); + } + + // Parse the arguments; storing results into the arguments map. Returns success value. + // The parsing will fail on the first non-success parse result and return that error. + // + // All previously-parsed arguments are cleared out. + // Otherwise, all parsed arguments will be stored into SaveDestination as a side-effect. + // A partial parse will result only in a partial save of the arguments. + CmdlineResult Parse(TokenRange&& arguments_list) { + save_destination_->Clear(); + + for (size_t i = 0; i < arguments_list.Size(); ) { + TokenRange possible_name = arguments_list.Slice(i); + + size_t best_match_size = 0; // How many tokens were matched in the best case. + size_t best_match_arg_idx = 0; + bool matched = false; // At least one argument definition has been matched? + + // Find the closest argument definition for the remaining token range. + size_t arg_idx = 0; + for (auto&& arg : completed_arguments_) { + size_t local_match = arg->MaybeMatches(possible_name); + + if (local_match > best_match_size) { + best_match_size = local_match; + best_match_arg_idx = arg_idx; + matched = true; + } + arg_idx++; + } + + // Saw some kind of unknown argument + if (matched == false) { + if (UNLIKELY(ignore_unrecognized_)) { // This is usually off, we only need it for JNI. + // Consume 1 token and keep going, hopefully the next token is a good one. + ++i; + continue; + } + // Common case: + // Bail out on the first unknown argument with an error. + return CmdlineResult(CmdlineResult::kUnknown, + std::string("Unknown argument: ") + possible_name[0]); + } + + // Look at the best-matched argument definition and try to parse against that. + auto&& arg = completed_arguments_[best_match_arg_idx]; + + assert(arg->MaybeMatches(possible_name) == best_match_size); + + // Try to parse the argument now, if we have enough tokens. + std::pair num_tokens = arg->GetNumTokens(); + size_t min_tokens; + size_t max_tokens; + + std::tie(min_tokens, max_tokens) = num_tokens; + + if ((i + min_tokens) > arguments_list.Size()) { + // expected longer command line but it was too short + // e.g. 
if the argv was only "-Xms" without specifying a memory option + CMDLINE_DEBUG_LOG << "Parse failure, i = " << i << ", arg list " << arguments_list.Size() << + " num tokens in arg_def: " << min_tokens << "," << max_tokens << std::endl; + return CmdlineResult(CmdlineResult::kFailure, + std::string("Argument ") + + possible_name[0] + ": incomplete command line arguments, expected " + + std::to_string(size_t(i + min_tokens) - arguments_list.Size()) + + " more tokens"); + } + + if (best_match_size > max_tokens || best_match_size < min_tokens) { + // Even our best match was out of range, so parsing would fail instantly. + return CmdlineResult(CmdlineResult::kFailure, + std::string("Argument ") + possible_name[0] + ": too few tokens " + "matched " + std::to_string(best_match_size) + + " but wanted " + std::to_string(num_tokens.first)); + } + + // We have enough tokens to begin exact parsing. + TokenRange exact_range = possible_name.Slice(0, max_tokens); + + size_t consumed_tokens = 1; // At least 1 if we ever want to try to resume parsing on error + CmdlineResult parse_attempt = arg->ParseArgument(exact_range, &consumed_tokens); + + if (parse_attempt.IsError()) { + // We may also want to continue parsing the other tokens to gather more errors. + return parse_attempt; + } // else the value has been successfully stored into the map + + assert(consumed_tokens > 0); // Don't hang in an infinite loop trying to parse + i += consumed_tokens; + + // TODO: also handle ignoring arguments for backwards compatibility + } // for + + return CmdlineResult(CmdlineResult::kSuccess); + } + + bool ignore_unrecognized_ = false; + std::vector ignore_list_; + std::shared_ptr save_destination_; + std::vector> completed_arguments_; +}; + +// This has to be defined after everything else, since we want the builders to call this. +template class TVariantMapKey> +template +CmdlineParser::ArgumentBuilder +CmdlineParser::CreateArgumentBuilder( + CmdlineParser::Builder& parent) { + return CmdlineParser::ArgumentBuilder( + parent, parent.save_destination_); +} + +// This has to be defined after everything else, since we want the builders to call this. +template class TVariantMapKey> +void CmdlineParser::AppendCompletedArgument( + CmdlineParser::Builder& builder, + detail::CmdlineParseArgumentAny* arg) { + builder.AppendCompletedArgument(arg); +} + +} // namespace art + +#endif // ART_CMDLINE_CMDLINE_PARSER_H_ diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc new file mode 100644 index 000000000..7c53e01c4 --- /dev/null +++ b/cmdline/cmdline_parser_test.cc @@ -0,0 +1,618 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cmdline_parser.h" +#include "runtime/runtime_options.h" +#include "runtime/parsed_options.h" + +#include "utils.h" +#include +#include "gtest/gtest.h" +#include "runtime/experimental_flags.h" + +#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast(expected), \ + reinterpret_cast(nullptr)); + +namespace art { + bool UsuallyEquals(double expected, double actual); + + // This has a gtest dependency, which is why it's in the gtest only. + bool operator==(const TestProfilerOptions& lhs, const TestProfilerOptions& rhs) { + return lhs.enabled_ == rhs.enabled_ && + lhs.output_file_name_ == rhs.output_file_name_ && + lhs.period_s_ == rhs.period_s_ && + lhs.duration_s_ == rhs.duration_s_ && + lhs.interval_us_ == rhs.interval_us_ && + UsuallyEquals(lhs.backoff_coefficient_, rhs.backoff_coefficient_) && + UsuallyEquals(lhs.start_immediately_, rhs.start_immediately_) && + UsuallyEquals(lhs.top_k_threshold_, rhs.top_k_threshold_) && + UsuallyEquals(lhs.top_k_change_threshold_, rhs.top_k_change_threshold_) && + lhs.profile_type_ == rhs.profile_type_ && + lhs.max_stack_depth_ == rhs.max_stack_depth_; + } + + bool UsuallyEquals(double expected, double actual) { + using FloatingPoint = ::testing::internal::FloatingPoint; + + FloatingPoint exp(expected); + FloatingPoint act(actual); + + // Compare with ULPs instead of comparing with == + return exp.AlmostEquals(act); + } + + template + bool UsuallyEquals(const T& expected, const T& actual, + typename std::enable_if< + detail::SupportsEqualityOperator::value>::type* = 0) { + return expected == actual; + } + + // Try to use memcmp to compare simple plain-old-data structs. + // + // This should *not* generate false positives, but it can generate false negatives. + // This will mostly work except for fields like float which can have different bit patterns + // that are nevertheless equal. + // If a test is failing because the structs aren't "equal" when they really are + // then it's recommended to implement operator== for it instead. + template + bool UsuallyEquals(const T& expected, const T& actual, + const Ignore& ... 
more ATTRIBUTE_UNUSED, + typename std::enable_if::value>::type* = 0, + typename std::enable_if::value>::type* = 0 + ) { + return memcmp(std::addressof(expected), std::addressof(actual), sizeof(T)) == 0; + } + + bool UsuallyEquals(const XGcOption& expected, const XGcOption& actual) { + return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0; + } + + bool UsuallyEquals(const char* expected, std::string actual) { + return std::string(expected) == actual; + } + + template + ::testing::AssertionResult IsExpectedKeyValue(const T& expected, + const TMap& map, + const TKey& key) { + auto* actual = map.Get(key); + if (actual != nullptr) { + if (!UsuallyEquals(expected, *actual)) { + return ::testing::AssertionFailure() + << "expected " << detail::ToStringAny(expected) << " but got " + << detail::ToStringAny(*actual); + } + return ::testing::AssertionSuccess(); + } + + return ::testing::AssertionFailure() << "key was not in the map"; + } + + template + ::testing::AssertionResult IsExpectedDefaultKeyValue(const T& expected, + const TMap& map, + const TKey& key) { + const T& actual = map.GetOrDefault(key); + if (!UsuallyEquals(expected, actual)) { + return ::testing::AssertionFailure() + << "expected " << detail::ToStringAny(expected) << " but got " + << detail::ToStringAny(actual); + } + return ::testing::AssertionSuccess(); + } + +class CmdlineParserTest : public ::testing::Test { + public: + CmdlineParserTest() = default; + ~CmdlineParserTest() = default; + + protected: + using M = RuntimeArgumentMap; + using RuntimeParser = ParsedOptions::RuntimeParser; + + static void SetUpTestCase() { + art::InitLogging(nullptr); // argv = null + } + + virtual void SetUp() { + parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options + } + + static ::testing::AssertionResult IsResultSuccessful(CmdlineResult result) { + if (result.IsSuccess()) { + return ::testing::AssertionSuccess(); + } else { + return ::testing::AssertionFailure() + << result.GetStatus() << " with: " << result.GetMessage(); + } + } + + static ::testing::AssertionResult IsResultFailure(CmdlineResult result, + CmdlineResult::Status failure_status) { + if (result.IsSuccess()) { + return ::testing::AssertionFailure() << " got success but expected failure: " + << failure_status; + } else if (result.GetStatus() == failure_status) { + return ::testing::AssertionSuccess(); + } + + return ::testing::AssertionFailure() << " expected failure " << failure_status + << " but got " << result.GetStatus(); + } + + std::unique_ptr parser_; +}; + +#define EXPECT_KEY_EXISTS(map, key) EXPECT_TRUE((map).Exists(key)) +#define EXPECT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedKeyValue(expected, map, key)) +#define EXPECT_DEFAULT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedDefaultKeyValue(expected, map, key)) + +#define _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ + do { \ + EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ + EXPECT_EQ(0u, parser_->GetArgumentsMap().Size()); \ + +#define EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ + _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ + } while (false) + +#define EXPECT_SINGLE_PARSE_DEFAULT_VALUE(expected, argv, key)\ + _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ + RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ + EXPECT_DEFAULT_KEY_VALUE(args, key, expected); \ + } while (false) // NOLINT [readability/namespace] [5] + +#define _EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ + do { \ + EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ + 
RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ + EXPECT_EQ(1u, args.Size()); \ + EXPECT_KEY_EXISTS(args, key); \ + +#define EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ + _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ + } while (false) + +#define EXPECT_SINGLE_PARSE_VALUE(expected, argv, key) \ + _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ + EXPECT_KEY_VALUE(args, key, expected); \ + } while (false) // NOLINT [readability/namespace] [5] + +#define EXPECT_SINGLE_PARSE_VALUE_STR(expected, argv, key) \ + EXPECT_SINGLE_PARSE_VALUE(std::string(expected), argv, key) + +#define EXPECT_SINGLE_PARSE_FAIL(argv, failure_status) \ + do { \ + EXPECT_TRUE(IsResultFailure(parser_->Parse(argv), failure_status));\ + RuntimeArgumentMap args = parser_->ReleaseArgumentsMap();\ + EXPECT_EQ(0u, args.Size()); \ + } while (false) + +TEST_F(CmdlineParserTest, TestSimpleSuccesses) { + auto& parser = *parser_; + + EXPECT_LT(0u, parser.CountDefinedArguments()); + + { + // Test case 1: No command line arguments + EXPECT_TRUE(IsResultSuccessful(parser.Parse(""))); + RuntimeArgumentMap args = parser.ReleaseArgumentsMap(); + EXPECT_EQ(0u, args.Size()); + } + + EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote); + EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath); + EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath); + EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize); + EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize); + EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); + EXPECT_SINGLE_PARSE_VALUE(false, "-XX:DisableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); + EXPECT_SINGLE_PARSE_VALUE(0.5, "-XX:HeapTargetUtilization=0.5", M::HeapTargetUtilization); + EXPECT_SINGLE_PARSE_VALUE(5u, "-XX:ParallelGCThreads=5", M::ParallelGCThreads); + EXPECT_SINGLE_PARSE_EXISTS("-Xno-dex-file-fallback", M::NoDexFileFallback); +} // TEST_F + +TEST_F(CmdlineParserTest, TestSimpleFailures) { + // Test argument is unknown to the parser + EXPECT_SINGLE_PARSE_FAIL("abcdefg^%@#*(@#", CmdlineResult::kUnknown); + // Test value map substitution fails + EXPECT_SINGLE_PARSE_FAIL("-Xverify:whatever", CmdlineResult::kFailure); + // Test value type parsing failures + EXPECT_SINGLE_PARSE_FAIL("-Xsswhatever", CmdlineResult::kFailure); // invalid memory value + EXPECT_SINGLE_PARSE_FAIL("-Xms123", CmdlineResult::kFailure); // memory value too small + EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=0.0", CmdlineResult::kOutOfRange); // toosmal + EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=2.0", CmdlineResult::kOutOfRange); // toolarg + EXPECT_SINGLE_PARSE_FAIL("-XX:ParallelGCThreads=-5", CmdlineResult::kOutOfRange); // too small + EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // not a valid suboption +} // TEST_F + +TEST_F(CmdlineParserTest, TestLogVerbosity) { + { + const char* log_args = "-verbose:" + "class,compiler,gc,heap,jdwp,jni,monitor,profiler,signals,simulator,startup," + "third-party-jni,threads,verifier"; + + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.class_linker = true; + log_verbosity.compiler = true; + log_verbosity.gc = true; + log_verbosity.heap = true; + log_verbosity.jdwp = true; + log_verbosity.jni = true; + log_verbosity.monitor = true; + log_verbosity.profiler = true; + log_verbosity.signals = true; + log_verbosity.simulator = true; + log_verbosity.startup = true; + log_verbosity.third_party_jni = 
true; + log_verbosity.threads = true; + log_verbosity.verifier = true; + + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:" + "class,compiler,gc,heap,jdwp,jni,monitor"; + + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.class_linker = true; + log_verbosity.compiler = true; + log_verbosity.gc = true; + log_verbosity.heap = true; + log_verbosity.jdwp = true; + log_verbosity.jni = true; + log_verbosity.monitor = true; + + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + EXPECT_SINGLE_PARSE_FAIL("-verbose:blablabla", CmdlineResult::kUsage); // invalid verbose opt + + { + const char* log_args = "-verbose:deopt"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.deopt = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:collector"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.collector = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } + + { + const char* log_args = "-verbose:oat"; + LogVerbosity log_verbosity = LogVerbosity(); + log_verbosity.oat = true; + EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); + } +} // TEST_F + +// TODO: Enable this b/19274810 +TEST_F(CmdlineParserTest, DISABLED_TestXGcOption) { + /* + * Test success + */ + { + XGcOption option_all_true{}; // NOLINT [readability/braces] [4] + option_all_true.collector_type_ = gc::CollectorType::kCollectorTypeCMS; + option_all_true.verify_pre_gc_heap_ = true; + option_all_true.verify_pre_sweeping_heap_ = true; + option_all_true.verify_post_gc_heap_ = true; + option_all_true.verify_pre_gc_rosalloc_ = true; + option_all_true.verify_pre_sweeping_rosalloc_ = true; + option_all_true.verify_post_gc_rosalloc_ = true; + + const char * xgc_args_all_true = "-Xgc:concurrent," + "preverify,presweepingverify,postverify," + "preverify_rosalloc,presweepingverify_rosalloc," + "postverify_rosalloc,precise," + "verifycardtable"; + + EXPECT_SINGLE_PARSE_VALUE(option_all_true, xgc_args_all_true, M::GcOption); + + XGcOption option_all_false{}; // NOLINT [readability/braces] [4] + option_all_false.collector_type_ = gc::CollectorType::kCollectorTypeMS; + option_all_false.verify_pre_gc_heap_ = false; + option_all_false.verify_pre_sweeping_heap_ = false; + option_all_false.verify_post_gc_heap_ = false; + option_all_false.verify_pre_gc_rosalloc_ = false; + option_all_false.verify_pre_sweeping_rosalloc_ = false; + option_all_false.verify_post_gc_rosalloc_ = false; + + const char* xgc_args_all_false = "-Xgc:nonconcurrent," + "nopreverify,nopresweepingverify,nopostverify,nopreverify_rosalloc," + "nopresweepingverify_rosalloc,nopostverify_rosalloc,noprecise,noverifycardtable"; + + EXPECT_SINGLE_PARSE_VALUE(option_all_false, xgc_args_all_false, M::GcOption); + + XGcOption option_all_default{}; // NOLINT [readability/braces] [4] + + const char* xgc_args_blank = "-Xgc:"; + EXPECT_SINGLE_PARSE_VALUE(option_all_default, xgc_args_blank, M::GcOption); + } + + /* + * Test failures + */ + EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // invalid Xgc opt +} // TEST_F + +/* + * {"-Xrunjdwp:_", "-agentlib:jdwp=_"} + */ +TEST_F(CmdlineParserTest, TestJdwpOptions) { + /* + * Test success + */ + { + /* + * "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n" + */ + JDWP::JdwpOptions opt = JDWP::JdwpOptions(); + opt.transport = JDWP::JdwpTransportType::kJdwpTransportSocket; + opt.port = 8000; + opt.server = true; 
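+    // server=y: the runtime listens on the given address and waits for a debugger to attach.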
+ + const char *opt_args = "-Xrunjdwp:transport=dt_socket,address=8000,server=y"; + + EXPECT_SINGLE_PARSE_VALUE(opt, opt_args, M::JdwpOptions); + } + + { + /* + * "Example: -agentlib:jdwp=transport=dt_socket,address=localhost:6500,server=n\n"); + */ + JDWP::JdwpOptions opt = JDWP::JdwpOptions(); + opt.transport = JDWP::JdwpTransportType::kJdwpTransportSocket; + opt.host = "localhost"; + opt.port = 6500; + opt.server = false; + + const char *opt_args = "-agentlib:jdwp=transport=dt_socket,address=localhost:6500,server=n"; + + EXPECT_SINGLE_PARSE_VALUE(opt, opt_args, M::JdwpOptions); + } + + /* + * Test failures + */ + EXPECT_SINGLE_PARSE_FAIL("-Xrunjdwp:help", CmdlineResult::kUsage); // usage for help only + EXPECT_SINGLE_PARSE_FAIL("-Xrunjdwp:blabla", CmdlineResult::kFailure); // invalid subarg + EXPECT_SINGLE_PARSE_FAIL("-agentlib:jdwp=help", CmdlineResult::kUsage); // usage for help only + EXPECT_SINGLE_PARSE_FAIL("-agentlib:jdwp=blabla", CmdlineResult::kFailure); // invalid subarg +} // TEST_F + +/* + * -D_ -D_ -D_ ... + */ +TEST_F(CmdlineParserTest, TestPropertiesList) { + /* + * Test successes + */ + { + std::vector opt = {"hello"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello", M::PropertiesList); + } + + { + std::vector opt = {"hello", "world"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello -Dworld", M::PropertiesList); + } + + { + std::vector opt = {"one", "two", "three"}; + + EXPECT_SINGLE_PARSE_VALUE(opt, "-Done -Dtwo -Dthree", M::PropertiesList); + } +} // TEST_F + +/* +* -Xcompiler-option foo -Xcompiler-option bar ... +*/ +TEST_F(CmdlineParserTest, TestCompilerOption) { + /* + * Test successes + */ + { + std::vector opt = {"hello"}; + EXPECT_SINGLE_PARSE_VALUE(opt, "-Xcompiler-option hello", M::CompilerOptions); + } + + { + std::vector opt = {"hello", "world"}; + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xcompiler-option hello -Xcompiler-option world", + M::CompilerOptions); + } + + { + std::vector opt = {"one", "two", "three"}; + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xcompiler-option one -Xcompiler-option two -Xcompiler-option three", + M::CompilerOptions); + } +} // TEST_F + +/* +* -Xjit, -Xnojit, -Xjitcodecachesize, Xjitcompilethreshold +*/ +TEST_F(CmdlineParserTest, TestJitOptions) { + /* + * Test successes + */ + { + EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation); + EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation); + } + { + EXPECT_SINGLE_PARSE_VALUE( + MemoryKiB(16 * KB), "-Xjitinitialsize:16K", M::JITCodeCacheInitialCapacity); + EXPECT_SINGLE_PARSE_VALUE( + MemoryKiB(16 * MB), "-Xjitmaxsize:16M", M::JITCodeCacheMaxCapacity); + } + { + EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold); + } +} // TEST_F + +/* +* -X-profile-* +*/ +TEST_F(CmdlineParserTest, TestProfilerOptions) { + /* + * Test successes + */ + + { + TestProfilerOptions opt; + opt.enabled_ = true; + + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xenable-profiler", + M::ProfilerOpts); + } + + { + TestProfilerOptions opt; + // also need to test 'enabled' + opt.output_file_name_ = "hello_world.txt"; + + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xprofile-filename:hello_world.txt ", + M::ProfilerOpts); + } + + { + TestProfilerOptions opt = TestProfilerOptions(); + // also need to test 'enabled' + opt.output_file_name_ = "output.txt"; + opt.period_s_ = 123u; + opt.duration_s_ = 456u; + opt.interval_us_ = 789u; + opt.backoff_coefficient_ = 2.0; + opt.start_immediately_ = true; + opt.top_k_threshold_ = 50.0; + opt.top_k_change_threshold_ = 60.0; + opt.profile_type_ = 
kProfilerMethod; + opt.max_stack_depth_ = 1337u; + + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xprofile-filename:output.txt " + "-Xprofile-period:123 " + "-Xprofile-duration:456 " + "-Xprofile-interval:789 " + "-Xprofile-backoff:2.0 " + "-Xprofile-start-immediately " + "-Xprofile-top-k-threshold:50.0 " + "-Xprofile-top-k-change-threshold:60.0 " + "-Xprofile-type:method " + "-Xprofile-max-stack-depth:1337", + M::ProfilerOpts); + } + + { + TestProfilerOptions opt = TestProfilerOptions(); + opt.profile_type_ = kProfilerBoundedStack; + + EXPECT_SINGLE_PARSE_VALUE(opt, + "-Xprofile-type:stack", + M::ProfilerOpts); + } +} // TEST_F + +/* -Xexperimental:_ */ +TEST_F(CmdlineParserTest, TestExperimentalFlags) { + // Default + EXPECT_SINGLE_PARSE_DEFAULT_VALUE(ExperimentalFlags::kNone, + "", + M::Experimental); + + // Disabled explicitly + EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone, + "-Xexperimental:none", + M::Experimental); + + // Enabled explicitly + EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kLambdas, + "-Xexperimental:lambdas", + M::Experimental); +} + +// -Xverify:_ +TEST_F(CmdlineParserTest, TestVerify) { + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kNone, "-Xverify:none", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:remote", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:all", M::Verify); + EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kSoftFail, "-Xverify:softfail", M::Verify); +} + +TEST_F(CmdlineParserTest, TestIgnoreUnrecognized) { + RuntimeParser::Builder parserBuilder; + + parserBuilder + .Define("-help") + .IntoKey(M::Help) + .IgnoreUnrecognized(true); + + parser_.reset(new RuntimeParser(parserBuilder.Build())); + + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option"); + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option1 --non-existent-option-2"); +} // TEST_F + +TEST_F(CmdlineParserTest, TestIgnoredArguments) { + std::initializer_list ignored_args = { + "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa", + "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:abdef", + "-Xdexopt:foobar", "-Xnoquithandler", "-Xjnigreflimit:ixnay", "-Xgenregmap", "-Xnogenregmap", + "-Xverifyopt:never", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:noop", + "-Xincludeselectedmethod", "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck", + "-Xjitoffset:none", "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile", + "-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=1337" + }; + + // Check they are ignored when parsed one at a time + for (auto&& arg : ignored_args) { + SCOPED_TRACE(arg); + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(arg); + } + + // Check they are ignored when we pass it all together at once + std::vector argv = ignored_args; + EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); +} // TEST_F + +TEST_F(CmdlineParserTest, MultipleArguments) { + EXPECT_TRUE(IsResultSuccessful(parser_->Parse( + "-help -XX:ForegroundHeapGrowthMultiplier=0.5 " + "-Xnodex2oat -Xmethod-trace -XX:LargeObjectSpace=map"))); + + auto&& map = parser_->ReleaseArgumentsMap(); + EXPECT_EQ(5u, map.Size()); + EXPECT_KEY_VALUE(map, M::Help, Unit{}); // NOLINT [whitespace/braces] [5] + EXPECT_KEY_VALUE(map, M::ForegroundHeapGrowthMultiplier, 0.5); + EXPECT_KEY_VALUE(map, M::Dex2Oat, false); + EXPECT_KEY_VALUE(map, M::MethodTrace, Unit{}); // NOLINT [whitespace/braces] [5] + EXPECT_KEY_VALUE(map, M::LargeObjectSpace, gc::space::LargeObjectSpaceType::kMap); +} // TEST_F 
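+// An end-to-end sketch of the builder DSL that the tests above exercise indirectly through
+// ParsedOptions::MakeParser. Illustrative only, not part of the original suite: it assumes
+// M::ParallelGCThreads is declared with value type 'unsigned int' (as in runtime_options.def)
+// and builds a one-argument parser from scratch.
+TEST_F(CmdlineParserTest, BuilderDslSketch) {
+  RuntimeParser::Builder builder;
+  builder.Define("-XX:ParallelGCThreads=_")  // '_' marks the wildcard to be parsed...
+      .WithType<unsigned int>()              // ...as an unsigned integer...
+      .WithRange(1u, 64u)                    // ...range-checked while parsing...
+      .IntoKey(M::ParallelGCThreads);        // ...and stored under this key.
+
+  RuntimeParser parser = builder.Build();
+
+  // A well-formed value lands in the map under the chosen key.
+  EXPECT_TRUE(IsResultSuccessful(parser.Parse("-XX:ParallelGCThreads=4")));
+  RuntimeArgumentMap args = parser.ReleaseArgumentsMap();
+  EXPECT_KEY_VALUE(args, M::ParallelGCThreads, 4u);
+
+  // A value outside the WithRange() bounds surfaces as kOutOfRange instead of being clamped.
+  EXPECT_TRUE(IsResultFailure(parser.Parse("-XX:ParallelGCThreads=65"),
+                              CmdlineResult::kOutOfRange));
+}  // TEST_F
+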
+}  // namespace art
diff --git a/cmdline/cmdline_result.h b/cmdline/cmdline_result.h
new file mode 100644
index 000000000..963dfc12b
--- /dev/null
+++ b/cmdline/cmdline_result.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_CMDLINE_CMDLINE_RESULT_H_
+#define ART_CMDLINE_CMDLINE_RESULT_H_
+
+#include <assert.h>
+#include <string>
+
+namespace art {
+  // Result of an attempt to process the command line arguments. On failure, it carries
+  // the specific error code and an error message.
+  // Use the value-carrying CmdlineParseResult to get an additional value out in a success case.
+  struct CmdlineResult {
+    enum Status {
+      kSuccess,
+      // Error codes:
+      kUsage,
+      kFailure,
+      kOutOfRange,
+      kUnknown,
+    };
+
+    // Short-hand for checking if the result was successful.
+    operator bool() const {
+      return IsSuccess();
+    }
+
+    // Check if the operation has succeeded.
+    bool IsSuccess() const { return status_ == kSuccess; }
+    // Check if the operation was not a success.
+    bool IsError() const { return status_ != kSuccess; }
+    // Get the specific status, regardless of whether it's failure or success.
+    Status GetStatus() const { return status_; }
+
+    // Get the error message; *must* only be called for error status results.
+    const std::string& GetMessage() const { assert(IsError()); return message_; }
+
+    // Constructor with any status. No message.
+    explicit CmdlineResult(Status status) : status_(status) {}
+
+    // Constructor with an error status, copying the message.
+    CmdlineResult(Status status, const std::string& message)
+      : status_(status), message_(message) {
+      assert(status != kSuccess);
+    }
+
+    // Constructor with an error status, taking over the message.
+    CmdlineResult(Status status, std::string&& message)
+      : status_(status), message_(std::move(message)) {
+      assert(status != kSuccess);
+    }
+
+    // Make sure copying exists.
+    CmdlineResult(const CmdlineResult&) = default;
+    // Make sure moving is cheap.
+    CmdlineResult(CmdlineResult&&) = default;
+
+   private:
+    const Status status_;
+    const std::string message_;
+  };
+
+  // TODO: code-generate this
+  static inline std::ostream& operator<<(std::ostream& stream, CmdlineResult::Status status) {
+    switch (status) {
+      case CmdlineResult::kSuccess:
+        stream << "kSuccess";
+        break;
+      case CmdlineResult::kUsage:
+        stream << "kUsage";
+        break;
+      case CmdlineResult::kFailure:
+        stream << "kFailure";
+        break;
+      case CmdlineResult::kOutOfRange:
+        stream << "kOutOfRange";
+        break;
+      case CmdlineResult::kUnknown:
+        stream << "kUnknown";
+        break;
+      default:
+        UNREACHABLE();
+    }
+    return stream;
+  }
+
+}  // namespace art
+
+#endif  // ART_CMDLINE_CMDLINE_RESULT_H_
diff --git a/cmdline/cmdline_type_parser.h b/cmdline/cmdline_type_parser.h
new file mode 100644
index 000000000..fa5cdaf90
--- /dev/null
+++ b/cmdline/cmdline_type_parser.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_CMDLINE_CMDLINE_TYPE_PARSER_H_
+#define ART_CMDLINE_CMDLINE_TYPE_PARSER_H_
+
+#include "cmdline_parse_result.h"
+
+namespace art {
+
+// Base class for user-defined CmdlineType specializations.
+//
+// Not strictly necessary, but if the specializations fail to define all of these functions
+// the compilation will fail.
+template <typename T>
+struct CmdlineTypeParser {
+  // Return value of parsing attempts. Represents a Success(T value) or an Error(int code).
+  using Result = CmdlineParseResult<T>;
+
+  // Parse a single value for an argument definition out of the wildcard component.
+  //
+  // e.g. if the argument definition was "foo:_", and the user-provided input was "foo:bar",
+  // then args is "bar".
+  Result Parse(const std::string& args ATTRIBUTE_UNUSED) {
+    assert(false);
+    return Result::Failure("Missing type specialization and/or value map");
+  }
+
+  // Parse a value and append it into the existing value so far, for argument
+  // definitions which are marked with AppendValues().
+  //
+  // The value is parsed out of the wildcard component as in Parse.
+  //
+  // If the initial value does not exist yet, a default value is created by
+  // value-initializing with 'T()'.
+  Result ParseAndAppend(const std::string& args ATTRIBUTE_UNUSED,
+                        T& existing_value ATTRIBUTE_UNUSED) {
+    assert(false);
+    return Result::Failure("Missing type specialization and/or value map");
+  }
+
+  // Runtime type name of T, so that we can print more useful error messages.
+  static const char* Name() { assert(false); return "UnspecializedType"; }
+
+  // Whether or not your type can parse argument definitions defined without a "_"
+  // e.g. -Xenable-profiler just mutates the existing profiler struct in-place
+  // so it doesn't need to do any parsing other than token recognition.
+  //
+  // If this is false, then either the argument definition has a _, from which the parsing
+  // happens, or the tokens get mapped to a value list/map from which a 1:1 matching occurs.
+  //
+  // This should almost *always* be false!
+  static constexpr bool kCanParseBlankless = false;
+
+ protected:
+  // Don't accidentally initialize instances of this directly; they will assert at runtime.
+  CmdlineTypeParser() = default;
+};
+
+
+}  // namespace art
+
+#endif  // ART_CMDLINE_CMDLINE_TYPE_PARSER_H_
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
new file mode 100644
index 000000000..4797540c3
--- /dev/null
+++ b/cmdline/cmdline_types.h
@@ -0,0 +1,867 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ART_CMDLINE_CMDLINE_TYPES_H_
+#define ART_CMDLINE_CMDLINE_TYPES_H_
+
+#define CMDLINE_NDEBUG 1  // Do not output any debugging information for parsing.
+
+#include "memory_representation.h"
+#include "detail/cmdline_debug_detail.h"
+#include "cmdline_type_parser.h"
+
+// Includes for the types that are being specialized
+#include <string>
+#include "unit.h"
+#include "jdwp/jdwp.h"
+#include "base/logging.h"
+#include "base/time_utils.h"
+#include "experimental_flags.h"
+#include "gc/collector_type.h"
+#include "gc/space/large_object_space.h"
+#include "profiler_options.h"
+
+namespace art {
+
+// The default specialization will always fail parsing the type from a string.
+// Provide your own specialization that inherits from CmdlineTypeParser<T>
+// and implements either Parse or ParseAndAppend
+// (only if the argument was defined with ::AppendValues()) but not both.
+template <typename T>
+struct CmdlineType : CmdlineTypeParser<T> {
+};
+
+// Specializations for CmdlineType<T> follow:
+
+// Parse argument definitions for Unit-typed arguments.
+template <>
+struct CmdlineType<Unit> : CmdlineTypeParser<Unit> {
+  Result Parse(const std::string& args) {
+    if (args == "") {
+      return Result::Success(Unit{});  // NOLINT [whitespace/braces] [5]
+    }
+    return Result::Failure("Unexpected extra characters " + args);
+  }
+};
+
+template <>
+struct CmdlineType<JDWP::JdwpOptions> : CmdlineTypeParser<JDWP::JdwpOptions> {
+  /*
+   * Handle one of the JDWP name/value pairs.
+   *
+   * JDWP options are:
+   *  help: if specified, show help message and bail
+   *  transport: may be dt_socket or dt_shmem
+   *  address: for dt_socket, "host:port", or just "port" when listening
+   *  server: if "y", wait for debugger to attach; if "n", attach to debugger
+   *  timeout: how long to wait for debugger to connect / listen
+   *
+   * Useful with server=n (these aren't supported yet):
+   *  onthrow=<exception-name>: connect to debugger when exception thrown
+   *  onuncaught=y|n: connect to debugger when uncaught exception thrown
+   *  launch=<command-line>: launch the debugger itself
+   *
+   * The "transport" option is required, as is "address" if server=n.
+ */ + Result Parse(const std::string& options) { + VLOG(jdwp) << "ParseJdwpOptions: " << options; + + if (options == "help") { + return Result::Usage( + "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n" + "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n\n"); + } + + const std::string s; + + std::vector pairs; + Split(options, ',', &pairs); + + JDWP::JdwpOptions jdwp_options; + + for (const std::string& jdwp_option : pairs) { + std::string::size_type equals_pos = jdwp_option.find('='); + if (equals_pos == std::string::npos) { + return Result::Failure(s + + "Can't parse JDWP option '" + jdwp_option + "' in '" + options + "'"); + } + + Result parse_attempt = ParseJdwpOption(jdwp_option.substr(0, equals_pos), + jdwp_option.substr(equals_pos + 1), + &jdwp_options); + if (parse_attempt.IsError()) { + // We fail to parse this JDWP option. + return parse_attempt; + } + } + + if (jdwp_options.transport == JDWP::kJdwpTransportUnknown) { + return Result::Failure(s + "Must specify JDWP transport: " + options); + } + if (!jdwp_options.server && (jdwp_options.host.empty() || jdwp_options.port == 0)) { + return Result::Failure(s + "Must specify JDWP host and port when server=n: " + options); + } + + return Result::Success(std::move(jdwp_options)); + } + + Result ParseJdwpOption(const std::string& name, const std::string& value, + JDWP::JdwpOptions* jdwp_options) { + if (name == "transport") { + if (value == "dt_socket") { + jdwp_options->transport = JDWP::kJdwpTransportSocket; + } else if (value == "dt_android_adb") { + jdwp_options->transport = JDWP::kJdwpTransportAndroidAdb; + } else { + return Result::Failure("JDWP transport not supported: " + value); + } + } else if (name == "server") { + if (value == "n") { + jdwp_options->server = false; + } else if (value == "y") { + jdwp_options->server = true; + } else { + return Result::Failure("JDWP option 'server' must be 'y' or 'n'"); + } + } else if (name == "suspend") { + if (value == "n") { + jdwp_options->suspend = false; + } else if (value == "y") { + jdwp_options->suspend = true; + } else { + return Result::Failure("JDWP option 'suspend' must be 'y' or 'n'"); + } + } else if (name == "address") { + /* this is either or : */ + std::string port_string; + jdwp_options->host.clear(); + std::string::size_type colon = value.find(':'); + if (colon != std::string::npos) { + jdwp_options->host = value.substr(0, colon); + port_string = value.substr(colon + 1); + } else { + port_string = value; + } + if (port_string.empty()) { + return Result::Failure("JDWP address missing port: " + value); + } + char* end; + uint64_t port = strtoul(port_string.c_str(), &end, 10); + if (*end != '\0' || port > 0xffff) { + return Result::Failure("JDWP address has junk in port field: " + value); + } + jdwp_options->port = port; + } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") { + /* valid but unsupported */ + LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'"; + } else { + LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'"; + } + + return Result::SuccessNoValue(); + } + + static const char* Name() { return "JdwpOptions"; } +}; + +template +struct CmdlineType> : CmdlineTypeParser> { + using typename CmdlineTypeParser>::Result; + + Result Parse(const std::string arg) { + CMDLINE_DEBUG_LOG << "Parsing memory: " << arg << std::endl; + size_t val = ParseMemoryOption(arg.c_str(), Divisor); + CMDLINE_DEBUG_LOG << "Memory parsed to size_t value: " << val 
<< std::endl; + + if (val == 0) { + return Result::Failure(std::string("not a valid memory value, or not divisible by ") + + std::to_string(Divisor)); + } + + return Result::Success(Memory(val)); + } + + // Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to specify + // memory sizes. [kK] indicates kilobytes, [mM] megabytes, and + // [gG] gigabytes. + // + // "s" should point just past the "-Xm?" part of the string. + // "div" specifies a divisor, e.g. 1024 if the value must be a multiple + // of 1024. + // + // The spec says the -Xmx and -Xms options must be multiples of 1024. It + // doesn't say anything about -Xss. + // + // Returns 0 (a useless size) if "s" is malformed or specifies a low or + // non-evenly-divisible value. + // + static size_t ParseMemoryOption(const char* s, size_t div) { + // strtoul accepts a leading [+-], which we don't want, + // so make sure our string starts with a decimal digit. + if (isdigit(*s)) { + char* s2; + size_t val = strtoul(s, &s2, 10); + if (s2 != s) { + // s2 should be pointing just after the number. + // If this is the end of the string, the user + // has specified a number of bytes. Otherwise, + // there should be exactly one more character + // that specifies a multiplier. + if (*s2 != '\0') { + // The remainder of the string is either a single multiplier + // character, or nothing to indicate that the value is in + // bytes. + char c = *s2++; + if (*s2 == '\0') { + size_t mul; + if (c == '\0') { + mul = 1; + } else if (c == 'k' || c == 'K') { + mul = KB; + } else if (c == 'm' || c == 'M') { + mul = MB; + } else if (c == 'g' || c == 'G') { + mul = GB; + } else { + // Unknown multiplier character. + return 0; + } + + if (val <= std::numeric_limits::max() / mul) { + val *= mul; + } else { + // Clamp to a multiple of 1024. + val = std::numeric_limits::max() & ~(1024-1); + } + } else { + // There's more than one character after the numeric part. + return 0; + } + } + // The man page says that a -Xm value must be a multiple of 1024. + if (val % div == 0) { + return val; + } + } + } + return 0; + } + + static const char* Name() { return Memory::Name(); } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + char* end = nullptr; + errno = 0; + double value = strtod(str.c_str(), &end); + + if (*end != '\0') { + return Result::Failure("Failed to parse double from " + str); + } + if (errno == ERANGE) { + return Result::OutOfRange( + "Failed to parse double from " + str + "; overflow/underflow occurred"); + } + + return Result::Success(value); + } + + static const char* Name() { return "double"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + const char* begin = str.c_str(); + char* end; + + // Parse into a larger type (long long) because we can't use strtoul + // since it silently converts negative values into unsigned long and doesn't set errno. 
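+    // (A negative input still parses as a valid long long; the explicit 'result < 0'
+    // check below rejects it instead of letting it wrap around to a huge unsigned value.)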
+ errno = 0; + long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4] + if (begin == end || *end != '\0' || errno == EINVAL) { + return Result::Failure("Failed to parse integer from " + str); + } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] + result < std::numeric_limits::min() + || result > std::numeric_limits::max() || result < 0) { + return Result::OutOfRange( + "Failed to parse integer from " + str + "; out of unsigned int range"); + } + + return Result::Success(static_cast(result)); + } + + static const char* Name() { return "unsigned integer"; } +}; + +// Lightweight nanosecond value type. Allows parser to convert user-input from milliseconds +// to nanoseconds automatically after parsing. +// +// All implicit conversion from uint64_t uses nanoseconds. +struct MillisecondsToNanoseconds { + // Create from nanoseconds. + MillisecondsToNanoseconds(uint64_t nanoseconds) : nanoseconds_(nanoseconds) { // NOLINT [runtime/explicit] [5] + } + + // Create from milliseconds. + static MillisecondsToNanoseconds FromMilliseconds(unsigned int milliseconds) { + return MillisecondsToNanoseconds(MsToNs(milliseconds)); + } + + // Get the underlying nanoseconds value. + uint64_t GetNanoseconds() const { + return nanoseconds_; + } + + // Get the milliseconds value [via a conversion]. Loss of precision will occur. + uint64_t GetMilliseconds() const { + return NsToMs(nanoseconds_); + } + + // Get the underlying nanoseconds value. + operator uint64_t() const { + return GetNanoseconds(); + } + + // Default constructors/copy-constructors. + MillisecondsToNanoseconds() : nanoseconds_(0ul) {} + MillisecondsToNanoseconds(const MillisecondsToNanoseconds&) = default; + MillisecondsToNanoseconds(MillisecondsToNanoseconds&&) = default; + + private: + uint64_t nanoseconds_; +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& str) { + CmdlineType uint_parser; + CmdlineParseResult res = uint_parser.Parse(str); + + if (res.IsSuccess()) { + return Result::Success(MillisecondsToNanoseconds::FromMilliseconds(res.GetValue())); + } else { + return Result::CastError(res); + } + } + + static const char* Name() { return "MillisecondsToNanoseconds"; } +}; + +template <> +struct CmdlineType : CmdlineTypeParser { + Result Parse(const std::string& args) { + return Result::Success(args); + } + + Result ParseAndAppend(const std::string& args, + std::string& existing_value) { + if (existing_value.empty()) { + existing_value = args; + } else { + existing_value += ' '; + existing_value += args; + } + return Result::SuccessNoValue(); + } +}; + +template <> +struct CmdlineType> : CmdlineTypeParser> { + Result Parse(const std::string& args) { + assert(false && "Use AppendValues() for a string vector type"); + return Result::Failure("Unconditional failure: string vector must be appended: " + args); + } + + Result ParseAndAppend(const std::string& args, + std::vector& existing_value) { + existing_value.push_back(args); + return Result::SuccessNoValue(); + } + + static const char* Name() { return "std::vector"; } +}; + +template +struct ParseStringList { + explicit ParseStringList(std::vector&& list) : list_(list) {} + + operator std::vector() const { + return list_; + } + + operator std::vector&&() && { + return std::move(list_); + } + + size_t Size() const { + return list_.size(); + } + + std::string Join() const { + return art::Join(list_, Separator); + } + + static ParseStringList Split(const std::string& str) { + std::vector list; + art::Split(str, Separator, 
 + +// Lightweight nanosecond value type. Allows parser to convert user-input from milliseconds +// to nanoseconds automatically after parsing. +// +// All implicit conversion from uint64_t uses nanoseconds. +struct MillisecondsToNanoseconds { + // Create from nanoseconds. + MillisecondsToNanoseconds(uint64_t nanoseconds) : nanoseconds_(nanoseconds) { // NOLINT [runtime/explicit] [5] + } + + // Create from milliseconds. + static MillisecondsToNanoseconds FromMilliseconds(unsigned int milliseconds) { + return MillisecondsToNanoseconds(MsToNs(milliseconds)); + } + + // Get the underlying nanoseconds value. + uint64_t GetNanoseconds() const { + return nanoseconds_; + } + + // Get the milliseconds value [via a conversion]. Loss of precision will occur. + uint64_t GetMilliseconds() const { + return NsToMs(nanoseconds_); + } + + // Get the underlying nanoseconds value. + operator uint64_t() const { + return GetNanoseconds(); + } + + // Default constructors/copy-constructors. + MillisecondsToNanoseconds() : nanoseconds_(0ul) {} + MillisecondsToNanoseconds(const MillisecondsToNanoseconds&) = default; + MillisecondsToNanoseconds(MillisecondsToNanoseconds&&) = default; + + private: + uint64_t nanoseconds_; +}; + +template <> +struct CmdlineType<MillisecondsToNanoseconds> : CmdlineTypeParser<MillisecondsToNanoseconds> { + Result Parse(const std::string& str) { + CmdlineType<unsigned int> uint_parser; + CmdlineParseResult<unsigned int> res = uint_parser.Parse(str); + + if (res.IsSuccess()) { + return Result::Success(MillisecondsToNanoseconds::FromMilliseconds(res.GetValue())); + } else { + return Result::CastError(res); + } + } + + static const char* Name() { return "MillisecondsToNanoseconds"; } +};
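
The wrapper above exists so that millisecond-valued options can be stored in nanoseconds without each consumer converting by hand. In miniature (a standalone sketch, not the ART type, assuming the usual 1 ms = 1,000,000 ns):

#include <cassert>
#include <cstdint>

struct MsToNsSketch {
  static MsToNsSketch FromMilliseconds(uint64_t ms) { return MsToNsSketch{ms * 1000000u}; }
  operator uint64_t() const { return ns_; }  // readers always see nanoseconds
  uint64_t ns_;
};

int main() {
  MsToNsSketch t = MsToNsSketch::FromMilliseconds(20);
  assert(static_cast<uint64_t>(t) == 20000000u);  // 20 ms surfaced as 20,000,000 ns
  return 0;
}
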
 + +template <> +struct CmdlineType<std::string> : CmdlineTypeParser<std::string> { + Result Parse(const std::string& args) { + return Result::Success(args); + } + + Result ParseAndAppend(const std::string& args, + std::string& existing_value) { + if (existing_value.empty()) { + existing_value = args; + } else { + existing_value += ' '; + existing_value += args; + } + return Result::SuccessNoValue(); + } +}; + +template <> +struct CmdlineType<std::vector<std::string>> : CmdlineTypeParser<std::vector<std::string>> { + Result Parse(const std::string& args) { + assert(false && "Use AppendValues() for a string vector type"); + return Result::Failure("Unconditional failure: string vector must be appended: " + args); + } + + Result ParseAndAppend(const std::string& args, + std::vector<std::string>& existing_value) { + existing_value.push_back(args); + return Result::SuccessNoValue(); + } + + static const char* Name() { return "std::vector<std::string>"; } +}; + +template <char Separator> +struct ParseStringList { + explicit ParseStringList(std::vector<std::string>&& list) : list_(list) {} + + operator std::vector<std::string>() const { + return list_; + } + + operator std::vector<std::string>&&() && { + return std::move(list_); + } + + size_t Size() const { + return list_.size(); + } + + std::string Join() const { + return art::Join(list_, Separator); + } + + static ParseStringList<Separator> Split(const std::string& str) { + std::vector<std::string> list; + art::Split(str, Separator, &list); + return ParseStringList<Separator>(std::move(list)); + } + + ParseStringList() = default; + ParseStringList(const ParseStringList&) = default; + ParseStringList(ParseStringList&&) = default; + + private: + std::vector<std::string> list_; +}; + +template <char Separator> +struct CmdlineType<ParseStringList<Separator>> : CmdlineTypeParser<ParseStringList<Separator>> { + using Result = CmdlineParseResult<ParseStringList<Separator>>; + + Result Parse(const std::string& args) { + return Result::Success(ParseStringList<Separator>::Split(args)); + } + + static const char* Name() { return "ParseStringList<Separator>"; } +};
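
ParseStringList is the vehicle for ':'-separated option values (boot class path and friends). The splitting it delegates to art::Split amounts to roughly this (standalone illustration using std::getline rather than the in-tree helper):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  std::string classpath = "/system/a.jar:/system/b.jar";
  std::vector<std::string> parts;
  std::stringstream ss(classpath);
  for (std::string item; std::getline(ss, item, ':');) {
    parts.push_back(item);  // one token per ':'-separated element
  }
  std::cout << parts.size() << "\n";  // prints 2
  return 0;
}
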
 + +static gc::CollectorType ParseCollectorType(const std::string& option) { + if (option == "MS" || option == "nonconcurrent") { + return gc::kCollectorTypeMS; + } else if (option == "CMS" || option == "concurrent") { + return gc::kCollectorTypeCMS; + } else if (option == "SS") { + return gc::kCollectorTypeSS; + } else if (option == "GSS") { + return gc::kCollectorTypeGSS; + } else if (option == "CC") { + return gc::kCollectorTypeCC; + } else if (option == "MC") { + return gc::kCollectorTypeMC; + } else { + return gc::kCollectorTypeNone; + } +} + +struct XGcOption { + // These defaults are used when the command line arguments for -Xgc: + // are either omitted completely or partially. + gc::CollectorType collector_type_ = kUseReadBarrier ? + // If RB is enabled (currently a build-time decision), + // use CC as the default GC. + gc::kCollectorTypeCC : + gc::kCollectorTypeDefault; + bool verify_pre_gc_heap_ = false; + bool verify_pre_sweeping_heap_ = kIsDebugBuild; + bool verify_post_gc_heap_ = false; + bool verify_pre_gc_rosalloc_ = kIsDebugBuild; + bool verify_pre_sweeping_rosalloc_ = false; + bool verify_post_gc_rosalloc_ = false; + bool gcstress_ = false; +}; + +template <> +struct CmdlineType<XGcOption> : CmdlineTypeParser<XGcOption> { + Result Parse(const std::string& option) { // -Xgc: already stripped + XGcOption xgc{}; // NOLINT [readability/braces] [4] + + std::vector<std::string> gc_options; + Split(option, ',', &gc_options); + for (const std::string& gc_option : gc_options) { + gc::CollectorType collector_type = ParseCollectorType(gc_option); + if (collector_type != gc::kCollectorTypeNone) { + xgc.collector_type_ = collector_type; + } else if (gc_option == "preverify") { + xgc.verify_pre_gc_heap_ = true; + } else if (gc_option == "nopreverify") { + xgc.verify_pre_gc_heap_ = false; + } else if (gc_option == "presweepingverify") { + xgc.verify_pre_sweeping_heap_ = true; + } else if (gc_option == "nopresweepingverify") { + xgc.verify_pre_sweeping_heap_ = false; + } else if (gc_option == "postverify") { + xgc.verify_post_gc_heap_ = true; + } else if (gc_option == "nopostverify") { + xgc.verify_post_gc_heap_ = false; + } else if (gc_option == "preverify_rosalloc") { + xgc.verify_pre_gc_rosalloc_ = true; + } else if (gc_option == "nopreverify_rosalloc") { + xgc.verify_pre_gc_rosalloc_ = false; + } else if (gc_option == "presweepingverify_rosalloc") { + xgc.verify_pre_sweeping_rosalloc_ = true; + } else if (gc_option == "nopresweepingverify_rosalloc") { + xgc.verify_pre_sweeping_rosalloc_ = false; + } else if (gc_option == "postverify_rosalloc") { + xgc.verify_post_gc_rosalloc_ = true; + } else if (gc_option == "nopostverify_rosalloc") { + xgc.verify_post_gc_rosalloc_ = false; + } else if (gc_option == "gcstress") { + xgc.gcstress_ = true; + } else if (gc_option == "nogcstress") { + xgc.gcstress_ = false; + } else if ((gc_option == "precise") || + (gc_option == "noprecise") || + (gc_option == "verifycardtable") || + (gc_option == "noverifycardtable")) { + // Ignored for backwards compatibility. + } else { + return Result::Usage(std::string("Unknown -Xgc option ") + gc_option); + } + } + + return Result::Success(std::move(xgc)); + } + + static const char* Name() { return "XgcOption"; } +};
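
A hypothetical driver for the parser above, to make the accepted grammar concrete (the names are from this file, but the snippet assumes the surrounding header and is not compilable on its own):

// CmdlineType<XGcOption> parser;
// auto res = parser.Parse("CC,preverify,nopostverify");
// On success: res.GetValue().collector_type_ == gc::kCollectorTypeCC,
//             verify_pre_gc_heap_ == true, verify_post_gc_heap_ == false.
// An unrecognized name such as "foo" yields Result::Usage("Unknown -Xgc option foo").
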
 + +struct BackgroundGcOption { + // If background_collector_type_ is kCollectorTypeNone, it defaults to the + // XGcOption::collector_type_ after parsing options. If you set this to + // kCollectorTypeHSpaceCompact then we will do an hspace compaction when + // we transition to background instead of a normal collector transition. + gc::CollectorType background_collector_type_; + + BackgroundGcOption(gc::CollectorType background_collector_type) // NOLINT [runtime/explicit] [5] + : background_collector_type_(background_collector_type) {} + BackgroundGcOption() + : background_collector_type_(gc::kCollectorTypeNone) { + + if (kUseReadBarrier) { + background_collector_type_ = gc::kCollectorTypeCC; // Disable background compaction for CC. + } + } + + operator gc::CollectorType() const { return background_collector_type_; } +}; + +template<> +struct CmdlineType<BackgroundGcOption> + : CmdlineTypeParser<BackgroundGcOption>, private BackgroundGcOption { + Result Parse(const std::string& substring) { + // Special handling for HSpaceCompact since this is only valid as a background GC type. + if (substring == "HSpaceCompact") { + background_collector_type_ = gc::kCollectorTypeHomogeneousSpaceCompact; + } else { + gc::CollectorType collector_type = ParseCollectorType(substring); + if (collector_type != gc::kCollectorTypeNone) { + background_collector_type_ = collector_type; + } else { + return Result::Failure(); + } + } + + BackgroundGcOption res = *this; + return Result::Success(res); + } + + static const char* Name() { return "BackgroundGcOption"; } +}; + +template <> +struct CmdlineType<LogVerbosity> : CmdlineTypeParser<LogVerbosity> { + Result Parse(const std::string& options) { + LogVerbosity log_verbosity = LogVerbosity(); + + std::vector<std::string> verbose_options; + Split(options, ',', &verbose_options); + for (size_t j = 0; j < verbose_options.size(); ++j) { + if (verbose_options[j] == "class") { + log_verbosity.class_linker = true; + } else if (verbose_options[j] == "collector") { + log_verbosity.collector = true; + } else if (verbose_options[j] == "compiler") { + log_verbosity.compiler = true; + } else if (verbose_options[j] == "deopt") { + log_verbosity.deopt = true; + } else if (verbose_options[j] == "gc") { + log_verbosity.gc = true; + } else if (verbose_options[j] == "heap") { + log_verbosity.heap = true; + } else if (verbose_options[j] == "jdwp") { + log_verbosity.jdwp = true; + } else if (verbose_options[j] == "jit") { + log_verbosity.jit = true; + } else if (verbose_options[j] == "jni") { + log_verbosity.jni = true; + } else if (verbose_options[j] == "monitor") { + log_verbosity.monitor = true; + } else if (verbose_options[j] == "oat") { + log_verbosity.oat = true; + } else if (verbose_options[j] == "profiler") { + log_verbosity.profiler = true; + } else if (verbose_options[j] == "signals") { + log_verbosity.signals = true; + } else if (verbose_options[j] == "simulator") { + log_verbosity.simulator = true; + } else if (verbose_options[j] == "startup") { + log_verbosity.startup = true; + } else if (verbose_options[j] == "third-party-jni") { + log_verbosity.third_party_jni = true; + } else if (verbose_options[j] == "threads") { + log_verbosity.threads = true; + } else if (verbose_options[j] == "verifier") { + log_verbosity.verifier = true; + } else if (verbose_options[j] == "image") { + log_verbosity.image = true; + } else if (verbose_options[j] == "systrace-locks") { + log_verbosity.systrace_lock_logging = true; + } else { + return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]); + } + } + + return Result::Success(log_verbosity); + } + + static const char* Name() { return "LogVerbosity"; } +}; + +// TODO: Replace with art::ProfilerOptions for the real thing. +struct TestProfilerOptions { + // Whether or not the applications should be profiled. + bool enabled_; + // Destination file name where the profiling data will be saved into. + std::string output_file_name_; + // Generate profile every n seconds. + uint32_t period_s_; + // Run profile for n seconds. + uint32_t duration_s_; + // Microseconds between samples. + uint32_t interval_us_; + // Coefficient to exponential backoff. + double backoff_coefficient_; + // Whether the profile should start upon app startup or be delayed by some random offset. + bool start_immediately_; + // Top K% of samples that are considered relevant when deciding if the app should be recompiled. + double top_k_threshold_; + // How much the top K% samples need to change in order for the app to be recompiled. + double top_k_change_threshold_; + // The type of profile data dumped to the disk. + ProfileDataType profile_type_; + // The max depth of the stack collected by the profiler. + uint32_t max_stack_depth_; + + TestProfilerOptions() : + enabled_(false), + output_file_name_(), + period_s_(0), + duration_s_(0), + interval_us_(0), + backoff_coefficient_(0), + start_immediately_(false), + top_k_threshold_(0), + top_k_change_threshold_(0), + profile_type_(ProfileDataType::kProfilerMethod), + max_stack_depth_(0) { + } + + TestProfilerOptions(const TestProfilerOptions&) = default; + TestProfilerOptions(TestProfilerOptions&&) = default; +}; + +static inline std::ostream& operator<<(std::ostream& stream, const TestProfilerOptions& options) { + stream << "TestProfilerOptions {" << std::endl; + +#define PRINT_TO_STREAM(field) \ + stream << #field << ": '" << options.field << "'" << std::endl; + + PRINT_TO_STREAM(enabled_); + PRINT_TO_STREAM(output_file_name_); + PRINT_TO_STREAM(period_s_); + PRINT_TO_STREAM(duration_s_); + PRINT_TO_STREAM(interval_us_); + PRINT_TO_STREAM(backoff_coefficient_); + PRINT_TO_STREAM(start_immediately_); + PRINT_TO_STREAM(top_k_threshold_); + PRINT_TO_STREAM(top_k_change_threshold_); + PRINT_TO_STREAM(profile_type_); + PRINT_TO_STREAM(max_stack_depth_); + + stream << "}"; + + return stream; +#undef PRINT_TO_STREAM +} + +template <> +struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions> { + using Result = CmdlineParseResult<TestProfilerOptions>; + + private: + using StringResult = CmdlineParseResult<std::string>; + using DoubleResult = CmdlineParseResult<double>; + + template <typename T> + static Result ParseInto(TestProfilerOptions& options, + T TestProfilerOptions::*pField, + CmdlineParseResult<T>&& result) { + assert(pField != nullptr); + + if (result.IsSuccess()) { + options.*pField = result.ReleaseValue(); + return Result::SuccessNoValue(); + } + + return Result::CastError(result); + } + + template <typename T> + static Result ParseIntoRangeCheck(TestProfilerOptions& options, + T TestProfilerOptions::*pField, + CmdlineParseResult<T>&& result, + T min, + T max) { + if (result.IsSuccess()) { + const T& value = result.GetValue(); + + if (value < min || value > max) { + CmdlineParseResult<T> out_of_range = CmdlineParseResult<T>::OutOfRange(value, min, max); + return Result::CastError(out_of_range); + } + } + + return ParseInto(options, pField, std::forward<CmdlineParseResult<T>>(result)); + } + + static StringResult ParseStringAfterChar(const std::string& s, char c) { + std::string parsed_value; + + std::string::size_type colon = s.find(c); + if (colon == std::string::npos) { + return StringResult::Usage(std::string() + "Missing char " + c + " in option " + s); + } + // Add one to remove the char we were trimming until. + parsed_value = s.substr(colon + 1); + return StringResult::Success(parsed_value); + } + + static std::string RemovePrefix(const std::string& source) { + size_t prefix_idx = source.find(":"); + + if (prefix_idx == std::string::npos) { + return ""; + } + + return source.substr(prefix_idx + 1); + } + + public: + Result ParseAndAppend(const std::string& option, TestProfilerOptions& existing) { + // Special case which doesn't include a wildcard argument definition. + // We pass it through as-is. + if (option == "-Xenable-profiler") { + existing.enabled_ = true; + return Result::SuccessNoValue(); + } + + // The rest of these options are always the wildcard from '-Xprofile-*' + std::string suffix = RemovePrefix(option); + + if (StartsWith(option, "filename:")) { + CmdlineType<std::string> type_parser; + + return ParseInto(existing, + &TestProfilerOptions::output_file_name_, + type_parser.Parse(suffix)); + } else if (StartsWith(option, "period:")) { + CmdlineType<unsigned int> type_parser; + + return ParseInto(existing, + &TestProfilerOptions::period_s_, + type_parser.Parse(suffix)); + } else if (StartsWith(option, "duration:")) { + CmdlineType<unsigned int> type_parser; + + return ParseInto(existing, + &TestProfilerOptions::duration_s_, + type_parser.Parse(suffix)); + } else if (StartsWith(option, "interval:")) { + CmdlineType<unsigned int> type_parser; + + return ParseInto(existing, + &TestProfilerOptions::interval_us_, + type_parser.Parse(suffix)); + } else if (StartsWith(option, "backoff:")) { + CmdlineType<double> type_parser; + + return ParseIntoRangeCheck(existing, + &TestProfilerOptions::backoff_coefficient_, + type_parser.Parse(suffix), + 1.0, + 10.0); + + } else if (option == "start-immediately") { + existing.start_immediately_ = true; + return Result::SuccessNoValue(); + } else if (StartsWith(option, "top-k-threshold:")) { + CmdlineType<double> type_parser; + + return ParseIntoRangeCheck(existing, + &TestProfilerOptions::top_k_threshold_, + type_parser.Parse(suffix), + 0.0, + 100.0); + } else if (StartsWith(option, "top-k-change-threshold:")) { + CmdlineType<double> type_parser; + + return ParseIntoRangeCheck(existing, + &TestProfilerOptions::top_k_change_threshold_, + type_parser.Parse(suffix), + 0.0, + 100.0); + } else if (option == "type:method") { + existing.profile_type_ = kProfilerMethod; + return Result::SuccessNoValue(); + } else if (option == "type:stack") { + existing.profile_type_ = kProfilerBoundedStack; + return Result::SuccessNoValue(); + } else if (StartsWith(option, "max-stack-depth:")) { + CmdlineType<unsigned int> type_parser; + + return ParseInto(existing, + &TestProfilerOptions::max_stack_depth_, + type_parser.Parse(suffix)); + } else { + return Result::Failure(std::string("Invalid suboption '") + option + "'"); + } + } + + static const char* Name() { return "TestProfilerOptions"; } + static constexpr bool kCanParseBlankless = true; +}; + +template<> +struct CmdlineType<ExperimentalFlags> : CmdlineTypeParser<ExperimentalFlags> { + Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) { + if (option == "none") { + existing = ExperimentalFlags::kNone; + } else if (option == "lambdas") { + existing = existing | ExperimentalFlags::kLambdas; + } else { + return Result::Failure(std::string("Unknown option '") + option + "'"); + } + return Result::SuccessNoValue(); + } + + static const char* Name() { return "ExperimentalFlags"; } +};
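
Hypothetical usage of the appending parser above (ParseAndAppend mutates the existing value rather than producing a new one); this fragment assumes the surrounding header and is not standalone:

// CmdlineType<ExperimentalFlags> parser;
// ExperimentalFlags flags = ExperimentalFlags::kNone;
// parser.ParseAndAppend("lambdas", flags);  // flags |= kLambdas
// parser.ParseAndAppend("none", flags);     // resets to kNone
// parser.ParseAndAppend("what", flags);     // Result::Failure("Unknown option 'what'")
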
 + +} // namespace art +#endif // ART_CMDLINE_CMDLINE_TYPES_H_ diff --git a/cmdline/detail/cmdline_debug_detail.h b/cmdline/detail/cmdline_debug_detail.h new file mode 100644 index 000000000..79028f490 --- /dev/null +++ b/cmdline/detail/cmdline_debug_detail.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_ + +#include <iostream> +#ifndef CMDLINE_NDEBUG +#define CMDLINE_DEBUG_LOG std::cerr +#else +#define CMDLINE_DEBUG_LOG ::art::detail::debug_log_ignore() +#endif + +namespace art { + // Implementation details for some template querying. Don't look inside if you hate templates. + namespace detail { + struct debug_log_ignore { + // Ignore most of the normal operator<< usage. + template <typename T> + debug_log_ignore& operator<<(const T&) { return *this; } + // Ignore std::endl and the like. + debug_log_ignore& operator<<(std::ostream& (*)(std::ostream&) ) { return *this; } + }; + } // namespace detail // NOLINT [readability/namespace] [5] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_
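
The debug_log_ignore trick above is the classic "null stream": in release builds every << compiles down to a no-op. Stripped of the ART context (standalone illustration):

#include <iostream>

struct NullLog {
  template <typename T> NullLog& operator<<(const T&) { return *this; }
  NullLog& operator<<(std::ostream& (*)(std::ostream&)) { return *this; }  // swallows std::endl
};

int main() {
  NullLog log;
  log << "expensive debug text " << 42 << std::endl;  // compiles, prints nothing
  return 0;
}
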
 diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h new file mode 100644 index 000000000..4b56804ea --- /dev/null +++ b/cmdline/detail/cmdline_parse_argument_detail.h @@ -0,0 +1,503 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ + +#include <algorithm> +#include <functional> +#include <memory> +#include <numeric> +#include <string> +#include <type_traits> +#include <vector> + +#include "cmdline_parse_result.h" +#include "cmdline_types.h" +#include "token_range.h" +#include "unit.h" + +namespace art { + // Implementation details for the parser. Do not look inside if you hate templates. + namespace detail { + // A non-templated base class for argument parsers. Used by the general parser + // to parse arguments, without needing to know the argument type at compile time. + // + // This is an application of the type erasure idiom. + struct CmdlineParseArgumentAny { + virtual ~CmdlineParseArgumentAny() {} + + // Attempt to parse this argument starting at arguments[position]. + // If the parsing succeeds, the parsed value will be saved as a side-effect. + // + // In most situations, the parsing will not match, returning kUnknown. In this case, + // no tokens were consumed and the position variable will not be updated. + // + // At other times, parsing may fail due to validation but the initial token was still matched + // (for example an out of range value, or passing in a string where an int was expected). + // In this case the tokens are still consumed, and the position variable will get incremented + // by all the consumed tokens. + // + // The # of tokens consumed by the parse attempt will be set as an out-parameter into + // consumed_tokens. The parser should skip this many tokens before parsing the next + // argument. + virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) = 0; + // How many tokens should be taken off argv for parsing this argument. + // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space). + // + // A [min,max] range is returned to represent argument definitions with multiple + // value tokens. (e.g. {"-h", "-h " } would return [1,2]). + virtual std::pair<size_t, size_t> GetNumTokens() const = 0; + // Get the run-time typename of the argument type. + virtual const char* GetTypeName() const = 0; + // Try to do a close match, returning how many tokens were matched against this argument + // definition. More tokens is better. + // + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them are only matched up until the wildcard. + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + // + // Returns how many tokens were either matched (or ignored because there was a + // wildcard present). 0 means no match; if Size() tokens are returned, the match was exact. + virtual size_t MaybeMatches(const TokenRange& tokens) = 0; + };
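
CmdlineParseArgumentAny is the type-erasure boundary: the top-level parser holds these by pointer and never learns the argument type. The idiom in miniature (standalone sketch, not the ART classes):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct ParserAny {                       // non-templated interface
  virtual ~ParserAny() {}
  virtual bool TryParse(const std::string& token) = 0;
};

template <typename T>
struct TypedParser : ParserAny {         // templated implementation hidden behind it
  bool TryParse(const std::string& token) override {
    // ... parse token into a T here; this sketch only checks non-emptiness ...
    return !token.empty();
  }
};

int main() {
  std::vector<std::unique_ptr<ParserAny>> args;
  args.emplace_back(new TypedParser<int>());
  args.emplace_back(new TypedParser<double>());
  for (auto& a : args) std::cout << a->TryParse("1") << "\n";  // uniform virtual dispatch
  return 0;
}
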
 + + template <typename T> + using EnableIfNumeric = std::enable_if<std::is_arithmetic<T>::value>; + + template <typename T> + using DisableIfNumeric = std::enable_if<!std::is_arithmetic<T>::value>; + + // Argument definition information, created by an ArgumentBuilder and an UntypedArgumentBuilder. + template <typename TArg> + struct CmdlineParserArgumentInfo { + // This version will only be used if TArg is arithmetic and thus has the <= operators. + template <typename T = TArg> // Necessary to get SFINAE to kick in. + bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = 0) { + if (has_range_) { + return min_ <= value && value <= max_; + } + return true; + } + + // This version will be used at other times when TArg is not arithmetic. + template <typename T = TArg> + bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = 0) { + assert(!has_range_); + return true; + } + + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them only match the prefix up until the wildcard. + // + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + size_t MaybeMatches(TokenRange token_list) const { + auto best_match = FindClosestMatch(token_list); + + return best_match.second; + } + + // Attempt to find the closest match (see MaybeMatches). + // + // Returns the token range that was the closest match and the # of tokens that + // this range was matched up until. + std::pair<const TokenRange*, size_t> FindClosestMatch(TokenRange token_list) const { + const TokenRange* best_match_ptr = nullptr; + + size_t best_match = 0; + for (auto&& token_range : tokenized_names_) { + size_t this_match = token_range.MaybeMatches(token_list, std::string("_")); + + if (this_match > best_match) { + best_match_ptr = &token_range; + best_match = this_match; + } + } + + return std::make_pair(best_match_ptr, best_match); + } + + // Mark the argument definition as completed, do not mutate the object anymore after this + // call is done. + // + // Performs several sanity checks and token calculations. + void CompleteArgument() { + assert(names_.size() >= 1); + assert(!is_completed_); + + is_completed_ = true; + + size_t blank_count = 0; + size_t token_count = 0; + + size_t global_blank_count = 0; + size_t global_token_count = 0; + for (auto&& name : names_) { + std::string s(name); + + size_t local_blank_count = std::count(s.begin(), s.end(), '_'); + size_t local_token_count = std::count(s.begin(), s.end(), ' '); + + if (global_blank_count != 0) { + assert(local_blank_count == global_blank_count + && "Every argument descriptor string must have same amount of blanks (_)"); + } + + if (local_blank_count != 0) { + global_blank_count = local_blank_count; + blank_count++; + + assert(local_blank_count == 1 && "More than one blank is not supported"); + assert(s.back() == '_' && "The blank character must only be at the end of the string"); + } + + if (global_token_count != 0) { + assert(local_token_count == global_token_count + && "Every argument descriptor string must have same amount of tokens (spaces)"); + } + + if (local_token_count != 0) { + global_token_count = local_token_count; + token_count++; + } + + // Tokenize every name, turning it from a string to a token list. + tokenized_names_.clear(); + for (auto&& name1 : names_) { + // Split along ' ' only, removing any duplicated spaces. + tokenized_names_.push_back( + TokenRange::Split(name1, {' '}).RemoveToken(" ")); + } + + // remove the _ character from each of the token ranges + // we will often end up with an empty token (i.e.
["-XX", "_"] -> ["-XX", ""] + // and this is OK because we still need an empty token to simplify + // range comparisons + simple_names_.clear(); + + for (auto&& tokenized_name : tokenized_names_) { + simple_names_.push_back(tokenized_name.RemoveCharacter('_')); + } + } + + if (token_count != 0) { + assert(("Every argument descriptor string must have equal amount of tokens (spaces)" && + token_count == names_.size())); + } + + if (blank_count != 0) { + assert(("Every argument descriptor string must have an equal amount of blanks (_)" && + blank_count == names_.size())); + } + + using_blanks_ = blank_count > 0; + { + size_t smallest_name_token_range_size = + std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), ~(0u), + [](size_t min, const TokenRange& cur) { + return std::min(min, cur.Size()); + }); + size_t largest_name_token_range_size = + std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), 0u, + [](size_t max, const TokenRange& cur) { + return std::max(max, cur.Size()); + }); + + token_range_size_ = std::make_pair(smallest_name_token_range_size, + largest_name_token_range_size); + } + + if (has_value_list_) { + assert(names_.size() == value_list_.size() + && "Number of arg descriptors must match number of values"); + assert(!has_value_map_); + } + if (has_value_map_) { + if (!using_blanks_) { + assert(names_.size() == value_map_.size() && + "Since no blanks were specified, each arg is mapped directly into a mapped " + "value without parsing; sizes must match"); + } + + assert(!has_value_list_); + } + + if (!using_blanks_ && !CmdlineType::kCanParseBlankless) { + assert((has_value_map_ || has_value_list_) && + "Arguments without a blank (_) must provide either a value map or a value list"); + } + + TypedCheck(); + } + + // List of aliases for a single argument definition, e.g. {"-Xdex2oat", "-Xnodex2oat"}. + std::vector names_; + // Is there at least 1 wildcard '_' in the argument definition? + bool using_blanks_ = false; + // [min, max] token counts in each arg def + std::pair token_range_size_; + + // contains all the names in a tokenized form, i.e. as a space-delimited list + std::vector tokenized_names_; + + // contains the tokenized names, but with the _ character stripped + std::vector simple_names_; + + // For argument definitions created with '.AppendValues()' + // Meaning that parsing should mutate the existing value in-place if possible. + bool appending_values_ = false; + + // For argument definitions created with '.WithRange(min, max)' + bool has_range_ = false; + TArg min_; + TArg max_; + + // For argument definitions created with '.WithValueMap' + bool has_value_map_ = false; + std::vector> value_map_; + + // For argument definitions created with '.WithValues' + bool has_value_list_ = false; + std::vector value_list_; + + // Make sure there's a default constructor. + CmdlineParserArgumentInfo() = default; + + // Ensure there's a default move constructor. + CmdlineParserArgumentInfo(CmdlineParserArgumentInfo&&) = default; + + private: + // Perform type-specific checks at runtime. + template + void TypedCheck(typename std::enable_if::value>::type* = 0) { + assert(!using_blanks_ && + "Blanks are not supported in Unit arguments; since a Unit has no parse-able value"); + } + + void TypedCheck() {} + + bool is_completed_ = false; + }; + + // A virtual-implementation of the necessary argument information in order to + // be able to parse arguments. 
 + template <typename TArg> + struct CmdlineParseArgument : CmdlineParseArgumentAny { + CmdlineParseArgument(CmdlineParserArgumentInfo<TArg>&& argument_info, + std::function<void(const TArg&)>&& save_argument, + std::function<TArg&(void)>&& load_argument) + : argument_info_(std::forward<decltype(argument_info)>(argument_info)), + save_argument_(std::forward<decltype(save_argument)>(save_argument)), + load_argument_(std::forward<decltype(load_argument)>(load_argument)) { + } + + using UserTypeInfo = CmdlineType<TArg>; + + virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) { + assert(arguments.Size() > 0); + assert(consumed_tokens != nullptr); + + auto closest_match_res = argument_info_.FindClosestMatch(arguments); + size_t best_match_size = closest_match_res.second; + const TokenRange* best_match_arg_def = closest_match_res.first; + + if (best_match_size > arguments.Size()) { + // The best match has more tokens than were provided. + // Shouldn't happen in practice since the outer parser does this check. + return CmdlineResult(CmdlineResult::kUnknown, "Size mismatch"); + } + + assert(best_match_arg_def != nullptr); + *consumed_tokens = best_match_arg_def->Size(); + + if (!argument_info_.using_blanks_) { + return ParseArgumentSingle(arguments.Join(' ')); + } + + // Extract out the blank value from arguments + // e.g. for a def of "foo:_" and input "foo:bar", blank_value == "bar" + std::string blank_value = ""; + size_t idx = 0; + for (auto&& def_token : *best_match_arg_def) { + auto&& arg_token = arguments[idx]; + + // Does this definition-token have a wildcard in it? + if (def_token.find('_') == std::string::npos) { + // No, regular token. Match 1:1 against the argument token. + bool token_match = def_token == arg_token; + + if (!token_match) { + return CmdlineResult(CmdlineResult::kFailure, + std::string("Failed to parse ") + best_match_arg_def->GetToken(0) + + " at token " + std::to_string(idx)); + } + } else { + // This is a wild-carded token. + TokenRange def_split_wildcards = TokenRange::Split(def_token, {'_'}); + + // Extract the wildcard contents out of the user-provided arg_token. + std::unique_ptr<TokenRange> arg_matches = + def_split_wildcards.MatchSubstrings(arg_token, "_"); + if (arg_matches == nullptr) { + return CmdlineResult(CmdlineResult::kFailure, + std::string("Failed to parse ") + best_match_arg_def->GetToken(0) + + ", with a wildcard pattern " + def_token + + " at token " + std::to_string(idx)); + } + + // Get the corresponding wildcard tokens from arg_matches, + // and concatenate it to blank_value. + for (size_t sub_idx = 0; + sub_idx < def_split_wildcards.Size() && sub_idx < arg_matches->Size(); ++sub_idx) { + if (def_split_wildcards[sub_idx] == "_") { + blank_value += arg_matches->GetToken(sub_idx); + } + } + } + + ++idx; + } + + return ParseArgumentSingle(blank_value); + } + + private: + virtual CmdlineResult ParseArgumentSingle(const std::string& argument) { + // TODO: refactor to use LookupValue for the value lists/maps + + // Handle the 'WithValueMap(...)' argument definition + if (argument_info_.has_value_map_) { + for (auto&& value_pair : argument_info_.value_map_) { + const char* name = value_pair.first; + + if (argument == name) { + return SaveArgument(value_pair.second); + } + } + + // Error case: Fail, telling the user what the allowed values were.
 + std::vector<std::string> allowed_values; + for (auto&& value_pair : argument_info_.value_map_) { + const char* name = value_pair.first; + allowed_values.push_back(name); + } + + std::string allowed_values_flat = Join(allowed_values, ','); + return CmdlineResult(CmdlineResult::kFailure, + "Argument value '" + argument + "' does not match any of known valid " + "values: {" + allowed_values_flat + "}"); + } + + // Handle the 'WithValues(...)' argument definition + if (argument_info_.has_value_list_) { + size_t arg_def_idx = 0; + for (auto&& value : argument_info_.value_list_) { + auto&& arg_def_token = argument_info_.names_[arg_def_idx]; + + if (arg_def_token == argument) { + return SaveArgument(value); + } + ++arg_def_idx; + } + + assert(arg_def_idx == argument_info_.value_list_.size() && + "Number of named argument definitions must match number of values defined"); + + // Error case: Fail, telling the user what the allowed values were. + std::vector<std::string> allowed_values; + for (auto&& arg_name : argument_info_.names_) { + allowed_values.push_back(arg_name); + } + + std::string allowed_values_flat = Join(allowed_values, ','); + return CmdlineResult(CmdlineResult::kFailure, + "Argument value '" + argument + "' does not match any of known valid " + "values: {" + allowed_values_flat + "}"); + } + + // Handle the regular case where we parsed an unknown value from a blank. + UserTypeInfo type_parser; + + if (argument_info_.appending_values_) { + TArg& existing = load_argument_(); + CmdlineParseResult<TArg> result = type_parser.ParseAndAppend(argument, existing); + + assert(!argument_info_.has_range_); + + return result; + } + + CmdlineParseResult<TArg> result = type_parser.Parse(argument); + + if (result.IsSuccess()) { + TArg& value = result.GetValue(); + + // Do a range check for 'WithRange(min,max)' argument definition. + if (!argument_info_.CheckRange(value)) { + return CmdlineParseResult<TArg>::OutOfRange( + value, argument_info_.min_, argument_info_.max_); + } + + return SaveArgument(value); + } + + // Some kind of type-specific parse error. Pass the result as-is. + CmdlineResult raw_result = std::move(result); + return raw_result; + } + + public: + virtual const char* GetTypeName() const { + // TODO: Obviate the need for each type specialization to hardcode the type name + return UserTypeInfo::Name(); + } + + // How many tokens should be taken off argv for parsing this argument. + // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space). + // + // A [min,max] range is returned to represent argument definitions with multiple + // value tokens. (e.g. {"-h", "-h " } would return [1,2]). + virtual std::pair<size_t, size_t> GetNumTokens() const { + return argument_info_.token_range_size_; + } + + // See if this token range might begin the same as the argument definition.
 + virtual size_t MaybeMatches(const TokenRange& tokens) { + return argument_info_.MaybeMatches(tokens); + } + + private: + CmdlineResult SaveArgument(const TArg& value) { + assert(!argument_info_.appending_values_ + && "If the values are being appended, then the updated parse value is " + "updated by-ref as a side effect and shouldn't be stored directly"); + TArg val = value; + save_argument_(val); + return CmdlineResult(CmdlineResult::kSuccess); + } + + CmdlineParserArgumentInfo<TArg> argument_info_; + std::function<void(const TArg&)> save_argument_; + std::function<TArg&(void)> load_argument_; + }; + } // namespace detail // NOLINT [readability/namespace] [5] [whitespace/comments] [2] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_ diff --git a/cmdline/detail/cmdline_parser_detail.h b/cmdline/detail/cmdline_parser_detail.h new file mode 100644 index 000000000..9b43bb0f5 --- /dev/null +++ b/cmdline/detail/cmdline_parser_detail.h @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_ +#define ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_ + +#include <sstream> +#include <string> +#include <type_traits> + +namespace art { + // Implementation details for some template querying. Don't look inside if you hate templates. + namespace detail { + template <typename T> + typename std::remove_reference<T>::type& FakeReference(); + + // SupportsInsertionOperator<T, TStream>::value will evaluate to a boolean, + // whose value is true if the TStream class supports the << operator against T, + // and false otherwise. + template <typename T2, typename TStream2 = std::ostream> + struct SupportsInsertionOperator { + private: + template <typename TStream, typename T> + static std::true_type InsertionOperatorTest(TStream& os, const T& value, + std::remove_reference<decltype(os << value)>* = 0); // NOLINT [whitespace/operators] [3] + + template <typename TStream, typename ... T> + static std::false_type InsertionOperatorTest(TStream& os, const T& ... args); + + public: + static constexpr bool value = + decltype(InsertionOperatorTest(FakeReference<TStream2>(), std::declval<T2>()))::value; + }; + + template <typename TLeft, typename TRight, bool IsFloatingPoint> + struct SupportsEqualityOperatorImpl; + + template <typename TLeft, typename TRight> + struct SupportsEqualityOperatorImpl<TLeft, TRight, false> { + private: + template <typename TL, typename TR> + static std::true_type EqualityOperatorTest(const TL& left, const TR& right, + std::remove_reference<decltype(left == right)>* = 0); // NOLINT [whitespace/operators] [3] + + template <typename TL, typename ... T> + static std::false_type EqualityOperatorTest(const TL& left, const T& ... args); + + public: + static constexpr bool value = + decltype(EqualityOperatorTest(std::declval<TLeft>(), std::declval<TRight>()))::value; + }; + + // Partial specialization when TLeft/TRight are both floating points. + // This is a work-around because decltype(floatvar1 == floatvar2) + // will not compile with clang: + // error: comparing floating point with == or != is unsafe [-Werror,-Wfloat-equal] + template <typename TLeft, typename TRight> + struct SupportsEqualityOperatorImpl<TLeft, TRight, true> { + static constexpr bool value = true; + }; + + // SupportsEqualityOperatorImpl<T1, T2>::value will evaluate to a boolean, + // whose value is true if T1 can be compared against T2 with ==, + // and false otherwise.
 + template <typename TLeft, typename TRight> + struct SupportsEqualityOperator : + SupportsEqualityOperatorImpl<TLeft, TRight, + std::is_floating_point<TLeft>::value + && std::is_floating_point<TRight>::value> { + }; + + // Convert any kind of type to an std::string, even if there's no + // serialization support for it. Unknown types get converted to an + // arbitrary value. + // + // Meant for printing user-visible errors or unit test failures only. + template <typename T> + std::string ToStringAny(const T& value, + typename std::enable_if< + SupportsInsertionOperator<T>::value>::type* = 0) { + std::stringstream stream; + stream << value; + return stream.str(); + } + + template <typename T> + std::string ToStringAny(const std::vector<T> value, + typename std::enable_if< + SupportsInsertionOperator<T>::value>::type* = 0) { + std::stringstream stream; + stream << "vector{"; + + for (size_t i = 0; i < value.size(); ++i) { + stream << ToStringAny(value[i]); + + if (i != value.size() - 1) { + stream << ','; + } + } + + stream << "}"; + return stream.str(); + } + + template <typename T> + std::string ToStringAny(const T&, + typename std::enable_if< + !SupportsInsertionOperator<T>::value>::type* = 0 + ) { + return std::string("(unknown type [no operator<< implemented] for )"); + } + } // namespace detail // NOLINT [readability/namespace] [5] +} // namespace art + +#endif // ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_
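
The SFINAE probes above are a pre-C++17 detection idiom. The same test in a compact standalone form (written for illustration; the ART version keeps the two-overload shape for C++11 compatibility):

#include <sstream>
#include <type_traits>
#include <utility>

template <typename TStream, typename T, typename = void>
struct HasInsertion : std::false_type {};

template <typename TStream, typename T>
struct HasInsertion<TStream, T,
    decltype(void(std::declval<TStream&>() << std::declval<const T&>()))> : std::true_type {};

struct NoPrint {};  // deliberately not streamable

static_assert(HasInsertion<std::stringstream, int>::value, "int is streamable");
static_assert(!HasInsertion<std::stringstream, NoPrint>::value, "NoPrint is not");

int main() { return 0; }
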
 diff --git a/cmdline/memory_representation.h b/cmdline/memory_representation.h new file mode 100644 index 000000000..2619c317e --- /dev/null +++ b/cmdline/memory_representation.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_MEMORY_REPRESENTATION_H_ +#define ART_CMDLINE_MEMORY_REPRESENTATION_H_ + +#include <assert.h> +#include <ostream> +#include <string> + +#include "base/bit_utils.h" + +namespace art { + +// An integral representation of bytes of memory. +// The underlying runtime size_t value is guaranteed to be a multiple of kDivisor. +template <size_t kDivisor> +struct Memory { + static_assert(IsPowerOfTwo(kDivisor), "Divisor must be a power of 2"); + + static Memory FromBytes(size_t bytes) { + assert(bytes % kDivisor == 0); + return Memory(bytes); + } + + Memory() : Value(0u) {} + Memory(size_t value) : Value(value) { // NOLINT [runtime/explicit] [5] + assert(value % kDivisor == 0); + } + operator size_t() const { return Value; } + + size_t ToBytes() const { + return Value; + } + + static const char* Name() { + static std::string str; + if (str.empty()) { + str = "Memory<" + std::to_string(kDivisor) + '>'; + } + + return str.c_str(); + } + + size_t Value; +}; + +template <size_t kDivisor> +std::ostream& operator<<(std::ostream& stream, Memory<kDivisor> memory) { + return stream << memory.Value << '*' << kDivisor; +} + +using MemoryKiB = Memory<1024>; + +} // namespace art + +#endif // ART_CMDLINE_MEMORY_REPRESENTATION_H_
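
Hypothetical usage of MemoryKiB (this fragment assumes the in-tree base/bit_utils.h dependency resolves, so it is not standalone):

// art::MemoryKiB heap = art::MemoryKiB::FromBytes(64 * 1024);  // must be 1024-aligned
// size_t bytes = heap.ToBytes();  // 65536
// std::cout << heap;              // prints "65536*1024" via the stream operator above
// art::MemoryKiB::FromBytes(1000) would trip the divisibility assert in a debug build.
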
 diff --git a/cmdline/token_range.h b/cmdline/token_range.h new file mode 100644 index 000000000..335806795 --- /dev/null +++ b/cmdline/token_range.h @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_TOKEN_RANGE_H_ +#define ART_CMDLINE_TOKEN_RANGE_H_ + +#include <algorithm> +#include <assert.h> +#include <memory> +#include <string> +#include <vector> + +namespace art { +// A range of tokens to make token matching algorithms easier. +// +// We try really hard to avoid copying and store only a pointer and iterators to the +// interiors of the vector, so a typical copy constructor never ends up doing a deep copy. +// It is up to the user to play nice and not to mutate the strings in-place. +// +// Tokens are only copied if a mutating operation is performed (and even then only +// if it *actually* mutates the token). +struct TokenRange { + // Short-hand for a vector of strings. A single string and a token is synonymous. + using TokenList = std::vector<std::string>; + + // Copying-from-vector constructor. + explicit TokenRange(const TokenList& token_list) + : token_list_(new TokenList(token_list)), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Copying-from-iterator constructor. + template <typename ForwardIterator> + TokenRange(ForwardIterator it_begin, ForwardIterator it_end) + : token_list_(new TokenList(it_begin, it_end)), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + +#if 0 + // Copying-from-vector constructor. + TokenRange(const TokenList& token_list ATTRIBUTE_UNUSED, + TokenList::const_iterator it_begin, + TokenList::const_iterator it_end) + : token_list_(new TokenList(it_begin, it_end)), + begin_(token_list_->begin()), + end_(token_list_->end()) { + assert(it_begin >= token_list.begin()); + assert(it_end <= token_list.end()); + } +#endif + + // Copying-from-char-array constructor, converting them into tokens (strings) along the way. + TokenRange(const char* token_list[], size_t length) + : token_list_(new TokenList(&token_list[0], &token_list[length])), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Non-copying move-from-vector constructor. Takes over the token vector. + explicit TokenRange(TokenList&& token_list) + : token_list_(new TokenList(std::forward<TokenList>(token_list))), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Non-copying constructor. Retain reference to existing list of tokens. + TokenRange(std::shared_ptr<TokenList> token_list, + TokenList::const_iterator it_begin, + TokenList::const_iterator it_end) + : token_list_(token_list), + begin_(it_begin), + end_(it_end) { + assert(it_begin >= token_list->begin()); + assert(it_end <= token_list->end()); + } + + // Non-copying copy constructor. + TokenRange(const TokenRange&) = default; + + // Non-copying move constructor. + TokenRange(TokenRange&&) = default; + + // Non-copying constructor. Retains reference to an existing list of tokens, with offset. + explicit TokenRange(std::shared_ptr<TokenList> token_list) + : token_list_(token_list), + begin_(token_list_->begin()), + end_(token_list_->end()) + {} + + // Iterator type for begin() and end(). Guaranteed to be a RandomAccessIterator. + using iterator = TokenList::const_iterator; + + // Iterator type for const begin() and const end(). Guaranteed to be a RandomAccessIterator. + using const_iterator = iterator; + + // Create a token range by splitting a string. Each separator gets their own token. + // Since the separators are retained as tokens, it might be useful to call + // RemoveToken afterwards. + static TokenRange Split(const std::string& string, std::initializer_list<char> separators) { + TokenList new_token_list; + + std::string tok; + for (auto&& c : string) { + bool is_separator = false; + for (char sep : separators) { + is_separator = is_separator || (c == sep); + } + if (is_separator) { + // We spotted a separator character. + // Push back everything before the last separator as a new token. + // Push back the separator as a token. + if (!tok.empty()) { + new_token_list.push_back(tok); + tok = ""; + } + new_token_list.push_back(std::string() + c); + } else { + // Build up the token with another character. + tok += c; + } + } + + if (!tok.empty()) { + new_token_list.push_back(tok); + } + + return TokenRange(std::move(new_token_list)); + } + + // A RandomAccessIterator to the first element in this range. + iterator begin() const { + return begin_; + } + + // A RandomAccessIterator to one past the last element in this range. + iterator end() const { + return end_; + } + + // The size of the range, i.e. how many tokens are in it. + size_t Size() const { + return std::distance(begin_, end_); + } + + // Are there 0 tokens in this range? + bool IsEmpty() const { + return Size() == 0; + } + + // Look up a token by its offset. + const std::string& GetToken(size_t offset) const { + assert(offset < Size()); + return *(begin_ + offset); + } + + // Does this token range equal the other range? + // Equality is defined as having both the same size, and + // each corresponding token being equal. + bool operator==(const TokenRange& other) const { + if (this == &other) { + return true; + } + + if (Size() != other.Size()) { + return false; + } + + return std::equal(begin(), end(), other.begin()); + } + + // Look up the token at the requested index. + const std::string& operator[](int index) const { + assert(index >= 0 && static_cast<size_t>(index) < Size()); + return *(begin() + index); + } + + // Does this current range start with the other range? + bool StartsWith(const TokenRange& other) const { + if (this == &other) { + return true; + } + + if (Size() < other.Size()) { + return false; + } + + auto& smaller = Size() < other.Size() ? *this : other; + auto& greater = Size() < other.Size() ?
 other : *this; + + return std::equal(smaller.begin(), smaller.end(), greater.begin()); + } + + // Remove all characters 'c' from each token, potentially copying the underlying tokens. + TokenRange RemoveCharacter(char c) const { + TokenList new_token_list(begin(), end()); + + bool changed = false; + for (auto&& token : new_token_list) { + auto it = std::remove_if(token.begin(), token.end(), [&](char ch) { + if (ch == c) { + changed = true; + return true; + } + return false; + }); + token.erase(it, token.end()); + } + + if (!changed) { + return *this; + } + + return TokenRange(std::move(new_token_list)); + } + + // Remove all tokens matching this one, potentially copying the underlying tokens. + TokenRange RemoveToken(const std::string& token) { + return RemoveIf([&](const std::string& tok) { return tok == token; }); + } + + // Discard all empty tokens, potentially copying the underlying tokens. + TokenRange DiscardEmpty() const { + return RemoveIf([](const std::string& token) { return token.empty(); }); + } + + // Create a non-copying subset of this range. + // Length is trimmed so that the Slice does not go out of range. + TokenRange Slice(size_t offset, size_t length = std::string::npos) const { + assert(offset < Size()); + + if (length != std::string::npos && offset + length > Size()) { + length = Size() - offset; + } + + iterator it_end; + if (length == std::string::npos) { + it_end = end(); + } else { + it_end = begin() + offset + length; + } + + return TokenRange(token_list_, begin() + offset, it_end); + } + + // Try to match the string with tokens from this range. + // Each token is used to match exactly once (after which the next token is used, and so on). + // The matching happens from left-to-right in a non-greedy fashion. + // If the currently-matched token is the wildcard, then the new outputted token will + // contain as much as possible until the next token is matched. + // + // For example, if this == ["a:", "_", "b:"] and "_" is the match string, then + // MatchSubstrings on "a:foob:" will yield: ["a:", "foo", "b:"] + // + // Since the string matching can fail (e.g. ["foo"] against "bar"), then this + // function can fail, in which case it will return null. + std::unique_ptr<TokenRange> MatchSubstrings(const std::string& string, + const std::string& wildcard) const { + TokenList new_token_list; + + size_t wildcard_idx = std::string::npos; + size_t string_idx = 0; + + // Function to push all the characters matched as a wildcard so far + // as a brand new token. It resets the wildcard matching. + // Empty wildcards are possible and ok, but only if wildcard matching was on. + auto maybe_push_wildcard_token = [&]() { + if (wildcard_idx != std::string::npos) { + size_t wildcard_length = string_idx - wildcard_idx; + std::string wildcard_substr = string.substr(wildcard_idx, wildcard_length); + new_token_list.push_back(std::move(wildcard_substr)); + + wildcard_idx = std::string::npos; + } + }; + + for (iterator it = begin(); it != end(); ++it) { + const std::string& tok = *it; + + if (tok == wildcard) { + maybe_push_wildcard_token(); + wildcard_idx = string_idx; + continue; + } + + size_t next_token_idx = string.find(tok, string_idx); + if (next_token_idx == std::string::npos) { + // Could not find token at all + return nullptr; + } else if (next_token_idx != string_idx && wildcard_idx == std::string::npos) { + // Found the token at a non-starting location, and we weren't + // trying to parse the wildcard.
 + return nullptr; + } + + // Close out any pending wildcard capture before emitting this token, + // so the output tokens stay in order. + string_idx = next_token_idx; + maybe_push_wildcard_token(); + new_token_list.push_back(string.substr(next_token_idx, tok.size())); + string_idx += tok.size(); + } + + size_t remaining = string.size() - string_idx; + if (remaining > 0) { + if (wildcard_idx == std::string::npos) { + // Some characters were still remaining in the string, + // but it wasn't trying to match a wildcard. + return nullptr; + } + } + + // If some characters are remaining, the rest must be a wildcard. + string_idx += remaining; + maybe_push_wildcard_token(); + + return std::unique_ptr<TokenRange>(new TokenRange(std::move(new_token_list))); + }
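
A hypothetical walk-through of MatchSubstrings, matching the example in the comment above (assumes this header's dependencies resolve; not standalone):

// art::TokenRange def(std::vector<std::string>{"a:", "_", "b:"});
// std::unique_ptr<art::TokenRange> m = def.MatchSubstrings("a:foob:", "_");
// *m is now ["a:", "foo", "b:"]: the "_" wildcard captured "foo".
// std::unique_ptr<art::TokenRange> n = def.MatchSubstrings("foo", "_");
// n == nullptr: "a:" could not be matched at the start of the input.
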
 + + // Do a quick match token-by-token, and see if they match. + // Any tokens with a wildcard in them are only matched up until the wildcard. + // If this is true, then the wildcard matching later on can still fail, so this is not + // a guarantee that the argument is correct, it's more of a strong hint that the + // user-provided input *probably* was trying to match this argument. + // + // Returns how many tokens were either matched (or ignored because there was a + // wildcard present). 0 means no match; if Size() tokens are returned, the match was exact. + size_t MaybeMatches(const TokenRange& token_list, const std::string& wildcard) const { + auto token_it = token_list.begin(); + auto token_end = token_list.end(); + auto name_it = begin(); + auto name_end = end(); + + size_t matched_tokens = 0; + + while (token_it != token_end && name_it != name_end) { + // Skip token matching when the corresponding name has a wildcard in it. + const std::string& name = *name_it; + + size_t wildcard_idx = name.find(wildcard); + if (wildcard_idx == std::string::npos) { // No wildcard present + // Did the definition token match the user token? + if (name != *token_it) { + return matched_tokens; + } + } else { + std::string name_prefix = name.substr(0, wildcard_idx); + + // Did the user token start with the up-to-the-wildcard prefix? + if (!StartsWith(*token_it, name_prefix)) { + return matched_tokens; + } + } + + ++token_it; + ++name_it; + ++matched_tokens; + } + + // If we got this far, it's either a full match or the token list was too short. + return matched_tokens; + } + + // Flatten the token range by joining every adjacent token with the separator character. + // e.g. ["hello", "world"].Join('$') == "hello$world" + std::string Join(char separator) const { + TokenList tmp(begin(), end()); + return art::Join(tmp, separator); + // TODO: Join should probably take an offset or iterators + } + + private: + static bool StartsWith(const std::string& larger, const std::string& smaller) { + if (larger.size() >= smaller.size()) { + return std::equal(smaller.begin(), smaller.end(), larger.begin()); + } + + return false; + } + + template <typename TPredicate> + TokenRange RemoveIf(const TPredicate& predicate) const { + // If any of the tokens in the token lists are empty, then + // we need to remove them and compress the token list into a smaller one. + bool remove = false; + for (auto it = begin_; it != end_; ++it) { + auto&& token = *it; + + if (predicate(token)) { + remove = true; + break; + } + } + + // Actually copy the token list and remove the tokens that don't match our predicate. + if (remove) { + auto token_list = std::make_shared<TokenList>(begin(), end()); + TokenList::iterator new_end = + std::remove_if(token_list->begin(), token_list->end(), predicate); + token_list->erase(new_end, token_list->end()); + + assert(token_list_->size() > token_list->size() && "Nothing was actually removed!"); + + return TokenRange(token_list); + } + + return *this; + } + + const std::shared_ptr<std::vector<std::string>> token_list_; + const iterator begin_; + const iterator end_; +}; +} // namespace art + +#endif // ART_CMDLINE_TOKEN_RANGE_H_ diff --git a/cmdline/unit.h b/cmdline/unit.h new file mode 100644 index 000000000..ad6a03d12 --- /dev/null +++ b/cmdline/unit.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_CMDLINE_UNIT_H_ +#define ART_CMDLINE_UNIT_H_ + +namespace art { + +// Used for arguments that simply indicate presence (e.g. "-help") without any values. +struct Unit { + // Avoid 'Conditional jump or move depends on uninitialised value(s)' errors + // when running valgrind by specifying a user-defined constructor. + Unit() {} + Unit(const Unit&) = default; + ~Unit() {} + bool operator==(Unit) const { + return true; + } +}; + +} // namespace art + +#endif // ART_CMDLINE_UNIT_H_ diff --git a/compiler/Android.mk b/compiler/Android.mk new file mode 100644 index 000000000..e9c22d2b0 --- /dev/null +++ b/compiler/Android.mk @@ -0,0 +1,330 @@ +# +# Copyright (C) 2012 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# + +LOCAL_PATH := $(call my-dir) + +include art/build/Android.common_build.mk + +LIBART_COMPILER_SRC_FILES := \ + compiled_method.cc \ + debug/elf_debug_writer.cc \ + dex/dex_to_dex_compiler.cc \ + dex/verified_method.cc \ + dex/verification_results.cc \ + dex/quick_compiler_callbacks.cc \ + dex/quick/dex_file_method_inliner.cc \ + dex/quick/dex_file_to_method_inliner_map.cc \ + driver/compiled_method_storage.cc \ + driver/compiler_driver.cc \ + driver/compiler_options.cc \ + driver/dex_compilation_unit.cc \ + linker/buffered_output_stream.cc \ + linker/file_output_stream.cc \ + linker/multi_oat_relative_patcher.cc \ + linker/output_stream.cc \ + linker/vector_output_stream.cc \ + linker/relative_patcher.cc \ + jit/jit_compiler.cc \ + jni/quick/calling_convention.cc \ + jni/quick/jni_compiler.cc \ + optimizing/block_builder.cc \ + optimizing/bounds_check_elimination.cc \ + optimizing/builder.cc \ + optimizing/code_generator.cc \ + optimizing/code_generator_utils.cc \ + optimizing/constant_folding.cc \ + optimizing/dead_code_elimination.cc \ + optimizing/dex_cache_array_fixups_arm.cc \ + optimizing/graph_checker.cc \ + optimizing/graph_visualizer.cc \ + optimizing/gvn.cc \ + optimizing/induction_var_analysis.cc \ + optimizing/induction_var_range.cc \ + optimizing/inliner.cc \ + optimizing/instruction_builder.cc \ + optimizing/instruction_simplifier.cc \ + optimizing/intrinsics.cc \ + optimizing/licm.cc \ + optimizing/load_store_elimination.cc \ + optimizing/locations.cc \ + optimizing/nodes.cc \ + optimizing/nodes_arm64.cc \ + optimizing/optimization.cc \ + optimizing/optimizing_compiler.cc \ + optimizing/parallel_move_resolver.cc \ + optimizing/prepare_for_register_allocation.cc \ + optimizing/reference_type_propagation.cc \ + optimizing/register_allocator.cc \ + optimizing/select_generator.cc \ + optimizing/sharpening.cc \ + optimizing/side_effects_analysis.cc \ + optimizing/ssa_builder.cc \ + optimizing/ssa_liveness_analysis.cc \ + optimizing/ssa_phi_elimination.cc \ + optimizing/stack_map_stream.cc \ + trampolines/trampoline_compiler.cc \ + utils/assembler.cc \ + utils/swap_space.cc \ + compiler.cc \ + elf_writer.cc \ + elf_writer_quick.cc \ + image_writer.cc \ + oat_writer.cc + +LIBART_COMPILER_SRC_FILES_arm := \ + jni/quick/arm/calling_convention_arm.cc \ + linker/arm/relative_patcher_arm_base.cc \ + linker/arm/relative_patcher_thumb2.cc \ + optimizing/code_generator_arm.cc \ + optimizing/intrinsics_arm.cc \ + utils/arm/assembler_arm.cc \ + utils/arm/assembler_arm32.cc \ + utils/arm/assembler_thumb2.cc \ + utils/arm/managed_register_arm.cc \ + +# TODO We should really separate out those files that are actually needed for both variants of an +# architecture into its own category. Currently we just include all of the 32bit variant in the +# 64bit variant. It also might be good to allow one to compile only the 64bit variant without the +# 32bit one. 
+LIBART_COMPILER_SRC_FILES_arm64 := \ + $(LIBART_COMPILER_SRC_FILES_arm) \ + jni/quick/arm64/calling_convention_arm64.cc \ + linker/arm64/relative_patcher_arm64.cc \ + optimizing/code_generator_arm64.cc \ + optimizing/instruction_simplifier_arm.cc \ + optimizing/instruction_simplifier_arm64.cc \ + optimizing/instruction_simplifier_shared.cc \ + optimizing/intrinsics_arm64.cc \ + utils/arm64/assembler_arm64.cc \ + utils/arm64/managed_register_arm64.cc \ + +LIBART_COMPILER_SRC_FILES_mips := \ + jni/quick/mips/calling_convention_mips.cc \ + optimizing/code_generator_mips.cc \ + optimizing/intrinsics_mips.cc \ + utils/mips/assembler_mips.cc \ + utils/mips/managed_register_mips.cc \ + +LIBART_COMPILER_SRC_FILES_mips64 := \ + $(LIBART_COMPILER_SRC_FILES_mips) \ + jni/quick/mips64/calling_convention_mips64.cc \ + optimizing/code_generator_mips64.cc \ + optimizing/intrinsics_mips64.cc \ + utils/mips64/assembler_mips64.cc \ + utils/mips64/managed_register_mips64.cc \ + + +LIBART_COMPILER_SRC_FILES_x86 := \ + jni/quick/x86/calling_convention_x86.cc \ + linker/x86/relative_patcher_x86.cc \ + linker/x86/relative_patcher_x86_base.cc \ + optimizing/code_generator_x86.cc \ + optimizing/intrinsics_x86.cc \ + optimizing/pc_relative_fixups_x86.cc \ + utils/x86/assembler_x86.cc \ + utils/x86/managed_register_x86.cc \ + +LIBART_COMPILER_SRC_FILES_x86_64 := \ + $(LIBART_COMPILER_SRC_FILES_x86) \ + jni/quick/x86_64/calling_convention_x86_64.cc \ + linker/x86_64/relative_patcher_x86_64.cc \ + optimizing/intrinsics_x86_64.cc \ + optimizing/code_generator_x86_64.cc \ + utils/x86_64/assembler_x86_64.cc \ + utils/x86_64/managed_register_x86_64.cc \ + + +LIBART_COMPILER_CFLAGS := + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \ + compiled_method.h \ + dex/compiler_enums.h \ + dex/dex_to_dex_compiler.h \ + driver/compiler_driver.h \ + driver/compiler_options.h \ + image_writer.h \ + optimizing/locations.h + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm := \ + utils/arm/constants_arm.h + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \ + $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm) + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \ + utils/mips/assembler_mips.h + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \ + $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips) \ + utils/mips64/assembler_mips64.h + +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86 := +LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86_64 := \ + $(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_x86) + +# $(1): target or host +# $(2): ndebug or debug +# $(3): static or shared (empty means shared, applies only for host) +define build-libart-compiler + ifneq ($(1),target) + ifneq ($(1),host) + $$(error expected target or host for argument 1, received $(1)) + endif + endif + ifneq ($(2),ndebug) + ifneq ($(2),debug) + $$(error expected ndebug or debug for argument 2, received $(2)) + endif + endif + + art_target_or_host := $(1) + art_ndebug_or_debug := $(2) + art_static_or_shared := $(3) + + include $(CLEAR_VARS) + ifeq ($$(art_target_or_host),host) + LOCAL_IS_HOST_MODULE := true + art_codegen_targets := $(ART_HOST_CODEGEN_ARCHS) + else + art_codegen_targets := $(ART_TARGET_CODEGEN_ARCHS) + endif + LOCAL_CPP_EXTENSION := $(ART_CPP_EXTENSION) + ifeq ($$(art_ndebug_or_debug),ndebug) + LOCAL_MODULE := libart-compiler + ifeq ($$(art_static_or_shared), static) + LOCAL_STATIC_LIBRARIES += libart liblz4 liblzma + else + LOCAL_SHARED_LIBRARIES += libart liblz4 liblzma + endif + ifeq 
($$(art_target_or_host),target) + LOCAL_FDO_SUPPORT := true + endif + else # debug + LOCAL_MODULE := libartd-compiler + ifeq ($$(art_static_or_shared), static) + LOCAL_STATIC_LIBRARIES += libartd liblz4 liblzma + else + LOCAL_SHARED_LIBRARIES += libartd liblz4 liblzma + endif + endif + + LOCAL_MODULE_TAGS := optional + ifeq ($$(art_static_or_shared), static) + LOCAL_MODULE_CLASS := STATIC_LIBRARIES + else + LOCAL_MODULE_CLASS := SHARED_LIBRARIES + endif + + # Sort removes duplicates. + LOCAL_SRC_FILES := $$(LIBART_COMPILER_SRC_FILES) \ + $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_SRC_FILES_$$(arch)))) + + GENERATED_SRC_DIR := $$(call local-generated-sources-dir) + ENUM_OPERATOR_OUT_CC_FILES := $$(patsubst %.h,%_operator_out.cc,\ + $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES) \ + $$(sort $$(foreach arch,$$(art_codegen_targets), $$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_$$(arch))))) + ENUM_OPERATOR_OUT_GEN := $$(addprefix $$(GENERATED_SRC_DIR)/,$$(ENUM_OPERATOR_OUT_CC_FILES)) + +$$(ENUM_OPERATOR_OUT_GEN): art/tools/generate-operator-out.py +$$(ENUM_OPERATOR_OUT_GEN): PRIVATE_CUSTOM_TOOL = art/tools/generate-operator-out.py $(LOCAL_PATH) $$< > $$@ +$$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PATH)/%.h + $$(transform-generated-source) + + LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN) + + LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS) + ifeq ($$(art_target_or_host),target) + $(call set-target-local-clang-vars) + $(call set-target-local-cflags-vars,$(2)) + else # host + LOCAL_CLANG := $(ART_HOST_CLANG) + LOCAL_CFLAGS += $(ART_HOST_CFLAGS) + LOCAL_ASFLAGS += $(ART_HOST_ASFLAGS) + LOCAL_LDLIBS := $(ART_HOST_LDLIBS) + ifeq ($$(art_static_or_shared),static) + LOCAL_LDFLAGS += -static + endif + ifeq ($$(art_ndebug_or_debug),debug) + LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS) + else + LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS) + endif + endif + + LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime art/disassembler + + ifeq ($$(art_target_or_host),host) + # For compiler driver TLS. + LOCAL_LDLIBS += -lpthread + endif + LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_build.mk + LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk + # Vixl assembly support for ARM64 targets. + ifeq ($$(art_ndebug_or_debug),debug) + ifeq ($$(art_static_or_shared), static) + LOCAL_WHOLE_STATIC_LIBRARIES += libvixl + else + LOCAL_SHARED_LIBRARIES += libvixl + endif + else + ifeq ($$(art_static_or_shared), static) + LOCAL_WHOLE_STATIC_LIBRARIES += libvixl + else + LOCAL_SHARED_LIBRARIES += libvixl + endif + endif + + LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE) + + ifeq ($$(art_target_or_host),target) + # For atrace. + LOCAL_SHARED_LIBRARIES += libcutils + include $(BUILD_SHARED_LIBRARY) + else # host + LOCAL_MULTILIB := both + ifeq ($$(art_static_or_shared), static) + include $(BUILD_HOST_STATIC_LIBRARY) + else + include $(BUILD_HOST_SHARED_LIBRARY) + endif + endif + + # Clear locally defined variables. + art_target_or_host := + art_ndebug_or_debug := + art_static_or_shared := + art_codegen_targets := +endef + +# We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
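+# The instantiations below cover the host/target x ndebug/debug matrix; for +# example, (host,ndebug) defines libart-compiler and (host,debug) defines +# libartd-compiler, each optionally also built statically when +# ART_BUILD_HOST_STATIC is true.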
+ifeq ($(ART_BUILD_HOST_NDEBUG),true) + $(eval $(call build-libart-compiler,host,ndebug)) + ifeq ($(ART_BUILD_HOST_STATIC),true) + $(eval $(call build-libart-compiler,host,ndebug,static)) + endif +endif +ifeq ($(ART_BUILD_HOST_DEBUG),true) + $(eval $(call build-libart-compiler,host,debug)) + ifeq ($(ART_BUILD_HOST_STATIC),true) + $(eval $(call build-libart-compiler,host,debug,static)) + endif +endif +ifeq ($(ART_BUILD_TARGET_NDEBUG),true) + $(eval $(call build-libart-compiler,target,ndebug)) +endif +ifeq ($(ART_BUILD_TARGET_DEBUG),true) + $(eval $(call build-libart-compiler,target,debug)) +endif diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h new file mode 100644 index 000000000..f8b746093 --- /dev/null +++ b/compiler/cfi_test.h @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_CFI_TEST_H_ +#define ART_COMPILER_CFI_TEST_H_ + +#include <memory> +#include <sstream> +#include <vector> + +#include "arch/instruction_set.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/dwarf_test.h" +#include "debug/dwarf/headers.h" +#include "disassembler/disassembler.h" +#include "gtest/gtest.h" + +namespace art { + +constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT; + +class CFITest : public dwarf::DwarfTest { + public: + void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str, + const std::vector<uint8_t>& actual_asm, + const std::vector<uint8_t>& actual_cfi) { + std::vector<std::string> lines; + // Print the raw bytes. + fprintf(f, "static constexpr uint8_t expected_asm_%s[] = {", isa_str); + HexDump(f, actual_asm); + fprintf(f, "\n};\n"); + fprintf(f, "static constexpr uint8_t expected_cfi_%s[] = {", isa_str); + HexDump(f, actual_cfi); + fprintf(f, "\n};\n"); + // Pretty-print CFI opcodes. + constexpr bool is64bit = false; + dwarf::DebugFrameOpCodeWriter<> initial_opcodes; + dwarf::WriteCIE(is64bit, dwarf::Reg(8), + initial_opcodes, kCFIFormat, &debug_frame_data_); + std::vector<uintptr_t> debug_frame_patches; + dwarf::WriteFDE(is64bit, 0, 0, 0, actual_asm.size(), ArrayRef<const uint8_t>(actual_cfi), + kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); + ReformatCfi(Objdump(false, "-W"), &lines); + // Pretty-print assembly. + const uint8_t* asm_base = actual_asm.data(); + const uint8_t* asm_end = asm_base + actual_asm.size(); + auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true); + std::unique_ptr<Disassembler> disasm(Disassembler::Create(isa, opts)); + std::stringstream stream; + const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 1 : 0); + disasm->Dump(stream, base, base + actual_asm.size()); + ReformatAsm(&stream, &lines); + // Print CFI and assembly interleaved. + std::stable_sort(lines.begin(), lines.end(), CompareByAddress); + for (const std::string& line : lines) { + fprintf(f, "// %s\n", line.c_str()); + } + fprintf(f, "\n"); + } + + private: + // Helper - get offset just past the end of given string.
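+ // For example (illustrative values only): with str = "0x42: mov r0, r1" and + // substr = ": ", FindEndOf returns 6, the offset of 'm' just past the ": ".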
+ static size_t FindEndOf(const std::string& str, const char* substr) { + size_t pos = str.find(substr); + CHECK_NE(std::string::npos, pos); + return pos + strlen(substr); + } + + // Split into lines and remove raw instruction bytes. + static void ReformatAsm(std::stringstream* stream, + std::vector<std::string>* output) { + std::string line; + while (std::getline(*stream, line)) { + line = line.substr(0, FindEndOf(line, ": ")) + + line.substr(FindEndOf(line, "\t")); + size_t pos; + while ((pos = line.find("  ")) != std::string::npos) { + line = line.replace(pos, 2, " "); + } + while (!line.empty() && line.back() == ' ') { + line.pop_back(); + } + output->push_back(line); + } + } + + // Find interesting parts of objdump output and prefix the lines with address. + static void ReformatCfi(const std::vector<std::string>& lines, + std::vector<std::string>* output) { + std::string address; + for (const std::string& line : lines) { + if (line.find("DW_CFA_nop") != std::string::npos) { + // Ignore. + } else if (line.find("DW_CFA_advance_loc") != std::string::npos) { + // The last 8 characters are the address. + address = "0x" + line.substr(line.size() - 8); + } else if (line.find("DW_CFA_") != std::string::npos) { + std::string new_line(line); + // "bad register" warning is caused by always using host (x86) objdump. + const char* bad_reg = "bad register: "; + size_t pos; + if ((pos = new_line.find(bad_reg)) != std::string::npos) { + new_line = new_line.replace(pos, strlen(bad_reg), ""); + } + // Remove register names in parentheses since they have x86 names. + if ((pos = new_line.find(" (")) != std::string::npos) { + new_line = new_line.replace(pos, FindEndOf(new_line, ")") - pos, ""); + } + // Use the .cfi_ prefix. + new_line = ".cfi_" + new_line.substr(FindEndOf(new_line, "DW_CFA_")); + output->push_back(address + ": " + new_line); + } + } + } + + // Compare strings by the address prefix. + static bool CompareByAddress(const std::string& lhs, const std::string& rhs) { + EXPECT_EQ(lhs[10], ':'); + EXPECT_EQ(rhs[10], ':'); + return strncmp(lhs.c_str(), rhs.c_str(), 10) < 0; + } + + // Pretty-print byte array. 12 bytes per line. + static void HexDump(FILE* f, const std::vector<uint8_t>& data) { + for (size_t i = 0; i < data.size(); i++) { + fprintf(f, i % 12 == 0 ? "\n  " : " "); // Whitespace. + fprintf(f, "0x%02X,", data[i]); + } + } +}; + +} // namespace art + +#endif // ART_COMPILER_CFI_TEST_H_ diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc new file mode 100644 index 000000000..bcd8940b5 --- /dev/null +++ b/compiler/common_compiler_test.cc @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "common_compiler_test.h" + +#include "arch/instruction_set_features.h" +#include "art_field-inl.h" +#include "art_method.h" +#include "class_linker.h" +#include "compiled_method.h" +#include "dex/quick_compiler_callbacks.h" +#include "dex/quick/dex_file_to_method_inliner_map.h" +#include "dex/verification_results.h" +#include "driver/compiler_driver.h" +#include "driver/compiler_options.h" +#include "interpreter/interpreter.h" +#include "mirror/class_loader.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache.h" +#include "mirror/object-inl.h" +#include "oat_quick_method_header.h" +#include "scoped_thread_state_change.h" +#include "thread-inl.h" +#include "utils.h" + +namespace art { + +CommonCompilerTest::CommonCompilerTest() {} +CommonCompilerTest::~CommonCompilerTest() {} + +void CommonCompilerTest::MakeExecutable(ArtMethod* method) { + CHECK(method != nullptr); + + const CompiledMethod* compiled_method = nullptr; + if (!method->IsAbstract()) { + mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); + const DexFile& dex_file = *dex_cache->GetDexFile(); + compiled_method = + compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, + method->GetDexMethodIndex())); + } + if (compiled_method != nullptr) { + ArrayRef<const uint8_t> code = compiled_method->GetQuickCode(); + uint32_t code_size = code.size(); + CHECK_NE(0u, code_size); + ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable(); + uint32_t vmap_table_offset = vmap_table.empty() ? 0u + : sizeof(OatQuickMethodHeader) + vmap_table.size(); + OatQuickMethodHeader method_header(vmap_table_offset, + compiled_method->GetFrameSizeInBytes(), + compiled_method->GetCoreSpillMask(), + compiled_method->GetFpSpillMask(), + code_size); + + header_code_and_maps_chunks_.push_back(std::vector<uint8_t>()); + std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back(); + const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet()); + const size_t size = vmap_table.size() + sizeof(method_header) + code_size; + chunk->reserve(size + max_padding); + chunk->resize(sizeof(method_header)); + memcpy(&(*chunk)[0], &method_header, sizeof(method_header)); + chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end()); + chunk->insert(chunk->end(), code.begin(), code.end()); + CHECK_EQ(chunk->size(), size); + const void* unaligned_code_ptr = chunk->data() + (size - code_size); + size_t offset = dchecked_integral_cast<size_t>(reinterpret_cast<uintptr_t>(unaligned_code_ptr)); + size_t padding = compiled_method->AlignCode(offset) - offset; + // Make sure no resizing takes place. + CHECK_GE(chunk->capacity(), chunk->size() + padding); + chunk->insert(chunk->begin(), padding, 0); + const void* code_ptr = reinterpret_cast<const uint8_t*>(unaligned_code_ptr) + padding; + CHECK_EQ(code_ptr, static_cast<const void*>(chunk->data() + (chunk->size() - code_size))); + MakeExecutable(code_ptr, code.size()); + const void* method_code = CompiledMethod::CodePointer(code_ptr, + compiled_method->GetInstructionSet()); + LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code; + class_linker_->SetEntryPointsToCompiledCode(method, method_code); + } else { + // No code? You must mean to go into the interpreter. + // Or the generic JNI...
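+ // (Methods the driver did not compile also land here; pointing them at the + // interpreter entry point keeps them callable from tests.)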
+ class_linker_->SetEntryPointsToInterpreter(method); + } +} + +void CommonCompilerTest::MakeExecutable(const void* code_start, size_t code_length) { + CHECK(code_start != nullptr); + CHECK_NE(code_length, 0U); + uintptr_t data = reinterpret_cast<uintptr_t>(code_start); + uintptr_t base = RoundDown(data, kPageSize); + uintptr_t limit = RoundUp(data + code_length, kPageSize); + uintptr_t len = limit - base; + int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC); + CHECK_EQ(result, 0); + + FlushInstructionCache(reinterpret_cast<char*>(base), reinterpret_cast<char*>(base + len)); +} + +void CommonCompilerTest::MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + StackHandleScope<1> hs(self); + Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader)); + mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + size_t pointer_size = class_linker_->GetImagePointerSize(); + for (auto& m : klass->GetMethods(pointer_size)) { + MakeExecutable(&m); + } +} + +// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler +// driver assumes ownership of the set, so the test should properly release the set. +std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() { + // Empty set: by default no classes are retained in the image. + return new std::unordered_set<std::string>(); +} + +// Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler +// driver assumes ownership of the set, so the test should properly release the set. +std::unordered_set<std::string>* CommonCompilerTest::GetCompiledClasses() { + // Null, no selection of compiled-classes. + return nullptr; +} + +// Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler +// driver assumes ownership of the set, so the test should properly release the set. +std::unordered_set<std::string>* CommonCompilerTest::GetCompiledMethods() { + // Null, no selection of compiled-methods. + return nullptr; +} + +// Get ProfileCompilationInfo that should be passed to the driver. +ProfileCompilationInfo* CommonCompilerTest::GetProfileCompilationInfo() { + // Null, profile information will not be taken into account. + return nullptr; +} + +void CommonCompilerTest::SetUp() { + CommonRuntimeTest::SetUp(); + { + ScopedObjectAccess soa(Thread::Current()); + + const InstructionSet instruction_set = kRuntimeISA; + // Take the default set of instruction features from the build.
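+ // A test could instead pick features explicitly before the driver is created; + // a sketch (the variant name is only an example): + //   std::string error_msg; + //   instruction_set_features_.reset( + //       InstructionSetFeatures::FromVariant(kArm, "cortex-a15", &error_msg));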
+ instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines()); + + runtime_->SetInstructionSet(instruction_set); + for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) { + Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i); + if (!runtime_->HasCalleeSaveMethod(type)) { + runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type); + } + } + + timer_.reset(new CumulativeLogger("Compilation times")); + CreateCompilerDriver(compiler_kind_, instruction_set); + } +} + +void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, + InstructionSet isa, + size_t number_of_threads) { + compiler_driver_.reset(new CompilerDriver(compiler_options_.get(), + verification_results_.get(), + method_inliner_map_.get(), + kind, + isa, + instruction_set_features_.get(), + /* boot_image */ true, + /* app_image */ false, + GetImageClasses(), + GetCompiledClasses(), + GetCompiledMethods(), + number_of_threads, + /* dump_stats */ true, + /* dump_passes */ true, + timer_.get(), + /* swap_fd */ -1, + GetProfileCompilationInfo())); + // We typically don't generate an image in unit tests, disable this optimization by default. + compiler_driver_->SetSupportBootImageFixup(false); +} + +void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) { + CommonRuntimeTest::SetUpRuntimeOptions(options); + + compiler_options_.reset(new CompilerOptions); + verification_results_.reset(new VerificationResults(compiler_options_.get())); + method_inliner_map_.reset(new DexFileToMethodInlinerMap); + callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(), + method_inliner_map_.get(), + CompilerCallbacks::CallbackMode::kCompileApp)); +} + +Compiler::Kind CommonCompilerTest::GetCompilerKind() const { + return compiler_kind_; +} + +void CommonCompilerTest::SetCompilerKind(Compiler::Kind compiler_kind) { + compiler_kind_ = compiler_kind; +} + +InstructionSet CommonCompilerTest::GetInstructionSet() const { + DCHECK(compiler_driver_.get() != nullptr); + return compiler_driver_->GetInstructionSet(); +} + +void CommonCompilerTest::TearDown() { + timer_.reset(); + compiler_driver_.reset(); + callbacks_.reset(); + method_inliner_map_.reset(); + verification_results_.reset(); + compiler_options_.reset(); + image_reservation_.reset(); + + CommonRuntimeTest::TearDown(); +} + +void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const char* class_name) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + StackHandleScope<1> hs(self); + Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader)); + mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + auto pointer_size = class_linker_->GetImagePointerSize(); + for (auto& m : klass->GetMethods(pointer_size)) { + CompileMethod(&m); + } +} + +void CommonCompilerTest::CompileMethod(ArtMethod* method) { + CHECK(method != nullptr); + TimingLogger timings("CommonTest::CompileMethod", false, false); + TimingLogger::ScopedTiming t(__FUNCTION__, &timings); + compiler_driver_->CompileOne(Thread::Current(), method, &timings); + TimingLogger::ScopedTiming t2("MakeExecutable", &timings); + MakeExecutable(method); +} + +void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, + const char* class_name, const char* method_name, + const char* signature) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + mirror::Class* klass =
class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindDirectMethod(method_name, signature, pointer_size); + CHECK(method != nullptr) << "Direct method not found: " + << class_name << "." << method_name << signature; + CompileMethod(method); +} + +void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, + const char* class_name, const char* method_name, + const char* signature) { + std::string class_descriptor(DotToDescriptor(class_name)); + Thread* self = Thread::Current(); + mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); + CHECK(klass != nullptr) << "Class not found " << class_name; + auto pointer_size = class_linker_->GetImagePointerSize(); + ArtMethod* method = klass->FindVirtualMethod(method_name, signature, pointer_size); + CHECK(method != nullptr) << "Virtual method not found: " + << class_name << "." << method_name << signature; + CompileMethod(method); +} + +void CommonCompilerTest::ReserveImageSpace() { + // Reserve where the image will be loaded up front so that other parts of test set up don't + // accidentally end up colliding with the fixed memory address when we need to load the image. + std::string error_msg; + MemMap::Init(); + image_reservation_.reset(MemMap::MapAnonymous("image reservation", + reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS), + (size_t)120 * 1024 * 1024, // 120MB + PROT_NONE, + false /* no need for 4gb flag with fixed mmap*/, + false /* not reusing existing reservation */, + &error_msg)); + CHECK(image_reservation_.get() != nullptr) << error_msg; +} + +void CommonCompilerTest::UnreserveImageSpace() { + image_reservation_.reset(); +} + +} // namespace art diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h new file mode 100644 index 000000000..2d139eb84 --- /dev/null +++ b/compiler/common_compiler_test.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_ +#define ART_COMPILER_COMMON_COMPILER_TEST_H_ + +#include <list> +#include <unordered_set> +#include <vector> + +#include "common_runtime_test.h" +#include "compiler.h" +#include "jit/offline_profiling_info.h" +#include "oat_file.h" + +namespace art { +namespace mirror { + class ClassLoader; +} // namespace mirror + +class CompilerDriver; +class CompilerOptions; +class CumulativeLogger; +class DexFileToMethodInlinerMap; +class VerificationResults; + +template <typename T> class Handle; + +class CommonCompilerTest : public CommonRuntimeTest { + public: + CommonCompilerTest(); + ~CommonCompilerTest(); + + // Create an OatMethod based on pointers (for unit tests).
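+ // (It wraps the raw code pointer so a test can invoke freshly compiled code + // without writing and loading a full .oat file.)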
+ OatFile::OatMethod CreateOatMethod(const void* code); + + void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + + static void MakeExecutable(const void* code_start, size_t code_length); + + void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name) + SHARED_REQUIRES(Locks::mutator_lock_); + + protected: + virtual void SetUp(); + + virtual void SetUpRuntimeOptions(RuntimeOptions* options); + + Compiler::Kind GetCompilerKind() const; + void SetCompilerKind(Compiler::Kind compiler_kind); + + InstructionSet GetInstructionSet() const; + + // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler + // driver assumes ownership of the set, so the test should properly release the set. + virtual std::unordered_set<std::string>* GetImageClasses(); + + // Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler + // driver assumes ownership of the set, so the test should properly release the set. + virtual std::unordered_set<std::string>* GetCompiledClasses(); + + // Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler + // driver assumes ownership of the set, so the test should properly release the set. + virtual std::unordered_set<std::string>* GetCompiledMethods(); + + virtual ProfileCompilationInfo* GetProfileCompilationInfo(); + + virtual void TearDown(); + + void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) + SHARED_REQUIRES(Locks::mutator_lock_); + + void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); + + void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, + const char* method_name, const char* signature) + SHARED_REQUIRES(Locks::mutator_lock_); + + void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, + const char* method_name, const char* signature) + SHARED_REQUIRES(Locks::mutator_lock_); + + void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U); + + void ReserveImageSpace(); + + void UnreserveImageSpace(); + + Compiler::Kind compiler_kind_ = Compiler::kOptimizing; + std::unique_ptr<CompilerOptions> compiler_options_; + std::unique_ptr<VerificationResults> verification_results_; + std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_; + std::unique_ptr<CompilerDriver> compiler_driver_; + std::unique_ptr<CumulativeLogger> timer_; + std::unique_ptr<const InstructionSetFeatures> instruction_set_features_; + + + private: + std::unique_ptr<MemMap> image_reservation_; + + // Chunks must not move their storage after being created - use the node-based std::list. + std::list<std::vector<uint8_t>> header_code_and_maps_chunks_; +}; + +// TODO: When read barrier works with all tests, get rid of this. +#define TEST_DISABLED_FOR_READ_BARRIER() \ + if (kUseReadBarrier) { \ + printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \ + return; \ + } + +// TODO: When read barrier works with all Optimizing back ends, get rid of this. +#define TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS() \ + if (kUseReadBarrier && GetCompilerKind() == Compiler::kOptimizing) { \ + switch (GetInstructionSet()) { \ + case kArm64: \ + case kThumb2: \ + case kX86: \ + case kX86_64: \ + /* Instruction set has read barrier support. */ \ + break; \ + \ + default: \ + /* Instruction set does not have barrier support.
*/ \ + printf("WARNING: TEST DISABLED FOR READ BARRIER WITH OPTIMIZING " \ + "FOR THIS INSTRUCTION SET\n"); \ + return; \ + } \ + } + +} // namespace art + +#endif // ART_COMPILER_COMMON_COMPILER_TEST_H_ diff --git a/compiler/compiled_class.h b/compiler/compiled_class.h new file mode 100644 index 000000000..b88d613ad --- /dev/null +++ b/compiler/compiled_class.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_COMPILED_CLASS_H_ +#define ART_COMPILER_COMPILED_CLASS_H_ + +#include "mirror/class.h" + +namespace art { + +class CompiledClass { + public: + explicit CompiledClass(mirror::Class::Status status) : status_(status) {} + ~CompiledClass() {} + mirror::Class::Status GetStatus() const { + return status_; + } + private: + const mirror::Class::Status status_; +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILED_CLASS_H_ diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc new file mode 100644 index 000000000..f06d90c81 --- /dev/null +++ b/compiler/compiled_method.cc @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "compiled_method.h" + +#include "driver/compiled_method_storage.h" +#include "driver/compiler_driver.h" +#include "utils/swap_space.h" + +namespace art { + +CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code) + : compiler_driver_(compiler_driver), + instruction_set_(instruction_set), + quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)) { +} + +CompiledCode::~CompiledCode() { + compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_); +} + +bool CompiledCode::operator==(const CompiledCode& rhs) const { + if (quick_code_ != nullptr) { + if (rhs.quick_code_ == nullptr) { + return false; + } else if (quick_code_->size() != rhs.quick_code_->size()) { + return false; + } else { + return std::equal(quick_code_->begin(), quick_code_->end(), rhs.quick_code_->begin()); + } + } + return (rhs.quick_code_ == nullptr); +} + +size_t CompiledCode::AlignCode(size_t offset) const { + return AlignCode(offset, instruction_set_); +} + +size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) { + return RoundUp(offset, GetInstructionSetAlignment(instruction_set)); +} + +size_t CompiledCode::CodeDelta() const { + return CodeDelta(instruction_set_); +} + +size_t CompiledCode::CodeDelta(InstructionSet instruction_set) { + switch (instruction_set) { + case kArm: + case kArm64: + case kMips: + case kMips64: + case kX86: + case kX86_64: + return 0; + case kThumb2: { + // +1 to set the low-order bit so a BLX will switch to Thumb mode + return 1; + } + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + return 0; + } +} + +const void* CompiledCode::CodePointer(const void* code_pointer, + InstructionSet instruction_set) { + switch (instruction_set) { + case kArm: + case kArm64: + case kMips: + case kMips64: + case kX86: + case kX86_64: + return code_pointer; + case kThumb2: { + uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer); + // Set the low-order bit so a BLX will switch to Thumb mode + address |= 0x1; + return reinterpret_cast<const void*>(address); + } + default: + LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; + return nullptr; + } +} + +CompiledMethod::CompiledMethod(CompilerDriver* driver, + InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code, + const size_t frame_size_in_bytes, + const uint32_t core_spill_mask, + const uint32_t fp_spill_mask, + const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& vmap_table, + const ArrayRef<const uint8_t>& cfi_info, + const ArrayRef<const LinkerPatch>& patches) + : CompiledCode(driver, instruction_set, quick_code), + frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), + fp_spill_mask_(fp_spill_mask), + src_mapping_table_( + driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)), + vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)), + cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)), + patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) { +} + +CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( + CompilerDriver* driver, + InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code, + const size_t frame_size_in_bytes, + const uint32_t core_spill_mask, + const uint32_t fp_spill_mask, + const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& vmap_table, + const ArrayRef<const uint8_t>& cfi_info, + const ArrayRef<const LinkerPatch>& patches) { + SwapAllocator<CompiledMethod>
alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator()); + CompiledMethod* ret = alloc.allocate(1); + alloc.construct(ret, + driver, + instruction_set, + quick_code, + frame_size_in_bytes, + core_spill_mask, + fp_spill_mask, + src_mapping_table, + vmap_table, + cfi_info, patches); + return ret; +} + +void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) { + SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator()); + alloc.destroy(m); + alloc.deallocate(m, 1); +} + +CompiledMethod::~CompiledMethod() { + CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage(); + storage->ReleaseLinkerPatches(patches_); + storage->ReleaseCFIInfo(cfi_info_); + storage->ReleaseVMapTable(vmap_table_); + storage->ReleaseSrcMappingTable(src_mapping_table_); +} + +} // namespace art diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h new file mode 100644 index 000000000..9479ff38b --- /dev/null +++ b/compiler/compiled_method.h @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_COMPILED_METHOD_H_ +#define ART_COMPILER_COMPILED_METHOD_H_ + +#include <iosfwd> +#include <memory> +#include <string> +#include <vector> + +#include "arch/instruction_set.h" +#include "base/bit_utils.h" +#include "base/length_prefixed_array.h" +#include "method_reference.h" +#include "utils/array_ref.h" + +namespace art { + +class CompilerDriver; +class CompiledMethodStorage; + +class CompiledCode { + public: + // For Quick to supply a code blob + CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code); + + virtual ~CompiledCode(); + + InstructionSet GetInstructionSet() const { + return instruction_set_; + } + + ArrayRef<const uint8_t> GetQuickCode() const { + return GetArray(quick_code_); + } + + bool operator==(const CompiledCode& rhs) const; + + // Aligns an offset from a page-aligned value to make it suitable + // for code storage. For example on ARM, to ensure that PC-relative + // value computations work out as expected. + size_t AlignCode(size_t offset) const; + static size_t AlignCode(size_t offset, InstructionSet instruction_set); + + // Returns the difference between the code address and a usable PC. + // Mainly to cope with kThumb2 where the lower bit must be set. + size_t CodeDelta() const; + static size_t CodeDelta(InstructionSet instruction_set); + + // Returns a pointer suitable for invoking the code at the argument + // code_pointer address. Mainly to cope with kThumb2 where the + // lower bit must be set to indicate Thumb mode.
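+ // Illustrative expectations, with p a hypothetical code address: for kThumb2, + // CodePointer(p, kThumb2) yields p | 1 and CodeDelta(kThumb2) is 1; for every + // other instruction set both calls leave the address unchanged / return 0.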
+ static const void* CodePointer(const void* code_pointer, + InstructionSet instruction_set); + + protected: + template <typename T> + static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array) { + if (array == nullptr) { + return ArrayRef<const T>(); + } + DCHECK_NE(array->size(), 0u); + return ArrayRef<const T>(&array->At(0), array->size()); + } + + CompilerDriver* GetCompilerDriver() { + return compiler_driver_; + } + + private: + CompilerDriver* const compiler_driver_; + + const InstructionSet instruction_set_; + + // Used to store the PIC code for Quick. + const LengthPrefixedArray<uint8_t>* const quick_code_; +}; + +class SrcMapElem { + public: + uint32_t from_; + int32_t to_; +}; + +inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) { + if (lhs.from_ != rhs.from_) { + return lhs.from_ < rhs.from_; + } + return lhs.to_ < rhs.to_; +} + +inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) { + return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_; +} + +template <class Allocator> +class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> { + public: + using std::vector<SrcMapElem, Allocator>::begin; + using typename std::vector<SrcMapElem, Allocator>::const_iterator; + using std::vector<SrcMapElem, Allocator>::empty; + using std::vector<SrcMapElem, Allocator>::end; + using std::vector<SrcMapElem, Allocator>::resize; + using std::vector<SrcMapElem, Allocator>::shrink_to_fit; + using std::vector<SrcMapElem, Allocator>::size; + + explicit SrcMap() {} + explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {} + + template <class InputIt> + SrcMap(InputIt first, InputIt last, const Allocator& alloc) + : std::vector<SrcMapElem, Allocator>(first, last, alloc) {} + + void push_back(const SrcMapElem& elem) { + if (!empty()) { + // Check that the addresses are inserted in sorted order. + DCHECK_GE(elem.from_, this->back().from_); + // If two consecutive entries map to the same value, ignore the latter. + // E.g. for map {{0, 1}, {4, 1}, {8, 2}}, all values in [0,8) map to 1. + if (elem.to_ == this->back().to_) { + return; + } + } + std::vector<SrcMapElem, Allocator>::push_back(elem); + } + + // Returns true and the corresponding "to" value if the mapping is found. + // Otherwise returns false and 0. + std::pair<bool, int32_t> Find(uint32_t from) const { + // Finds first mapping such that lb.from_ >= from. + auto lb = std::lower_bound(begin(), end(), SrcMapElem {from, INT32_MIN}); + if (lb != end() && lb->from_ == from) { + // Found exact match. + return std::make_pair(true, lb->to_); + } else if (lb != begin()) { + // The previous mapping is still in effect. + return std::make_pair(true, (--lb)->to_); + } else { + // Not found because 'from' is smaller than first entry in the map. + return std::make_pair(false, 0); + } + } +}; + +using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>; + +class LinkerPatch { + public: + // Note: We explicitly specify the underlying type of the enum because GCC + // would otherwise select a bigger underlying type and then complain that + // 'art::LinkerPatch::patch_type_' is too small to hold all + // values of 'enum class art::LinkerPatch::Type' + // which is ridiculous given we have only a handful of values here. If we + // choose to squeeze the Type into fewer than 8 bits, we'll have to declare + // patch_type_ as an uintN_t and do explicit static_cast<>s. + enum class Type : uint8_t { + kRecordPosition, // Just record patch position for patchoat. + kMethod, + kCall, + kCallRelative, // NOTE: Actual patching is instruction_set-dependent. + kType, + kString, + kStringRelative, // NOTE: Actual patching is instruction_set-dependent. + kDexCacheArray, // NOTE: Actual patching is instruction_set-dependent.
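+ // (All values must fit in the 8-bit patch_type_ bit-field declared below.)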
+ }; + + static LinkerPatch RecordPosition(size_t literal_offset) { + return LinkerPatch(literal_offset, Type::kRecordPosition, /* target_dex_file */ nullptr); + } + + static LinkerPatch MethodPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, Type::kMethod, target_dex_file); + patch.method_idx_ = target_method_idx; + return patch; + } + + static LinkerPatch CodePatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, Type::kCall, target_dex_file); + patch.method_idx_ = target_method_idx; + return patch; + } + + static LinkerPatch RelativeCodePatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_method_idx) { + LinkerPatch patch(literal_offset, Type::kCallRelative, target_dex_file); + patch.method_idx_ = target_method_idx; + return patch; + } + + static LinkerPatch TypePatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_type_idx) { + LinkerPatch patch(literal_offset, Type::kType, target_dex_file); + patch.type_idx_ = target_type_idx; + return patch; + } + + static LinkerPatch StringPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t target_string_idx) { + LinkerPatch patch(literal_offset, Type::kString, target_dex_file); + patch.string_idx_ = target_string_idx; + return patch; + } + + static LinkerPatch RelativeStringPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + uint32_t target_string_idx) { + LinkerPatch patch(literal_offset, Type::kStringRelative, target_dex_file); + patch.string_idx_ = target_string_idx; + patch.pc_insn_offset_ = pc_insn_offset; + return patch; + } + + static LinkerPatch DexCacheArrayPatch(size_t literal_offset, + const DexFile* target_dex_file, + uint32_t pc_insn_offset, + size_t element_offset) { + DCHECK(IsUint<32>(element_offset)); + LinkerPatch patch(literal_offset, Type::kDexCacheArray, target_dex_file); + patch.pc_insn_offset_ = pc_insn_offset; + patch.element_offset_ = element_offset; + return patch; + } + + LinkerPatch(const LinkerPatch& other) = default; + LinkerPatch& operator=(const LinkerPatch& other) = default; + + size_t LiteralOffset() const { + return literal_offset_; + } + + Type GetType() const { + return patch_type_; + } + + bool IsPcRelative() const { + switch (GetType()) { + case Type::kCallRelative: + case Type::kStringRelative: + case Type::kDexCacheArray: + return true; + default: + return false; + } + } + + MethodReference TargetMethod() const { + DCHECK(patch_type_ == Type::kMethod || + patch_type_ == Type::kCall || + patch_type_ == Type::kCallRelative); + return MethodReference(target_dex_file_, method_idx_); + } + + const DexFile* TargetTypeDexFile() const { + DCHECK(patch_type_ == Type::kType); + return target_dex_file_; + } + + uint32_t TargetTypeIndex() const { + DCHECK(patch_type_ == Type::kType); + return type_idx_; + } + + const DexFile* TargetStringDexFile() const { + DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative); + return target_dex_file_; + } + + uint32_t TargetStringIndex() const { + DCHECK(patch_type_ == Type::kString || patch_type_ == Type::kStringRelative); + return string_idx_; + } + + const DexFile* TargetDexCacheDexFile() const { + DCHECK(patch_type_ == Type::kDexCacheArray); + return target_dex_file_; + } + + size_t TargetDexCacheElementOffset() const { + DCHECK(patch_type_ == Type::kDexCacheArray); + return 
element_offset_; + } + + uint32_t PcInsnOffset() const { + DCHECK(patch_type_ == Type::kStringRelative || patch_type_ == Type::kDexCacheArray); + return pc_insn_offset_; + } + + private: + LinkerPatch(size_t literal_offset, Type patch_type, const DexFile* target_dex_file) + : target_dex_file_(target_dex_file), + literal_offset_(literal_offset), + patch_type_(patch_type) { + cmp1_ = 0u; + cmp2_ = 0u; + // The compiler rejects methods that are too big, so the compiled code + // of a single method really shouldn't be anywhere close to 16MiB. + DCHECK(IsUint<24>(literal_offset)); + } + + const DexFile* target_dex_file_; + uint32_t literal_offset_ : 24; // Method code size up to 16MiB. + Type patch_type_ : 8; + union { + uint32_t cmp1_; // Used for relational operators. + uint32_t method_idx_; // Method index for Call/Method patches. + uint32_t type_idx_; // Type index for Type patches. + uint32_t string_idx_; // String index for String patches. + uint32_t element_offset_; // Element offset in the dex cache arrays. + static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators"); + static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators"); + }; + union { + // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`. + // This allows a hashing function to treat an array of linker patches as raw memory. + size_t cmp2_; // Used for relational operators. + // Literal offset of the insn loading PC (same as literal_offset if it's the same insn, + // may be different if the PC-relative addressing needs multiple insns). + uint32_t pc_insn_offset_; + static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators"); + }; + + friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs); + friend bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs); +}; +std::ostream& operator<<(std::ostream& os, const LinkerPatch::Type& type); + +inline bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs) { + return lhs.literal_offset_ == rhs.literal_offset_ && + lhs.patch_type_ == rhs.patch_type_ && + lhs.target_dex_file_ == rhs.target_dex_file_ && + lhs.cmp1_ == rhs.cmp1_ && + lhs.cmp2_ == rhs.cmp2_; +} + +inline bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs) { + return (lhs.literal_offset_ != rhs.literal_offset_) ? lhs.literal_offset_ < rhs.literal_offset_ + : (lhs.patch_type_ != rhs.patch_type_) ? lhs.patch_type_ < rhs.patch_type_ + : (lhs.target_dex_file_ != rhs.target_dex_file_) ? lhs.target_dex_file_ < rhs.target_dex_file_ + : (lhs.cmp1_ != rhs.cmp1_) ? lhs.cmp1_ < rhs.cmp1_ + : lhs.cmp2_ < rhs.cmp2_; +} + +class CompiledMethod FINAL : public CompiledCode { + public: + // Constructs a CompiledMethod. + // Note: Consider using the static allocation methods below that will allocate the CompiledMethod + // in the swap space.
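+ // Typical lifecycle, as a sketch (argument names are placeholders): + //   CompiledMethod* m = CompiledMethod::SwapAllocCompiledMethod( + //       driver, isa, code, frame_size, core_mask, fp_mask, src_map, vmap, cfi, patches); + //   ... read the tables, emit the oat file ... + //   CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver, m);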
+ CompiledMethod(CompilerDriver* driver, + InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code, + const size_t frame_size_in_bytes, + const uint32_t core_spill_mask, + const uint32_t fp_spill_mask, + const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& vmap_table, + const ArrayRef<const uint8_t>& cfi_info, + const ArrayRef<const LinkerPatch>& patches); + + virtual ~CompiledMethod(); + + static CompiledMethod* SwapAllocCompiledMethod( + CompilerDriver* driver, + InstructionSet instruction_set, + const ArrayRef<const uint8_t>& quick_code, + const size_t frame_size_in_bytes, + const uint32_t core_spill_mask, + const uint32_t fp_spill_mask, + const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& vmap_table, + const ArrayRef<const uint8_t>& cfi_info, + const ArrayRef<const LinkerPatch>& patches); + + static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m); + + size_t GetFrameSizeInBytes() const { + return frame_size_in_bytes_; + } + + uint32_t GetCoreSpillMask() const { + return core_spill_mask_; + } + + uint32_t GetFpSpillMask() const { + return fp_spill_mask_; + } + + ArrayRef<const SrcMapElem> GetSrcMappingTable() const { + return GetArray(src_mapping_table_); + } + + ArrayRef<const uint8_t> GetVmapTable() const { + return GetArray(vmap_table_); + } + + ArrayRef<const uint8_t> GetCFIInfo() const { + return GetArray(cfi_info_); + } + + ArrayRef<const LinkerPatch> GetPatches() const { + return GetArray(patches_); + } + + private: + // For quick code, the size of the activation used by the code. + const size_t frame_size_in_bytes_; + // For quick code, a bit mask describing spilled GPR callee-save registers. + const uint32_t core_spill_mask_; + // For quick code, a bit mask describing spilled FPR callee-save registers. + const uint32_t fp_spill_mask_; + // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset. + const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_; + // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed. + const LengthPrefixedArray<uint8_t>* const vmap_table_; + // For quick code, a FDE entry for the debug_frame section. + const LengthPrefixedArray<uint8_t>* const cfi_info_; + // For quick code, linker patches needed by the method. + const LengthPrefixedArray<LinkerPatch>* const patches_; +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILED_METHOD_H_ diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc new file mode 100644 index 000000000..99ee875da --- /dev/null +++ b/compiler/compiled_method_test.cc @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> + +#include "compiled_method.h" + +namespace art { + +TEST(CompiledMethod, SrcMapElemOperators) { + SrcMapElem elems[] = { + { 1u, -1 }, + { 1u, 0 }, + { 1u, 1 }, + { 2u, -1 }, + { 2u, 0 }, // Index 4. + { 2u, 1 }, + { 2u, 0u }, // Index 6: Arbitrarily add identical SrcMapElem with index 4. + }; + + for (size_t i = 0; i != arraysize(elems); ++i) { + for (size_t j = 0; j != arraysize(elems); ++j) { + bool expected = (i != 6u ? i : 4u) == (j != 6u ?
j : 4u); + EXPECT_EQ(expected, elems[i] == elems[j]) << i << " " << j; + } + } + + for (size_t i = 0; i != arraysize(elems); ++i) { + for (size_t j = 0; j != arraysize(elems); ++j) { + bool expected = (i != 6u ? i : 4u) < (j != 6u ? j : 4u); + EXPECT_EQ(expected, elems[i] < elems[j]) << i << " " << j; + } + } +} + +TEST(CompiledMethod, LinkerPatchOperators) { + const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1); + const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2); + LinkerPatch patches[] = { + LinkerPatch::MethodPatch(16u, dex_file1, 1000u), + LinkerPatch::MethodPatch(16u, dex_file1, 1001u), + LinkerPatch::MethodPatch(16u, dex_file2, 1000u), + LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // Index 3. + LinkerPatch::CodePatch(16u, dex_file1, 1000u), + LinkerPatch::CodePatch(16u, dex_file1, 1001u), + LinkerPatch::CodePatch(16u, dex_file2, 1000u), + LinkerPatch::CodePatch(16u, dex_file2, 1001u), + LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u), + LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u), + LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u), + LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u), + LinkerPatch::TypePatch(16u, dex_file1, 1000u), + LinkerPatch::TypePatch(16u, dex_file1, 1001u), + LinkerPatch::TypePatch(16u, dex_file2, 1000u), + LinkerPatch::TypePatch(16u, dex_file2, 1001u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2000u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2000u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2001u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2001u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2000u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2000u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2001u), + LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2001u), + LinkerPatch::MethodPatch(32u, dex_file1, 1000u), + LinkerPatch::MethodPatch(32u, dex_file1, 1001u), + LinkerPatch::MethodPatch(32u, dex_file2, 1000u), + LinkerPatch::MethodPatch(32u, dex_file2, 1001u), + LinkerPatch::CodePatch(32u, dex_file1, 1000u), + LinkerPatch::CodePatch(32u, dex_file1, 1001u), + LinkerPatch::CodePatch(32u, dex_file2, 1000u), + LinkerPatch::CodePatch(32u, dex_file2, 1001u), + LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u), + LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u), + LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u), + LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u), + LinkerPatch::TypePatch(32u, dex_file1, 1000u), + LinkerPatch::TypePatch(32u, dex_file1, 1001u), + LinkerPatch::TypePatch(32u, dex_file2, 1000u), + LinkerPatch::TypePatch(32u, dex_file2, 1001u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2000u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2000u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2001u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2001u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2000u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2000u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2001u), + LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2001u), + LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // identical with patch as index 3. + }; + constexpr size_t last_index = arraysize(patches) - 1u; + + for (size_t i = 0; i != arraysize(patches); ++i) { + for (size_t j = 0; j != arraysize(patches); ++j) { + bool expected = (i != last_index ? i : 3u) == (j != last_index ?
j : 3u); + EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j; + } + } + + for (size_t i = 0; i != arraysize(patches); ++i) { + for (size_t j = 0; j != arraysize(patches); ++j) { + bool expected = (i != last_index ? i : 3u) < (j != last_index ? j : 3u); + EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j; + } + } +} + +} // namespace art diff --git a/compiler/compiler.cc b/compiler/compiler.cc new file mode 100644 index 000000000..16263177d --- /dev/null +++ b/compiler/compiler.cc @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compiler.h" + +#include "base/logging.h" +#include "driver/compiler_driver.h" +#include "optimizing/optimizing_compiler.h" +#include "utils.h" + +namespace art { + +Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) { + switch (kind) { + case kQuick: + // TODO: Remove Quick in options. + case kOptimizing: + return CreateOptimizingCompiler(driver); + + default: + LOG(FATAL) << "UNREACHABLE"; + UNREACHABLE(); + } +} + +bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item, + uint32_t method_idx, + const DexFile& dex_file) { + /* + * Skip compilation for pathologically large methods - either by instruction count or num vregs. + * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter + * of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space. + */ + if (code_item.insns_size_in_code_units_ >= UINT16_MAX / 4) { + LOG(INFO) << "Method exceeds compiler instruction limit: " + << code_item.insns_size_in_code_units_ + << " in " << PrettyMethod(method_idx, dex_file); + return true; + } + if (code_item.registers_size_ >= UINT16_MAX / 4) { + LOG(INFO) << "Method exceeds compiler virtual register limit: " + << code_item.registers_size_ << " in " << PrettyMethod(method_idx, dex_file); + return true; + } + return false; +} + +} // namespace art diff --git a/compiler/compiler.h b/compiler/compiler.h new file mode 100644 index 000000000..487a27fec --- /dev/null +++ b/compiler/compiler.h @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_COMPILER_H_ +#define ART_COMPILER_COMPILER_H_ + +#include "dex_file.h" +#include "os.h" + +namespace art { + +namespace jit { + class JitCodeCache; +} + +class ArtMethod; +class CompilerDriver; +class CompiledMethod; +class OatWriter; + +class Compiler { + public: + enum Kind { + kQuick, + kOptimizing + }; + + static Compiler* Create(CompilerDriver* driver, Kind kind); + + virtual void Init() = 0; + + virtual void UnInit() const = 0; + + virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0; + + virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const DexFile& dex_file, + Handle dex_cache) const = 0; + + virtual CompiledMethod* JniCompile(uint32_t access_flags, + uint32_t method_idx, + const DexFile& dex_file) const = 0; + + virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED, + jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED, + ArtMethod* method ATTRIBUTE_UNUSED, + bool osr ATTRIBUTE_UNUSED) + SHARED_REQUIRES(Locks::mutator_lock_) { + return false; + } + + virtual uintptr_t GetEntryPointOf(ArtMethod* method) const + SHARED_REQUIRES(Locks::mutator_lock_) = 0; + + uint64_t GetMaximumCompilationTimeBeforeWarning() const { + return maximum_compilation_time_before_warning_; + } + + virtual ~Compiler() {} + + /* + * @brief Generate and return Dwarf CFI initialization, if supported by the + * backend. + * @param driver CompilerDriver for this compile. + * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF + * information. + * @note This is used for backtrace information in generated code. + */ + virtual std::vector* GetCallFrameInformationInitialization( + const CompilerDriver& driver ATTRIBUTE_UNUSED) const { + return nullptr; + } + + // Returns whether the method to compile is such a pathological case that + // it's not worth compiling. + static bool IsPathologicalCase(const DexFile::CodeItem& code_item, + uint32_t method_idx, + const DexFile& dex_file); + + protected: + Compiler(CompilerDriver* driver, uint64_t warning) : + driver_(driver), maximum_compilation_time_before_warning_(warning) { + } + + CompilerDriver* GetCompilerDriver() const { + return driver_; + } + + private: + CompilerDriver* const driver_; + const uint64_t maximum_compilation_time_before_warning_; + + DISALLOW_COPY_AND_ASSIGN(Compiler); +}; + +} // namespace art + +#endif // ART_COMPILER_COMPILER_H_ diff --git a/compiler/debug/dwarf/debug_abbrev_writer.h b/compiler/debug/dwarf/debug_abbrev_writer.h new file mode 100644 index 000000000..0fc843cdf --- /dev/null +++ b/compiler/debug/dwarf/debug_abbrev_writer.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ +#define ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ + +#include +#include +#include + +#include "base/casts.h" +#include "base/stl_util.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/writer.h" +#include "leb128.h" + +namespace art { +namespace dwarf { + +// Writer for the .debug_abbrev. +// +// Abbreviations specify the format of entries in .debug_info. +// Each entry specifies abbreviation code, which in turns +// determines all the attributes and their format. +// It is possible to think of them as type definitions. +template > +class DebugAbbrevWriter FINAL : private Writer { + static_assert(std::is_same::value, "Invalid value type"); + + public: + explicit DebugAbbrevWriter(Vector* buffer) + : Writer(buffer), + current_abbrev_(buffer->get_allocator()) { + this->PushUint8(0); // Add abbrev table terminator. + } + + // Start abbreviation declaration. + void StartAbbrev(Tag tag) { + DCHECK(current_abbrev_.empty()); + EncodeUnsignedLeb128(¤t_abbrev_, tag); + has_children_offset_ = current_abbrev_.size(); + current_abbrev_.push_back(0); // Place-holder for DW_CHILDREN. + } + + // Add attribute specification. + void AddAbbrevAttribute(Attribute name, Form type) { + EncodeUnsignedLeb128(¤t_abbrev_, name); + EncodeUnsignedLeb128(¤t_abbrev_, type); + } + + // End abbreviation declaration and return its code. + // This will deduplicate abbreviations. + uint32_t EndAbbrev(Children has_children) { + DCHECK(!current_abbrev_.empty()); + current_abbrev_[has_children_offset_] = has_children; + auto it = abbrev_codes_.insert(std::make_pair(std::move(current_abbrev_), NextAbbrevCode())); + uint32_t abbrev_code = it.first->second; + if (UNLIKELY(it.second)) { // Inserted new entry. + const Vector& abbrev = it.first->first; + this->Pop(); // Remove abbrev table terminator. + this->PushUleb128(abbrev_code); + this->PushData(abbrev.data(), abbrev.size()); + this->PushUint8(0); // Attribute list end. + this->PushUint8(0); // Attribute list end. + this->PushUint8(0); // Add abbrev table terminator. + } + current_abbrev_.clear(); + return abbrev_code; + } + + // Get the next free abbrev code. + uint32_t NextAbbrevCode() { + return dchecked_integral_cast(1 + abbrev_codes_.size()); + } + + private: + Vector current_abbrev_; + size_t has_children_offset_ = 0; + std::unordered_map > abbrev_codes_; +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ diff --git a/compiler/debug/dwarf/debug_frame_opcode_writer.h b/compiler/debug/dwarf/debug_frame_opcode_writer.h new file mode 100644 index 000000000..7c75c9bf3 --- /dev/null +++ b/compiler/debug/dwarf/debug_frame_opcode_writer.h @@ -0,0 +1,341 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ +#define ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ + +#include "base/bit_utils.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/register.h" +#include "debug/dwarf/writer.h" + +namespace art { +namespace dwarf { + +// Writer for .debug_frame opcodes (DWARF-3). +// See the DWARF specification for the precise meaning of the opcodes. +// The writer is very light-weight, however it will do the following for you: +// * Choose the most compact encoding of a given opcode. +// * Keep track of current state and convert absolute values to deltas. +// * Divide by header-defined factors as appropriate. +template > +class DebugFrameOpCodeWriter : private Writer { + static_assert(std::is_same::value, "Invalid value type"); + + public: + // To save space, DWARF divides most offsets by header-defined factors. + // They are used in integer divisions, so we make them constants. + // We usually subtract from stack base pointer, so making the factor + // negative makes the encoded values positive and thus easier to encode. + static constexpr int kDataAlignmentFactor = -4; + static constexpr int kCodeAlignmentFactor = 1; + + // Explicitely advance the program counter to given location. + void ALWAYS_INLINE AdvancePC(int absolute_pc) { + DCHECK_GE(absolute_pc, current_pc_); + if (UNLIKELY(enabled_)) { + int delta = FactorCodeOffset(absolute_pc - current_pc_); + if (delta != 0) { + if (delta <= 0x3F) { + this->PushUint8(DW_CFA_advance_loc | delta); + } else if (delta <= UINT8_MAX) { + this->PushUint8(DW_CFA_advance_loc1); + this->PushUint8(delta); + } else if (delta <= UINT16_MAX) { + this->PushUint8(DW_CFA_advance_loc2); + this->PushUint16(delta); + } else { + this->PushUint8(DW_CFA_advance_loc4); + this->PushUint32(delta); + } + } + current_pc_ = absolute_pc; + } + } + + // Override this method to automatically advance the PC before each opcode. + virtual void ImplicitlyAdvancePC() { } + + // Common alias in assemblers - spill relative to current stack pointer. + void ALWAYS_INLINE RelOffset(Reg reg, int offset) { + Offset(reg, offset - current_cfa_offset_); + } + + // Common alias in assemblers - increase stack frame size. + void ALWAYS_INLINE AdjustCFAOffset(int delta) { + DefCFAOffset(current_cfa_offset_ + delta); + } + + // Custom alias - spill many registers based on bitmask. + void ALWAYS_INLINE RelOffsetForMany(Reg reg_base, int offset, + uint32_t reg_mask, int reg_size) { + DCHECK(reg_size == 4 || reg_size == 8); + if (UNLIKELY(enabled_)) { + for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) { + // Skip zero bits and go to the set bit. + int num_zeros = CTZ(reg_mask); + i += num_zeros; + reg_mask >>= num_zeros; + RelOffset(Reg(reg_base.num() + i), offset); + offset += reg_size; + } + } + } + + // Custom alias - unspill many registers based on bitmask. + void ALWAYS_INLINE RestoreMany(Reg reg_base, uint32_t reg_mask) { + if (UNLIKELY(enabled_)) { + for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) { + // Skip zero bits and go to the set bit. + int num_zeros = CTZ(reg_mask); + i += num_zeros; + reg_mask >>= num_zeros; + Restore(Reg(reg_base.num() + i)); + } + } + } + + void ALWAYS_INLINE Nop() { + if (UNLIKELY(enabled_)) { + this->PushUint8(DW_CFA_nop); + } + } + + void ALWAYS_INLINE Offset(Reg reg, int offset) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + int factored_offset = FactorDataOffset(offset); // May change sign. 
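+      // For example, with kDataAlignmentFactor == -4 a spill at offset -8
+      // factors to +2 and takes the compact unsigned DW_CFA_offset form
+      // below, while offset +8 factors to -2 and falls back to the signed
+      // DWARF-3 DW_CFA_offset_extended_sf variant.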
+ if (factored_offset >= 0) { + if (0 <= reg.num() && reg.num() <= 0x3F) { + this->PushUint8(DW_CFA_offset | reg.num()); + this->PushUleb128(factored_offset); + } else { + this->PushUint8(DW_CFA_offset_extended); + this->PushUleb128(reg.num()); + this->PushUleb128(factored_offset); + } + } else { + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_offset_extended_sf); + this->PushUleb128(reg.num()); + this->PushSleb128(factored_offset); + } + } + } + + void ALWAYS_INLINE Restore(Reg reg) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + if (0 <= reg.num() && reg.num() <= 0x3F) { + this->PushUint8(DW_CFA_restore | reg.num()); + } else { + this->PushUint8(DW_CFA_restore_extended); + this->PushUleb128(reg.num()); + } + } + } + + void ALWAYS_INLINE Undefined(Reg reg) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_undefined); + this->PushUleb128(reg.num()); + } + } + + void ALWAYS_INLINE SameValue(Reg reg) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_same_value); + this->PushUleb128(reg.num()); + } + } + + // The previous value of "reg" is stored in register "new_reg". + void ALWAYS_INLINE Register(Reg reg, Reg new_reg) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_register); + this->PushUleb128(reg.num()); + this->PushUleb128(new_reg.num()); + } + } + + void ALWAYS_INLINE RememberState() { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_remember_state); + } + } + + void ALWAYS_INLINE RestoreState() { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_restore_state); + } + } + + void ALWAYS_INLINE DefCFA(Reg reg, int offset) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + if (offset >= 0) { + this->PushUint8(DW_CFA_def_cfa); + this->PushUleb128(reg.num()); + this->PushUleb128(offset); // Non-factored. + } else { + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_def_cfa_sf); + this->PushUleb128(reg.num()); + this->PushSleb128(FactorDataOffset(offset)); + } + } + current_cfa_offset_ = offset; + } + + void ALWAYS_INLINE DefCFARegister(Reg reg) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + this->PushUint8(DW_CFA_def_cfa_register); + this->PushUleb128(reg.num()); + } + } + + void ALWAYS_INLINE DefCFAOffset(int offset) { + if (UNLIKELY(enabled_)) { + if (current_cfa_offset_ != offset) { + ImplicitlyAdvancePC(); + if (offset >= 0) { + this->PushUint8(DW_CFA_def_cfa_offset); + this->PushUleb128(offset); // Non-factored. + } else { + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_def_cfa_offset_sf); + this->PushSleb128(FactorDataOffset(offset)); + } + } + } + // Uncoditional so that the user can still get and check the value. + current_cfa_offset_ = offset; + } + + void ALWAYS_INLINE ValOffset(Reg reg, int offset) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + uses_dwarf3_features_ = true; + int factored_offset = FactorDataOffset(offset); // May change sign. 
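+      // Unlike the DW_CFA_offset family above, val_offset records that the
+      // register's previous *value* equals CFA+offset, not that the value
+      // was saved at that address.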
+ if (factored_offset >= 0) { + this->PushUint8(DW_CFA_val_offset); + this->PushUleb128(reg.num()); + this->PushUleb128(factored_offset); + } else { + this->PushUint8(DW_CFA_val_offset_sf); + this->PushUleb128(reg.num()); + this->PushSleb128(factored_offset); + } + } + } + + void ALWAYS_INLINE DefCFAExpression(uint8_t* expr, int expr_size) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_def_cfa_expression); + this->PushUleb128(expr_size); + this->PushData(expr, expr_size); + } + } + + void ALWAYS_INLINE Expression(Reg reg, uint8_t* expr, int expr_size) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_expression); + this->PushUleb128(reg.num()); + this->PushUleb128(expr_size); + this->PushData(expr, expr_size); + } + } + + void ALWAYS_INLINE ValExpression(Reg reg, uint8_t* expr, int expr_size) { + if (UNLIKELY(enabled_)) { + ImplicitlyAdvancePC(); + uses_dwarf3_features_ = true; + this->PushUint8(DW_CFA_val_expression); + this->PushUleb128(reg.num()); + this->PushUleb128(expr_size); + this->PushData(expr, expr_size); + } + } + + bool IsEnabled() const { return enabled_; } + + void SetEnabled(bool value) { + enabled_ = value; + if (enabled_ && opcodes_.capacity() == 0u) { + opcodes_.reserve(kDefaultCapacity); + } + } + + int GetCurrentPC() const { return current_pc_; } + + int GetCurrentCFAOffset() const { return current_cfa_offset_; } + + void SetCurrentCFAOffset(int offset) { current_cfa_offset_ = offset; } + + using Writer::data; + + explicit DebugFrameOpCodeWriter(bool enabled = true, + const typename Vector::allocator_type& alloc = + typename Vector::allocator_type()) + : Writer(&opcodes_), + enabled_(false), + opcodes_(alloc), + current_cfa_offset_(0), + current_pc_(0), + uses_dwarf3_features_(false) { + SetEnabled(enabled); + } + + virtual ~DebugFrameOpCodeWriter() { } + + protected: + // Best guess based on couple of observed outputs. + static constexpr size_t kDefaultCapacity = 32u; + + int FactorDataOffset(int offset) const { + DCHECK_EQ(offset % kDataAlignmentFactor, 0); + return offset / kDataAlignmentFactor; + } + + int FactorCodeOffset(int offset) const { + DCHECK_EQ(offset % kCodeAlignmentFactor, 0); + return offset / kCodeAlignmentFactor; + } + + bool enabled_; // If disabled all writes are no-ops. + Vector opcodes_; + int current_cfa_offset_; + int current_pc_; + bool uses_dwarf3_features_; + + private: + DISALLOW_COPY_AND_ASSIGN(DebugFrameOpCodeWriter); +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ diff --git a/compiler/debug/dwarf/debug_info_entry_writer.h b/compiler/debug/dwarf/debug_info_entry_writer.h new file mode 100644 index 000000000..85f021ec5 --- /dev/null +++ b/compiler/debug/dwarf/debug_info_entry_writer.h @@ -0,0 +1,228 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ +#define ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ + +#include +#include + +#include "base/casts.h" +#include "debug/dwarf/debug_abbrev_writer.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/expression.h" +#include "debug/dwarf/writer.h" +#include "leb128.h" + +namespace art { +namespace dwarf { + +/* + * Writer for debug information entries (DIE). + * + * Usage: + * StartTag(DW_TAG_compile_unit); + * WriteStrp(DW_AT_producer, "Compiler name", debug_str); + * StartTag(DW_TAG_subprogram); + * WriteStrp(DW_AT_name, "Foo", debug_str); + * EndTag(); + * EndTag(); + */ +template > +class DebugInfoEntryWriter FINAL : private Writer { + static_assert(std::is_same::value, "Invalid value type"); + + public: + static constexpr size_t kCompilationUnitHeaderSize = 11; + + // Start debugging information entry. + // Returns offset of the entry in compilation unit. + size_t StartTag(Tag tag) { + if (inside_entry_) { + // Write abbrev code for the previous entry. + // Parent entry is finalized before any children are written. + this->UpdateUleb128(abbrev_code_offset_, debug_abbrev_->EndAbbrev(DW_CHILDREN_yes)); + inside_entry_ = false; + } + debug_abbrev_->StartAbbrev(tag); + // Abbrev code placeholder of sufficient size. + abbrev_code_offset_ = this->data()->size(); + this->PushUleb128(debug_abbrev_->NextAbbrevCode()); + depth_++; + inside_entry_ = true; + return abbrev_code_offset_ + kCompilationUnitHeaderSize; + } + + // End debugging information entry. + void EndTag() { + DCHECK_GT(depth_, 0); + if (inside_entry_) { + // Write abbrev code for this entry. + this->UpdateUleb128(abbrev_code_offset_, debug_abbrev_->EndAbbrev(DW_CHILDREN_no)); + inside_entry_ = false; + // This entry has no children and so there is no terminator. + } else { + // The entry has been already finalized so it must be parent entry + // and we need to write the terminator required by DW_CHILDREN_yes. 
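+      // (A sibling chain opened with DW_CHILDREN_yes is ended by a single
+      // zero abbreviation code, which is the byte written below.)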
+ this->PushUint8(0); + } + depth_--; + } + + void WriteAddr(Attribute attrib, uint64_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_addr); + patch_locations_.push_back(this->data()->size()); + if (is64bit_) { + this->PushUint64(value); + } else { + this->PushUint32(value); + } + } + + void WriteBlock(Attribute attrib, const uint8_t* ptr, size_t num_bytes) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_block); + this->PushUleb128(num_bytes); + this->PushData(ptr, num_bytes); + } + + void WriteExprLoc(Attribute attrib, const Expression& expr) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_exprloc); + this->PushUleb128(dchecked_integral_cast(expr.size())); + this->PushData(expr.data()); + } + + void WriteData1(Attribute attrib, uint8_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data1); + this->PushUint8(value); + } + + void WriteData2(Attribute attrib, uint16_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data2); + this->PushUint16(value); + } + + void WriteData4(Attribute attrib, uint32_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data4); + this->PushUint32(value); + } + + void WriteData8(Attribute attrib, uint64_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data8); + this->PushUint64(value); + } + + void WriteSecOffset(Attribute attrib, uint32_t offset) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_sec_offset); + this->PushUint32(offset); + } + + void WriteSdata(Attribute attrib, int value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_sdata); + this->PushSleb128(value); + } + + void WriteUdata(Attribute attrib, int value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_udata); + this->PushUleb128(value); + } + + void WriteUdata(Attribute attrib, uint32_t value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_udata); + this->PushUleb128(value); + } + + void WriteFlag(Attribute attrib, bool value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_flag); + this->PushUint8(value ? 
1 : 0); + } + + void WriteFlagPresent(Attribute attrib) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_flag_present); + } + + void WriteRef4(Attribute attrib, uint32_t cu_offset) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_ref4); + this->PushUint32(cu_offset); + } + + void WriteRef(Attribute attrib, uint32_t cu_offset) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_ref_udata); + this->PushUleb128(cu_offset); + } + + void WriteString(Attribute attrib, const char* value) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_string); + this->PushString(value); + } + + void WriteStrp(Attribute attrib, size_t debug_str_offset) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_strp); + this->PushUint32(dchecked_integral_cast(debug_str_offset)); + } + + void WriteStrp(Attribute attrib, const char* str, size_t len, + std::vector* debug_str) { + debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_strp); + this->PushUint32(debug_str->size()); + debug_str->insert(debug_str->end(), str, str + len); + debug_str->push_back(0); + } + + void WriteStrp(Attribute attrib, const char* str, std::vector* debug_str) { + WriteStrp(attrib, str, strlen(str), debug_str); + } + + bool Is64bit() const { return is64bit_; } + + const std::vector& GetPatchLocations() const { + return patch_locations_; + } + + int Depth() const { return depth_; } + + using Writer::data; + using Writer::size; + using Writer::UpdateUint32; + + DebugInfoEntryWriter(bool is64bitArch, + DebugAbbrevWriter* debug_abbrev, + const typename Vector::allocator_type& alloc = + typename Vector::allocator_type()) + : Writer(&entries_), + debug_abbrev_(debug_abbrev), + entries_(alloc), + is64bit_(is64bitArch) { + } + + ~DebugInfoEntryWriter() { + DCHECK(!inside_entry_); + DCHECK_EQ(depth_, 0); + } + + private: + DebugAbbrevWriter* debug_abbrev_; + Vector entries_; + bool is64bit_; + int depth_ = 0; + size_t abbrev_code_offset_ = 0; // Location to patch once we know the code. + bool inside_entry_ = false; // Entry ends at first child (if any). + std::vector patch_locations_; +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h new file mode 100644 index 000000000..b4a4d63f0 --- /dev/null +++ b/compiler/debug/dwarf/debug_line_opcode_writer.h @@ -0,0 +1,261 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ +#define ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ + +#include + +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/writer.h" + +namespace art { +namespace dwarf { + +// Writer for the .debug_line opcodes (DWARF-3). +// The writer is very light-weight, however it will do the following for you: +// * Choose the most compact encoding of a given opcode. 
+// * Keep track of current state and convert absolute values to deltas. +// * Divide by header-defined factors as appropriate. +template> +class DebugLineOpCodeWriter FINAL : private Writer { + static_assert(std::is_same::value, "Invalid value type"); + + public: + static constexpr int kOpcodeBase = 13; + static constexpr bool kDefaultIsStmt = false; + static constexpr int kLineBase = -5; + static constexpr int kLineRange = 14; + + void AddRow() { + this->PushUint8(DW_LNS_copy); + } + + void AdvancePC(uint64_t absolute_address) { + DCHECK_NE(current_address_, 0u); // Use SetAddress for the first advance. + DCHECK_GE(absolute_address, current_address_); + if (absolute_address != current_address_) { + uint64_t delta = FactorCodeOffset(absolute_address - current_address_); + if (delta <= INT32_MAX) { + this->PushUint8(DW_LNS_advance_pc); + this->PushUleb128(static_cast(delta)); + current_address_ = absolute_address; + } else { + SetAddress(absolute_address); + } + } + } + + void AdvanceLine(int absolute_line) { + int delta = absolute_line - current_line_; + if (delta != 0) { + this->PushUint8(DW_LNS_advance_line); + this->PushSleb128(delta); + current_line_ = absolute_line; + } + } + + void SetFile(int file) { + if (current_file_ != file) { + this->PushUint8(DW_LNS_set_file); + this->PushUleb128(file); + current_file_ = file; + } + } + + void SetColumn(int column) { + this->PushUint8(DW_LNS_set_column); + this->PushUleb128(column); + } + + void SetIsStmt(bool is_stmt) { + if (is_stmt_ != is_stmt) { + this->PushUint8(DW_LNS_negate_stmt); + is_stmt_ = is_stmt; + } + } + + void SetBasicBlock() { + this->PushUint8(DW_LNS_set_basic_block); + } + + void SetPrologueEnd() { + uses_dwarf3_features_ = true; + this->PushUint8(DW_LNS_set_prologue_end); + } + + void SetEpilogueBegin() { + uses_dwarf3_features_ = true; + this->PushUint8(DW_LNS_set_epilogue_begin); + } + + void SetISA(int isa) { + uses_dwarf3_features_ = true; + this->PushUint8(DW_LNS_set_isa); + this->PushUleb128(isa); + } + + void EndSequence() { + this->PushUint8(0); + this->PushUleb128(1); + this->PushUint8(DW_LNE_end_sequence); + current_address_ = 0; + current_file_ = 1; + current_line_ = 1; + is_stmt_ = kDefaultIsStmt; + } + + // Uncoditionally set address using the long encoding. + // This gives the linker opportunity to relocate the address. + void SetAddress(uint64_t absolute_address) { + DCHECK_GE(absolute_address, current_address_); + FactorCodeOffset(absolute_address); // Check if it is factorable. + this->PushUint8(0); + if (use_64bit_address_) { + this->PushUleb128(1 + 8); + this->PushUint8(DW_LNE_set_address); + patch_locations_.push_back(this->data()->size()); + this->PushUint64(absolute_address); + } else { + this->PushUleb128(1 + 4); + this->PushUint8(DW_LNE_set_address); + patch_locations_.push_back(this->data()->size()); + this->PushUint32(absolute_address); + } + current_address_ = absolute_address; + } + + void DefineFile(const char* filename, + int directory_index, + int modification_time, + int file_size) { + int size = 1 + + strlen(filename) + 1 + + UnsignedLeb128Size(directory_index) + + UnsignedLeb128Size(modification_time) + + UnsignedLeb128Size(file_size); + this->PushUint8(0); + this->PushUleb128(size); + size_t start = data()->size(); + this->PushUint8(DW_LNE_define_file); + this->PushString(filename); + this->PushUleb128(directory_index); + this->PushUleb128(modification_time); + this->PushUleb128(file_size); + DCHECK_EQ(start + size, data()->size()); + } + + // Compact address and line opcode. 
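+  // For example, with the constants above (kOpcodeBase 13, kLineBase -5,
+  // kLineRange 14), advancing the line by +1 and the address by 2 factored
+  // code units encodes as one special-opcode byte: 13 + (1 - (-5)) + 2 * 14 = 47.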
+ void AddRow(uint64_t absolute_address, int absolute_line) { + DCHECK_GE(absolute_address, current_address_); + + // If the address is definitely too far, use the long encoding. + uint64_t delta_address = FactorCodeOffset(absolute_address - current_address_); + if (delta_address > UINT8_MAX) { + AdvancePC(absolute_address); + delta_address = 0; + } + + // If the line is definitely too far, use the long encoding. + int delta_line = absolute_line - current_line_; + if (!(kLineBase <= delta_line && delta_line < kLineBase + kLineRange)) { + AdvanceLine(absolute_line); + delta_line = 0; + } + + // Both address and line should be reasonable now. Use the short encoding. + int opcode = kOpcodeBase + (delta_line - kLineBase) + + (static_cast(delta_address) * kLineRange); + if (opcode > UINT8_MAX) { + // If the address is still too far, try to increment it by const amount. + int const_advance = (0xFF - kOpcodeBase) / kLineRange; + opcode -= (kLineRange * const_advance); + if (opcode <= UINT8_MAX) { + this->PushUint8(DW_LNS_const_add_pc); + } else { + // Give up and use long encoding for address. + AdvancePC(absolute_address); + // Still use the opcode to do line advance and copy. + opcode = kOpcodeBase + (delta_line - kLineBase); + } + } + DCHECK(kOpcodeBase <= opcode && opcode <= 0xFF); + this->PushUint8(opcode); // Special opcode. + current_line_ = absolute_line; + current_address_ = absolute_address; + } + + int GetCodeFactorBits() const { + return code_factor_bits_; + } + + uint64_t CurrentAddress() const { + return current_address_; + } + + int CurrentFile() const { + return current_file_; + } + + int CurrentLine() const { + return current_line_; + } + + const std::vector& GetPatchLocations() const { + return patch_locations_; + } + + using Writer::data; + + DebugLineOpCodeWriter(bool use64bitAddress, + int codeFactorBits, + const typename Vector::allocator_type& alloc = + typename Vector::allocator_type()) + : Writer(&opcodes_), + opcodes_(alloc), + uses_dwarf3_features_(false), + use_64bit_address_(use64bitAddress), + code_factor_bits_(codeFactorBits), + current_address_(0), + current_file_(1), + current_line_(1), + is_stmt_(kDefaultIsStmt) { + } + + private: + uint64_t FactorCodeOffset(uint64_t offset) const { + DCHECK_GE(code_factor_bits_, 0); + DCHECK_EQ((offset >> code_factor_bits_) << code_factor_bits_, offset); + return offset >> code_factor_bits_; + } + + Vector opcodes_; + bool uses_dwarf3_features_; + bool use_64bit_address_; + int code_factor_bits_; + uint64_t current_address_; + int current_file_; + int current_line_; + bool is_stmt_; + std::vector patch_locations_; + + DISALLOW_COPY_AND_ASSIGN(DebugLineOpCodeWriter); +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ diff --git a/compiler/debug/dwarf/dwarf_constants.h b/compiler/debug/dwarf/dwarf_constants.h new file mode 100644 index 000000000..96f805e85 --- /dev/null +++ b/compiler/debug/dwarf/dwarf_constants.h @@ -0,0 +1,694 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ +#define ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ + +namespace art { +namespace dwarf { + +// Based on the Dwarf 4 specification at dwarfstd.com and issues marked +// for inclusion in Dwarf 5 on same. Values not specified in the Dwarf 4 +// standard might change or be removed in the future and may be different +// than the values used currently by other implementations for the same trait, +// use at your own risk. + +enum Tag { + DW_TAG_array_type = 0x01, + DW_TAG_class_type = 0x02, + DW_TAG_entry_point = 0x03, + DW_TAG_enumeration_type = 0x04, + DW_TAG_formal_parameter = 0x05, + DW_TAG_imported_declaration = 0x08, + DW_TAG_label = 0x0a, + DW_TAG_lexical_block = 0x0b, + DW_TAG_member = 0x0d, + DW_TAG_pointer_type = 0x0f, + DW_TAG_reference_type = 0x10, + DW_TAG_compile_unit = 0x11, + DW_TAG_string_type = 0x12, + DW_TAG_structure_type = 0x13, + DW_TAG_subroutine_type = 0x15, + DW_TAG_typedef = 0x16, + DW_TAG_union_type = 0x17, + DW_TAG_unspecified_parameters = 0x18, + DW_TAG_variant = 0x19, + DW_TAG_common_block = 0x1a, + DW_TAG_common_inclusion = 0x1b, + DW_TAG_inheritance = 0x1c, + DW_TAG_inlined_subroutine = 0x1d, + DW_TAG_module = 0x1e, + DW_TAG_ptr_to_member_type = 0x1f, + DW_TAG_set_type = 0x20, + DW_TAG_subrange_type = 0x21, + DW_TAG_with_stmt = 0x22, + DW_TAG_access_declaration = 0x23, + DW_TAG_base_type = 0x24, + DW_TAG_catch_block = 0x25, + DW_TAG_const_type = 0x26, + DW_TAG_constant = 0x27, + DW_TAG_enumerator = 0x28, + DW_TAG_file_type = 0x29, + DW_TAG_friend = 0x2a, + DW_TAG_namelist = 0x2b, + DW_TAG_namelist_item = 0x2c, + DW_TAG_packed_type = 0x2d, + DW_TAG_subprogram = 0x2e, + DW_TAG_template_type_parameter = 0x2f, + DW_TAG_template_value_parameter = 0x30, + DW_TAG_thrown_type = 0x31, + DW_TAG_try_block = 0x32, + DW_TAG_variant_part = 0x33, + DW_TAG_variable = 0x34, + DW_TAG_volatile_type = 0x35, + DW_TAG_dwarf_procedure = 0x36, + DW_TAG_restrict_type = 0x37, + DW_TAG_interface_type = 0x38, + DW_TAG_namespace = 0x39, + DW_TAG_imported_module = 0x3a, + DW_TAG_unspecified_type = 0x3b, + DW_TAG_partial_unit = 0x3c, + DW_TAG_imported_unit = 0x3d, + DW_TAG_condition = 0x3f, + DW_TAG_shared_type = 0x40, + DW_TAG_type_unit = 0x41, + DW_TAG_rvalue_reference_type = 0x42, + DW_TAG_template_alias = 0x43, +#ifdef INCLUDE_DWARF5_VALUES + // Values to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. 
+ DW_TAG_coarray_type = 0x44, + DW_TAG_call_site = 0x45, + DW_TAG_call_site_parameter = 0x46, + DW_TAG_generic_subrange = 0x47, + DW_TAG_atomic_type = 0x48, + DW_TAG_dynamic_type = 0x49, + DW_TAG_aligned_type = 0x50, +#endif + DW_TAG_lo_user = 0x4080, + DW_TAG_hi_user = 0xffff +}; + +enum Children : uint8_t { + DW_CHILDREN_no = 0x00, + DW_CHILDREN_yes = 0x01 +}; + +enum Attribute { + DW_AT_sibling = 0x01, + DW_AT_location = 0x02, + DW_AT_name = 0x03, + DW_AT_ordering = 0x09, + DW_AT_byte_size = 0x0b, + DW_AT_bit_offset = 0x0c, + DW_AT_bit_size = 0x0d, + DW_AT_stmt_list = 0x10, + DW_AT_low_pc = 0x11, + DW_AT_high_pc = 0x12, + DW_AT_language = 0x13, + DW_AT_discr = 0x15, + DW_AT_discr_value = 0x16, + DW_AT_visibility = 0x17, + DW_AT_import = 0x18, + DW_AT_string_length = 0x19, + DW_AT_common_reference = 0x1a, + DW_AT_comp_dir = 0x1b, + DW_AT_const_value = 0x1c, + DW_AT_containing_type = 0x1d, + DW_AT_default_value = 0x1e, + DW_AT_inline = 0x20, + DW_AT_is_optional = 0x21, + DW_AT_lower_bound = 0x22, + DW_AT_producer = 0x25, + DW_AT_prototyped = 0x27, + DW_AT_return_addr = 0x2a, + DW_AT_start_scope = 0x2c, + DW_AT_bit_stride = 0x2e, + DW_AT_upper_bound = 0x2f, + DW_AT_abstract_origin = 0x31, + DW_AT_accessibility = 0x32, + DW_AT_address_class = 0x33, + DW_AT_artificial = 0x34, + DW_AT_base_types = 0x35, + DW_AT_calling_convention = 0x36, + DW_AT_count = 0x37, + DW_AT_data_member_location = 0x38, + DW_AT_decl_column = 0x39, + DW_AT_decl_file = 0x3a, + DW_AT_decl_line = 0x3b, + DW_AT_declaration = 0x3c, + DW_AT_discr_list = 0x3d, + DW_AT_encoding = 0x3e, + DW_AT_external = 0x3f, + DW_AT_frame_base = 0x40, + DW_AT_friend = 0x41, + DW_AT_identifier_case = 0x42, + DW_AT_macro_info = 0x43, + DW_AT_namelist_item = 0x44, + DW_AT_priority = 0x45, + DW_AT_segment = 0x46, + DW_AT_specification = 0x47, + DW_AT_static_link = 0x48, + DW_AT_type = 0x49, + DW_AT_use_location = 0x4a, + DW_AT_variable_parameter = 0x4b, + DW_AT_virtuality = 0x4c, + DW_AT_vtable_elem_location = 0x4d, + DW_AT_allocated = 0x4e, + DW_AT_associated = 0x4f, + DW_AT_data_location = 0x50, + DW_AT_byte_stride = 0x51, + DW_AT_entry_pc = 0x52, + DW_AT_use_UTF8 = 0x53, + DW_AT_extension = 0x54, + DW_AT_ranges = 0x55, + DW_AT_trampoline = 0x56, + DW_AT_call_column = 0x57, + DW_AT_call_file = 0x58, + DW_AT_call_line = 0x59, + DW_AT_description = 0x5a, + DW_AT_binary_scale = 0x5b, + DW_AT_decimal_scale = 0x5c, + DW_AT_small = 0x5d, + DW_AT_decimal_sign = 0x5e, + DW_AT_digit_count = 0x5f, + DW_AT_picture_string = 0x60, + DW_AT_mutable = 0x61, + DW_AT_threads_scaled = 0x62, + DW_AT_explicit = 0x63, + DW_AT_object_pointer = 0x64, + DW_AT_endianity = 0x65, + DW_AT_elemental = 0x66, + DW_AT_pure = 0x67, + DW_AT_recursive = 0x68, + DW_AT_signature = 0x69, + DW_AT_main_subprogram = 0x6a, + DW_AT_data_bit_offset = 0x6b, + DW_AT_const_expr = 0x6c, + DW_AT_enum_class = 0x6d, + DW_AT_linkage_name = 0x6e, +#ifdef INCLUDE_DWARF5_VALUES + // Values to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. 
+  DW_AT_call_site_value = 0x6f,
+  DW_AT_call_site_data_value = 0x70,
+  DW_AT_call_site_target = 0x71,
+  DW_AT_call_site_target_clobbered = 0x72,
+  DW_AT_tail_call = 0x73,
+  DW_AT_all_tail_call_sites = 0x74,
+  DW_AT_all_call_sites = 0x75,
+  DW_AT_all_source_call_sites = 0x76,
+  DW_AT_call_site_parameter = 0x77,
+  DW_AT_rank = 0x7c,
+  DW_AT_string_bitsize = 0x7d,
+  DW_AT_string_byte_size = 0x7e,
+  DW_AT_reference = 0x7f,
+  DW_AT_rvalue_reference = 0x80,
+  DW_AT_noreturn = 0x81,
+  DW_AT_alignment = 0x82,
+#endif
+  DW_AT_lo_user = 0x2000,
+  DW_AT_hi_user = 0xffff
+};
+
+enum Form : uint8_t {
+  DW_FORM_addr = 0x01,
+  DW_FORM_block2 = 0x03,
+  DW_FORM_block4 = 0x04,
+  DW_FORM_data2 = 0x05,
+  DW_FORM_data4 = 0x06,
+  DW_FORM_data8 = 0x07,
+  DW_FORM_string = 0x08,
+  DW_FORM_block = 0x09,
+  DW_FORM_block1 = 0x0a,
+  DW_FORM_data1 = 0x0b,
+  DW_FORM_flag = 0x0c,
+  DW_FORM_sdata = 0x0d,
+  DW_FORM_strp = 0x0e,
+  DW_FORM_udata = 0x0f,
+  DW_FORM_ref_addr = 0x10,
+  DW_FORM_ref1 = 0x11,
+  DW_FORM_ref2 = 0x12,
+  DW_FORM_ref4 = 0x13,
+  DW_FORM_ref8 = 0x14,
+  DW_FORM_ref_udata = 0x15,
+  DW_FORM_indirect = 0x16,
+  DW_FORM_sec_offset = 0x17,
+  DW_FORM_exprloc = 0x18,
+  DW_FORM_flag_present = 0x19,
+  DW_FORM_ref_sig8 = 0x20
+};
+
+enum Operation : uint16_t {
+  DW_OP_addr = 0x03,
+  DW_OP_deref = 0x06,
+  DW_OP_const1u = 0x08,
+  DW_OP_const1s = 0x09,
+  DW_OP_const2u = 0x0a,
+  DW_OP_const2s = 0x0b,
+  DW_OP_const4u = 0x0c,
+  DW_OP_const4s = 0x0d,
+  DW_OP_const8u = 0x0e,
+  DW_OP_const8s = 0x0f,
+  DW_OP_constu = 0x10,
+  DW_OP_consts = 0x11,
+  DW_OP_dup = 0x12,
+  DW_OP_drop = 0x13,
+  DW_OP_over = 0x14,
+  DW_OP_pick = 0x15,
+  DW_OP_swap = 0x16,
+  DW_OP_rot = 0x17,
+  DW_OP_xderef = 0x18,
+  DW_OP_abs = 0x19,
+  DW_OP_and = 0x1a,
+  DW_OP_div = 0x1b,
+  DW_OP_minus = 0x1c,
+  DW_OP_mod = 0x1d,
+  DW_OP_mul = 0x1e,
+  DW_OP_neg = 0x1f,
+  DW_OP_not = 0x20,
+  DW_OP_or = 0x21,
+  DW_OP_plus = 0x22,
+  DW_OP_plus_uconst = 0x23,
+  DW_OP_shl = 0x24,
+  DW_OP_shr = 0x25,
+  DW_OP_shra = 0x26,
+  DW_OP_xor = 0x27,
+  DW_OP_bra = 0x28,
+  DW_OP_eq = 0x29,
+  DW_OP_ge = 0x2a,
+  DW_OP_gt = 0x2b,
+  DW_OP_le = 0x2c,
+  DW_OP_lt = 0x2d,
+  DW_OP_ne = 0x2e,
+  DW_OP_skip = 0x2f,
+  DW_OP_lit0 = 0x30,
+  DW_OP_lit1 = 0x31,
+  DW_OP_lit2 = 0x32,
+  DW_OP_lit3 = 0x33,
+  DW_OP_lit4 = 0x34,
+  DW_OP_lit5 = 0x35,
+  DW_OP_lit6 = 0x36,
+  DW_OP_lit7 = 0x37,
+  DW_OP_lit8 = 0x38,
+  DW_OP_lit9 = 0x39,
+  DW_OP_lit10 = 0x3a,
+  DW_OP_lit11 = 0x3b,
+  DW_OP_lit12 = 0x3c,
+  DW_OP_lit13 = 0x3d,
+  DW_OP_lit14 = 0x3e,
+  DW_OP_lit15 = 0x3f,
+  DW_OP_lit16 = 0x40,
+  DW_OP_lit17 = 0x41,
+  DW_OP_lit18 = 0x42,
+  DW_OP_lit19 = 0x43,
+  DW_OP_lit20 = 0x44,
+  DW_OP_lit21 = 0x45,
+  DW_OP_lit22 = 0x46,
+  DW_OP_lit23 = 0x47,
+  DW_OP_lit24 = 0x48,
+  DW_OP_lit25 = 0x49,
+  DW_OP_lit26 = 0x4a,
+  DW_OP_lit27 = 0x4b,
+  DW_OP_lit28 = 0x4c,
+  DW_OP_lit29 = 0x4d,
+  DW_OP_lit30 = 0x4e,
+  DW_OP_lit31 = 0x4f,
+  DW_OP_reg0 = 0x50,
+  DW_OP_reg1 = 0x51,
+  DW_OP_reg2 = 0x52,
+  DW_OP_reg3 = 0x53,
+  DW_OP_reg4 = 0x54,
+  DW_OP_reg5 = 0x55,
+  DW_OP_reg6 = 0x56,
+  DW_OP_reg7 = 0x57,
+  DW_OP_reg8 = 0x58,
+  DW_OP_reg9 = 0x59,
+  DW_OP_reg10 = 0x5a,
+  DW_OP_reg11 = 0x5b,
+  DW_OP_reg12 = 0x5c,
+  DW_OP_reg13 = 0x5d,
+  DW_OP_reg14 = 0x5e,
+  DW_OP_reg15 = 0x5f,
+  DW_OP_reg16 = 0x60,
+  DW_OP_reg17 = 0x61,
+  DW_OP_reg18 = 0x62,
+  DW_OP_reg19 = 0x63,
+  DW_OP_reg20 = 0x64,
+  DW_OP_reg21 = 0x65,
+  DW_OP_reg22 = 0x66,
+  DW_OP_reg23 = 0x67,
+  DW_OP_reg24 = 0x68,
+  DW_OP_reg25 = 0x69,
+  DW_OP_reg26 =
0x6a, + DW_OP_reg27 = 0x6b, + DW_OP_reg28 = 0x6c, + DW_OP_reg29 = 0x6d, + DW_OP_reg30 = 0x6e, + DW_OP_reg31 = 0x6f, + DW_OP_breg0 = 0x70, + DW_OP_breg1 = 0x71, + DW_OP_breg2 = 0x72, + DW_OP_breg3 = 0x73, + DW_OP_breg4 = 0x74, + DW_OP_breg5 = 0x75, + DW_OP_breg6 = 0x76, + DW_OP_breg7 = 0x77, + DW_OP_breg8 = 0x78, + DW_OP_breg9 = 0x79, + DW_OP_breg10 = 0x7a, + DW_OP_breg11 = 0x7b, + DW_OP_breg12 = 0x7c, + DW_OP_breg13 = 0x7d, + DW_OP_breg14 = 0x7e, + DW_OP_breg15 = 0x7f, + DW_OP_breg16 = 0x80, + DW_OP_breg17 = 0x81, + DW_OP_breg18 = 0x82, + DW_OP_breg19 = 0x83, + DW_OP_breg20 = 0x84, + DW_OP_breg21 = 0x85, + DW_OP_breg22 = 0x86, + DW_OP_breg23 = 0x87, + DW_OP_breg24 = 0x88, + DW_OP_breg25 = 0x89, + DW_OP_breg26 = 0x8a, + DW_OP_breg27 = 0x8b, + DW_OP_breg28 = 0x8c, + DW_OP_breg29 = 0x8d, + DW_OP_breg30 = 0x8e, + DW_OP_breg31 = 0x8f, + DW_OP_regx = 0x90, + DW_OP_fbreg = 0x91, + DW_OP_bregx = 0x92, + DW_OP_piece = 0x93, + DW_OP_deref_size = 0x94, + DW_OP_xderef_size = 0x95, + DW_OP_nop = 0x96, + DW_OP_push_object_address = 0x97, + DW_OP_call2 = 0x98, + DW_OP_call4 = 0x99, + DW_OP_call_ref = 0x9a, + DW_OP_form_tls_address = 0x9b, + DW_OP_call_frame_cfa = 0x9c, + DW_OP_bit_piece = 0x9d, + DW_OP_implicit_value = 0x9e, + DW_OP_stack_value = 0x9f, +#ifdef INCLUDE_DWARF5_VALUES + // Values to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. + DW_OP_entry_value = 0xa0, + DW_OP_const_type = 0xa1, + DW_OP_regval_type = 0xa2, + DW_OP_deref_type = 0xa3, + DW_OP_xderef_type = 0xa4, + DW_OP_convert = 0xa5, + DW_OP_reinterpret = 0xa6, +#endif + DW_OP_lo_user = 0xe0, + DW_OP_hi_user = 0xff +}; + +enum BaseTypeEncoding : uint8_t { + DW_ATE_address = 0x01, + DW_ATE_boolean = 0x02, + DW_ATE_complex_float = 0x03, + DW_ATE_float = 0x04, + DW_ATE_signed = 0x05, + DW_ATE_signed_char = 0x06, + DW_ATE_unsigned = 0x07, + DW_ATE_unsigned_char = 0x08, + DW_ATE_imaginary_float = 0x09, + DW_ATE_packed_decimal = 0x0a, + DW_ATE_numeric_string = 0x0b, + DW_ATE_edited = 0x0c, + DW_ATE_signed_fixed = 0x0d, + DW_ATE_unsigned_fixed = 0x0e, + DW_ATE_decimal_float = 0x0f, + DW_ATE_UTF = 0x10, + DW_ATE_lo_user = 0x80, + DW_ATE_hi_user = 0xff +}; + +enum DecimalSign : uint8_t { + DW_DS_unsigned = 0x01, + DW_DS_leading_overpunch = 0x02, + DW_DS_trailing_overpunch = 0x03, + DW_DS_leading_separate = 0x04, + DW_DS_trailing_separate = 0x05 +}; + +enum Endianity : uint8_t { + DW_END_default = 0x00, + DW_END_big = 0x01, + DW_END_little = 0x02, + DW_END_lo_user = 0x40, + DW_END_hi_user = 0xff +}; + +enum Accessibility : uint8_t { + DW_ACCESS_public = 0x01, + DW_ACCESS_protected = 0x02, + DW_ACCESS_private = 0x03 +}; + +enum Visibility : uint8_t { + DW_VIS_local = 0x01, + DW_VIS_exported = 0x02, + DW_VIS_qualified = 0x03 +}; + +enum Virtuality : uint8_t { + DW_VIRTUALITY_none = 0x00, + DW_VIRTUALITY_virtual = 0x01, + DW_VIRTUALITY_pure_virtual = 0x02 +}; + +enum Language { + DW_LANG_C89 = 0x01, + DW_LANG_C = 0x02, + DW_LANG_Ada83 = 0x03, + DW_LANG_C_plus_plus = 0x04, + DW_LANG_Cobol74 = 0x05, + DW_LANG_Cobol85 = 0x06, + DW_LANG_Fortran77 = 0x07, + DW_LANG_Fortran90 = 0x08, + DW_LANG_Pascal83 = 0x09, + DW_LANG_Modula2 = 0x0a, + DW_LANG_Java = 0x0b, + DW_LANG_C99 = 0x0c, + DW_LANG_Ada95 = 0x0d, + DW_LANG_Fortran95 = 0x0e, + DW_LANG_PLI = 0x0f, + DW_LANG_ObjC = 0x10, + DW_LANG_ObjC_plus_plus = 0x11, + DW_LANG_UPC = 0x12, + DW_LANG_D = 0x13, + DW_LANG_Python = 0x14, +#ifdef INCLUDE_DWARF5_VALUES + // Values 
to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. + DW_LANG_OpenCL = 0x15, + DW_LANG_Go = 0x16, + DW_LANG_Modula3 = 0x17, + DW_LANG_Haskell = 0x18, + DW_LANG_C_plus_plus_03 = 0x19, + DW_LANG_C_plus_plus_11 = 0x1a, + DW_LANG_OCaml = 0x1b, + DW_LANG_Rust = 0x1c, + DW_LANG_C11 = 0x1d, + DW_LANG_Swift = 0x1e, + DW_LANG_Julia = 0x1f, +#endif + DW_LANG_lo_user = 0x8000, + DW_LANG_hi_user = 0xffff +}; + +enum Identifier : uint8_t { + DW_ID_case_sensitive = 0x00, + DW_ID_up_case = 0x01, + DW_ID_down_case = 0x02, + DW_ID_case_insensitive = 0x03 +}; + +enum CallingConvention : uint8_t { + DW_CC_normal = 0x01, + DW_CC_program = 0x02, + DW_CC_nocall = 0x03, + DW_CC_lo_user = 0x40, + DW_CC_hi_user = 0xff +}; + +enum Inline : uint8_t { + DW_INL_not_inlined = 0x00, + DW_INL_inlined = 0x01, + DW_INL_declared_not_inlined = 0x02, + DW_INL_declared_inlined = 0x03 +}; + +enum ArrayOrdering : uint8_t { + DW_ORD_row_major = 0x00, + DW_ORD_col_major = 0x01 +}; + +enum DiscriminantList : uint8_t { + DW_DSC_label = 0x00, + DW_DSC_range = 0x01 +}; + +enum LineNumberOpcode : uint8_t { + DW_LNS_copy = 0x01, + DW_LNS_advance_pc = 0x02, + DW_LNS_advance_line = 0x03, + DW_LNS_set_file = 0x04, + DW_LNS_set_column = 0x05, + DW_LNS_negate_stmt = 0x06, + DW_LNS_set_basic_block = 0x07, + DW_LNS_const_add_pc = 0x08, + DW_LNS_fixed_advance_pc = 0x09, + DW_LNS_set_prologue_end = 0x0a, + DW_LNS_set_epilogue_begin = 0x0b, + DW_LNS_set_isa = 0x0c +}; + +enum LineNumberExtendedOpcode : uint8_t { + DW_LNE_end_sequence = 0x01, + DW_LNE_set_address = 0x02, + DW_LNE_define_file = 0x03, + DW_LNE_set_discriminator = 0x04, + DW_LNE_lo_user = 0x80, + DW_LNE_hi_user = 0xff +}; + +#ifdef INCLUDE_DWARF5_VALUES +enum LineNumberFormat : uint8_t { + // Values to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. + // + DW_LNF_path = 0x1, + DW_LNF_include_index = 0x2, + DW_LNF_timestamp = 0x3, + DW_LNF_size = 0x4, + DW_LNF_MD5 = 0x5, + DW_LNF_lo_user = 0x2000, + DW_LNF_hi_user = 0x3fff +}; +#endif + +enum MacroInfo : uint8_t { + DW_MACINFO_define = 0x01, + DW_MACINFO_undef = 0x02, + DW_MACINFO_start_file = 0x03, + DW_MACINFO_end_file = 0x04, + DW_MACINFO_vendor_ext = 0xff +}; + +#ifdef INCLUDE_DWARF5_VALUES +enum Macro : uint8_t { + // Values to be added in Dwarf 5. Final value not yet specified. Values listed + // may be different than other implementations. Use with caution. + // TODO Update these values when Dwarf 5 is released. 
+ DW_MACRO_define = 0x01, + DW_MACRO_undef = 0x02, + DW_MACRO_start_file = 0x03, + DW_MACRO_end_file = 0x04, + DW_MACRO_define_indirect = 0x05, + DW_MACRO_undef_indirect = 0x06, + DW_MACRO_transparent_include = 0x07, + DW_MACRO_define_indirectx = 0x0b, + DW_MACRO_undef_indirectx = 0x0c, + DW_MACRO_lo_user = 0xe0, + DW_MACRO_hi_user = 0xff +}; +#endif + +const uint32_t CIE_ID_32 = 0xffffffff; +const uint64_t CIE_ID_64 = 0xffffffffffffffff; + +enum CallFrameInstruction : uint8_t { + DW_CFA_advance_loc = 0x40, + DW_CFA_offset = 0x80, + DW_CFA_restore = 0xc0, + DW_CFA_nop = 0x00, + DW_CFA_set_loc = 0x01, + DW_CFA_advance_loc1 = 0x02, + DW_CFA_advance_loc2 = 0x03, + DW_CFA_advance_loc4 = 0x04, + DW_CFA_offset_extended = 0x05, + DW_CFA_restore_extended = 0x06, + DW_CFA_undefined = 0x07, + DW_CFA_same_value = 0x08, + DW_CFA_register = 0x09, + DW_CFA_remember_state = 0x0a, + DW_CFA_restore_state = 0x0b, + DW_CFA_def_cfa = 0x0c, + DW_CFA_def_cfa_register = 0x0d, + DW_CFA_def_cfa_offset = 0x0e, + DW_CFA_def_cfa_expression = 0x0f, + DW_CFA_expression = 0x10, + DW_CFA_offset_extended_sf = 0x11, + DW_CFA_def_cfa_sf = 0x12, + DW_CFA_def_cfa_offset_sf = 0x13, + DW_CFA_val_offset = 0x14, + DW_CFA_val_offset_sf = 0x15, + DW_CFA_val_expression = 0x16, + DW_CFA_lo_user = 0x1c, + DW_CFA_hi_user = 0x3f +}; + +enum ExceptionHeaderValueFormat : uint8_t { + DW_EH_PE_native = 0x00, + DW_EH_PE_uleb128 = 0x01, + DW_EH_PE_udata2 = 0x02, + DW_EH_PE_udata4 = 0x03, + DW_EH_PE_udata8 = 0x04, + DW_EH_PE_sleb128 = 0x09, + DW_EH_PE_sdata2 = 0x0A, + DW_EH_PE_sdata4 = 0x0B, + DW_EH_PE_sdata8 = 0x0C, + DW_EH_PE_omit = 0xFF, +}; + +enum ExceptionHeaderValueApplication : uint8_t { + DW_EH_PE_absptr = 0x00, + DW_EH_PE_pcrel = 0x10, + DW_EH_PE_textrel = 0x20, + DW_EH_PE_datarel = 0x30, + DW_EH_PE_funcrel = 0x40, + DW_EH_PE_aligned = 0x50, +}; + +enum CFIFormat : uint8_t { + // This is the original format as defined by the specification. + // It is used for the .debug_frame section. + DW_DEBUG_FRAME_FORMAT, + // Slightly modified format used for the .eh_frame section. + DW_EH_FRAME_FORMAT +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc new file mode 100644 index 000000000..2ba3af5e1 --- /dev/null +++ b/compiler/debug/dwarf/dwarf_test.cc @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dwarf_test.h" + +#include "debug/dwarf/debug_frame_opcode_writer.h" +#include "debug/dwarf/debug_info_entry_writer.h" +#include "debug/dwarf/debug_line_opcode_writer.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/headers.h" +#include "gtest/gtest.h" + +namespace art { +namespace dwarf { + +// Run the tests only on host since we need objdump. 
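+// In outline: each test writes a DWARF section into a memory buffer, wraps
+// it in a minimal ELF file, and compares objdump's decoded output against
+// the DW_CHECK/DW_CHECK_NEXT expectations recorded alongside each opcode.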
+#ifndef __ANDROID__ + +constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT; + +TEST_F(DwarfTest, DebugFrame) { + const bool is64bit = false; + + // Pick offset value which would catch Uleb vs Sleb errors. + const int offset = 40000; + ASSERT_EQ(UnsignedLeb128Size(offset / 4), 2u); + ASSERT_EQ(SignedLeb128Size(offset / 4), 3u); + DW_CHECK("Data alignment factor: -4"); + const Reg reg(6); + + // Test the opcodes in the order mentioned in the spec. + // There are usually several encoding variations of each opcode. + DebugFrameOpCodeWriter<> opcodes; + DW_CHECK("FDE"); + int pc = 0; + for (int i : {0, 1, 0x3F, 0x40, 0xFF, 0x100, 0xFFFF, 0x10000}) { + pc += i; + opcodes.AdvancePC(pc); + } + DW_CHECK_NEXT("DW_CFA_advance_loc: 1 to 01000001"); + DW_CHECK_NEXT("DW_CFA_advance_loc: 63 to 01000040"); + DW_CHECK_NEXT("DW_CFA_advance_loc1: 64 to 01000080"); + DW_CHECK_NEXT("DW_CFA_advance_loc1: 255 to 0100017f"); + DW_CHECK_NEXT("DW_CFA_advance_loc2: 256 to 0100027f"); + DW_CHECK_NEXT("DW_CFA_advance_loc2: 65535 to 0101027e"); + DW_CHECK_NEXT("DW_CFA_advance_loc4: 65536 to 0102027e"); + opcodes.DefCFA(reg, offset); + DW_CHECK_NEXT("DW_CFA_def_cfa: r6 (esi) ofs 40000"); + opcodes.DefCFA(reg, -offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_sf: r6 (esi) ofs -40000"); + opcodes.DefCFARegister(reg); + DW_CHECK_NEXT("DW_CFA_def_cfa_register: r6 (esi)"); + opcodes.DefCFAOffset(offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 40000"); + opcodes.DefCFAOffset(-offset); + DW_CHECK_NEXT("DW_CFA_def_cfa_offset_sf: -40000"); + uint8_t expr[] = { 0 }; + opcodes.DefCFAExpression(expr, arraysize(expr)); + DW_CHECK_NEXT("DW_CFA_def_cfa_expression"); + opcodes.Undefined(reg); + DW_CHECK_NEXT("DW_CFA_undefined: r6 (esi)"); + opcodes.SameValue(reg); + DW_CHECK_NEXT("DW_CFA_same_value: r6 (esi)"); + opcodes.Offset(Reg(0x3F), -offset); + // Bad register likely means that it does not exist on x86, + // but we want to test high register numbers anyway. + DW_CHECK_NEXT("DW_CFA_offset: bad register: r63 at cfa-40000"); + opcodes.Offset(Reg(0x40), -offset); + DW_CHECK_NEXT("DW_CFA_offset_extended: bad register: r64 at cfa-40000"); + opcodes.Offset(Reg(0x40), offset); + DW_CHECK_NEXT("DW_CFA_offset_extended_sf: bad register: r64 at cfa+40000"); + opcodes.ValOffset(reg, -offset); + DW_CHECK_NEXT("DW_CFA_val_offset: r6 (esi) at cfa-40000"); + opcodes.ValOffset(reg, offset); + DW_CHECK_NEXT("DW_CFA_val_offset_sf: r6 (esi) at cfa+40000"); + opcodes.Register(reg, Reg(1)); + DW_CHECK_NEXT("DW_CFA_register: r6 (esi) in r1 (ecx)"); + opcodes.Expression(reg, expr, arraysize(expr)); + DW_CHECK_NEXT("DW_CFA_expression: r6 (esi)"); + opcodes.ValExpression(reg, expr, arraysize(expr)); + DW_CHECK_NEXT("DW_CFA_val_expression: r6 (esi)"); + opcodes.Restore(Reg(0x3F)); + DW_CHECK_NEXT("DW_CFA_restore: bad register: r63"); + opcodes.Restore(Reg(0x40)); + DW_CHECK_NEXT("DW_CFA_restore_extended: bad register: r64"); + opcodes.Restore(reg); + DW_CHECK_NEXT("DW_CFA_restore: r6 (esi)"); + opcodes.RememberState(); + DW_CHECK_NEXT("DW_CFA_remember_state"); + opcodes.RestoreState(); + DW_CHECK_NEXT("DW_CFA_restore_state"); + opcodes.Nop(); + DW_CHECK_NEXT("DW_CFA_nop"); + + // Also test helpers. 
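+  // (RelOffset and AdjustCFAOffset are thin wrappers: they translate
+  // stack-pointer-relative spills and frame-size deltas into the absolute
+  // CFA-relative primitives exercised above.)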
+  opcodes.DefCFA(Reg(4), 100);  // ESP
+  DW_CHECK_NEXT("DW_CFA_def_cfa: r4 (esp) ofs 100");
+  opcodes.AdjustCFAOffset(8);
+  DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 108");
+  opcodes.RelOffset(Reg(0), 0);  // push R0
+  DW_CHECK_NEXT("DW_CFA_offset: r0 (eax) at cfa-108");
+  opcodes.RelOffset(Reg(1), 4);  // push R1
+  DW_CHECK_NEXT("DW_CFA_offset: r1 (ecx) at cfa-104");
+  opcodes.RelOffsetForMany(Reg(2), 8, 1 | (1 << 3), 4);  // push R2 and R5
+  DW_CHECK_NEXT("DW_CFA_offset: r2 (edx) at cfa-100");
+  DW_CHECK_NEXT("DW_CFA_offset: r5 (ebp) at cfa-96");
+  opcodes.RestoreMany(Reg(2), 1 | (1 << 3));  // pop R2 and R5
+  DW_CHECK_NEXT("DW_CFA_restore: r2 (edx)");
+  DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)");
+
+  DebugFrameOpCodeWriter<> initial_opcodes;
+  WriteCIE(is64bit, Reg(is64bit ? 16 : 8),
+           initial_opcodes, kCFIFormat, &debug_frame_data_);
+  std::vector<uintptr_t> debug_frame_patches;
+  std::vector<uintptr_t> expected_patches { 28 };  // NOLINT
+  WriteFDE(is64bit, 0, 0, 0x01000000, 0x01000000,
+           ArrayRef<const uint8_t>(*opcodes.data()),
+           kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches);
+
+  EXPECT_EQ(expected_patches, debug_frame_patches);
+  CheckObjdumpOutput(is64bit, "-W");
+}
+
+TEST_F(DwarfTest, DebugFrame64) {
+  constexpr bool is64bit = true;
+  DebugFrameOpCodeWriter<> initial_opcodes;
+  WriteCIE(is64bit, Reg(16),
+           initial_opcodes, kCFIFormat, &debug_frame_data_);
+  DebugFrameOpCodeWriter<> opcodes;
+  std::vector<uintptr_t> debug_frame_patches;
+  std::vector<uintptr_t> expected_patches { 32 };  // NOLINT
+  WriteFDE(is64bit, 0, 0, 0x0100000000000000, 0x0200000000000000,
+           ArrayRef<const uint8_t>(*opcodes.data()),
+           kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches);
+  DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
+
+  EXPECT_EQ(expected_patches, debug_frame_patches);
+  CheckObjdumpOutput(is64bit, "-W");
+}
+
+// Test x86_64 register mapping. It is the only non-trivial architecture.
+// ARM, X86, and Mips have: dwarf_reg = art_reg + constant.
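+// On x86-64, ART numbers core registers in x86 encoding order while DWARF
+// uses the System V order, so e.g. managed register 1 (rcx) becomes DWARF r2
+// and managed register 4 (rsp) becomes DWARF r7, as the checks below verify.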
+TEST_F(DwarfTest, x86_64_RegisterMapping) { + constexpr bool is64bit = true; + DebugFrameOpCodeWriter<> opcodes; + for (int i = 0; i < 16; i++) { + opcodes.RelOffset(Reg::X86_64Core(i), 0); + } + DW_CHECK("FDE"); + DW_CHECK_NEXT("DW_CFA_offset: r0 (rax)"); + DW_CHECK_NEXT("DW_CFA_offset: r2 (rcx)"); + DW_CHECK_NEXT("DW_CFA_offset: r1 (rdx)"); + DW_CHECK_NEXT("DW_CFA_offset: r3 (rbx)"); + DW_CHECK_NEXT("DW_CFA_offset: r7 (rsp)"); + DW_CHECK_NEXT("DW_CFA_offset: r6 (rbp)"); + DW_CHECK_NEXT("DW_CFA_offset: r4 (rsi)"); + DW_CHECK_NEXT("DW_CFA_offset: r5 (rdi)"); + DW_CHECK_NEXT("DW_CFA_offset: r8 (r8)"); + DW_CHECK_NEXT("DW_CFA_offset: r9 (r9)"); + DW_CHECK_NEXT("DW_CFA_offset: r10 (r10)"); + DW_CHECK_NEXT("DW_CFA_offset: r11 (r11)"); + DW_CHECK_NEXT("DW_CFA_offset: r12 (r12)"); + DW_CHECK_NEXT("DW_CFA_offset: r13 (r13)"); + DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)"); + DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)"); + DebugFrameOpCodeWriter<> initial_opcodes; + WriteCIE(is64bit, Reg(16), + initial_opcodes, kCFIFormat, &debug_frame_data_); + std::vector debug_frame_patches; + WriteFDE(is64bit, 0, 0, 0x0100000000000000, 0x0200000000000000, + ArrayRef(*opcodes.data()), + kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); + + CheckObjdumpOutput(is64bit, "-W"); +} + +TEST_F(DwarfTest, DebugLine) { + const bool is64bit = false; + const int code_factor_bits = 1; + DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); + + std::vector include_directories; + include_directories.push_back("/path/to/source"); + DW_CHECK("/path/to/source"); + + std::vector files { + { "file0.c", 0, 1000, 2000 }, + { "file1.c", 1, 1000, 2000 }, + { "file2.c", 1, 1000, 2000 }, + }; + DW_CHECK("1\t0\t1000\t2000\tfile0.c"); + DW_CHECK_NEXT("2\t1\t1000\t2000\tfile1.c"); + DW_CHECK_NEXT("3\t1\t1000\t2000\tfile2.c"); + + DW_CHECK("Line Number Statements"); + opcodes.SetAddress(0x01000000); + DW_CHECK_NEXT("Extended opcode 2: set Address to 0x1000000"); + opcodes.AddRow(); + DW_CHECK_NEXT("Copy"); + opcodes.AdvancePC(0x01000100); + DW_CHECK_NEXT("Advance PC by 256 to 0x1000100"); + opcodes.SetFile(2); + DW_CHECK_NEXT("Set File Name to entry 2 in the File Name Table"); + opcodes.AdvanceLine(3); + DW_CHECK_NEXT("Advance Line by 2 to 3"); + opcodes.SetColumn(4); + DW_CHECK_NEXT("Set column to 4"); + opcodes.SetIsStmt(true); + DW_CHECK_NEXT("Set is_stmt to 1"); + opcodes.SetIsStmt(false); + DW_CHECK_NEXT("Set is_stmt to 0"); + opcodes.SetBasicBlock(); + DW_CHECK_NEXT("Set basic block"); + opcodes.SetPrologueEnd(); + DW_CHECK_NEXT("Set prologue_end to true"); + opcodes.SetEpilogueBegin(); + DW_CHECK_NEXT("Set epilogue_begin to true"); + opcodes.SetISA(5); + DW_CHECK_NEXT("Set ISA to 5"); + opcodes.EndSequence(); + DW_CHECK_NEXT("Extended opcode 1: End of Sequence"); + opcodes.DefineFile("file.c", 0, 1000, 2000); + DW_CHECK_NEXT("Extended opcode 3: define new File Table entry"); + DW_CHECK_NEXT("Entry\tDir\tTime\tSize\tName"); + DW_CHECK_NEXT("1\t0\t1000\t2000\tfile.c"); + + std::vector debug_line_patches; + std::vector expected_patches { 87 }; // NOLINT + WriteDebugLineTable(include_directories, files, opcodes, + 0, &debug_line_data_, &debug_line_patches); + + EXPECT_EQ(expected_patches, debug_line_patches); + CheckObjdumpOutput(is64bit, "-W"); +} + +// DWARF has special one byte codes which advance PC and line at the same time. 
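+// The loop below sweeps address deltas 0..78 against line deltas -16..16,
+// covering both sides of the special-opcode range, and then checks that the
+// encoded stream stays under three bytes per row on average.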
+TEST_F(DwarfTest, DebugLineSpecialOpcodes) { + const bool is64bit = false; + const int code_factor_bits = 1; + uint32_t pc = 0x01000000; + int line = 1; + DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); + opcodes.SetAddress(pc); + size_t num_rows = 0; + DW_CHECK("Line Number Statements:"); + DW_CHECK("Special opcode"); + DW_CHECK("Advance PC by constant"); + DW_CHECK("Decoded dump of debug contents of section .debug_line:"); + DW_CHECK("Line number Starting address"); + for (int addr_delta = 0; addr_delta < 80; addr_delta += 2) { + for (int line_delta = 16; line_delta >= -16; --line_delta) { + pc += addr_delta; + line += line_delta; + opcodes.AddRow(pc, line); + num_rows++; + ASSERT_EQ(opcodes.CurrentAddress(), pc); + ASSERT_EQ(opcodes.CurrentLine(), line); + char expected[1024]; + sprintf(expected, "%i 0x%x", line, pc); + DW_CHECK_NEXT(expected); + } + } + EXPECT_LT(opcodes.data()->size(), num_rows * 3); + + std::vector directories; + std::vector files { { "file.c", 0, 1000, 2000 } }; // NOLINT + std::vector debug_line_patches; + WriteDebugLineTable(directories, files, opcodes, + 0, &debug_line_data_, &debug_line_patches); + + CheckObjdumpOutput(is64bit, "-W -WL"); +} + +TEST_F(DwarfTest, DebugInfo) { + constexpr bool is64bit = false; + DebugAbbrevWriter<> debug_abbrev(&debug_abbrev_data_); + DebugInfoEntryWriter<> info(is64bit, &debug_abbrev); + DW_CHECK("Contents of the .debug_info section:"); + info.StartTag(dwarf::DW_TAG_compile_unit); + DW_CHECK("Abbrev Number: 1 (DW_TAG_compile_unit)"); + info.WriteStrp(dwarf::DW_AT_producer, "Compiler name", &debug_str_data_); + DW_CHECK_NEXT("DW_AT_producer : (indirect string, offset: 0x0): Compiler name"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01000000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1000000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x02000000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x2000000"); + info.StartTag(dwarf::DW_TAG_subprogram); + DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); + info.WriteStrp(dwarf::DW_AT_name, "Foo", &debug_str_data_); + DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0xe): Foo"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01010000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1010000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x01020000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x1020000"); + info.EndTag(); // DW_TAG_subprogram + info.StartTag(dwarf::DW_TAG_subprogram); + DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); + info.WriteStrp(dwarf::DW_AT_name, "Bar", &debug_str_data_); + DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0x12): Bar"); + info.WriteAddr(dwarf::DW_AT_low_pc, 0x01020000); + DW_CHECK_NEXT("DW_AT_low_pc : 0x1020000"); + info.WriteAddr(dwarf::DW_AT_high_pc, 0x01030000); + DW_CHECK_NEXT("DW_AT_high_pc : 0x1030000"); + info.EndTag(); // DW_TAG_subprogram + info.EndTag(); // DW_TAG_compile_unit + // Test that previous list was properly terminated and empty children. + info.StartTag(dwarf::DW_TAG_compile_unit); + info.EndTag(); // DW_TAG_compile_unit + + // The abbrev table is just side product, but check it as well. 
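For reference, each .debug_abbrev declaration is a ULEB128 abbrev code, a ULEB128 tag, one DW_CHILDREN byte, then (attribute, form) ULEB128 pairs terminated by a (0, 0) pair. A small sketch of the bytes behind abbrev 1 that the checks below expect; the constants are taken from the DWARF 4 specification, not from the ART headers:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // DWARF 4 constants, from the specification.
    enum : uint8_t {
      DW_TAG_compile_unit_ = 0x11,
      DW_CHILDREN_yes_     = 0x01,
      DW_AT_producer_      = 0x25,
      DW_AT_low_pc_        = 0x11,
      DW_AT_high_pc_       = 0x12,
      DW_FORM_strp_        = 0x0e,
      DW_FORM_addr_        = 0x01,
    };

    int main() {
      // Abbrev declaration 1: DW_TAG_compile_unit [has children] with three
      // (attribute, form) pairs. Every value fits in one ULEB128 byte, so the
      // encoding is just the raw bytes below, ended by the (0, 0) terminator.
      std::vector<uint8_t> abbrev = {
          1, DW_TAG_compile_unit_, DW_CHILDREN_yes_,
          DW_AT_producer_, DW_FORM_strp_,
          DW_AT_low_pc_,   DW_FORM_addr_,
          DW_AT_high_pc_,  DW_FORM_addr_,
          0, 0,
      };
      for (uint8_t b : abbrev) printf("%02x ", b);
      printf("\n");
      return 0;
    }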
+ DW_CHECK("Abbrev Number: 3 (DW_TAG_compile_unit)"); + DW_CHECK("Contents of the .debug_abbrev section:"); + DW_CHECK("1 DW_TAG_compile_unit [has children]"); + DW_CHECK_NEXT("DW_AT_producer DW_FORM_strp"); + DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); + DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); + DW_CHECK("2 DW_TAG_subprogram [no children]"); + DW_CHECK_NEXT("DW_AT_name DW_FORM_strp"); + DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); + DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); + DW_CHECK("3 DW_TAG_compile_unit [no children]"); + + std::vector debug_info_patches; + std::vector expected_patches { 16, 20, 29, 33, 42, 46 }; // NOLINT + dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info, + 0, &debug_info_data_, &debug_info_patches); + + EXPECT_EQ(expected_patches, debug_info_patches); + CheckObjdumpOutput(is64bit, "-W"); +} + +#endif // __ANDROID__ + +} // namespace dwarf +} // namespace art diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h new file mode 100644 index 000000000..e2f0a65ab --- /dev/null +++ b/compiler/debug/dwarf/dwarf_test.h @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ +#define ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "base/unix_file/fd_file.h" +#include "common_runtime_test.h" +#include "elf_builder.h" +#include "gtest/gtest.h" +#include "linker/file_output_stream.h" +#include "os.h" + +namespace art { +namespace dwarf { + +#define DW_CHECK(substring) Check(substring, false, __FILE__, __LINE__) +#define DW_CHECK_NEXT(substring) Check(substring, true, __FILE__, __LINE__) + +class DwarfTest : public CommonRuntimeTest { + public: + static constexpr bool kPrintObjdumpOutput = false; // debugging. + + struct ExpectedLine { + std::string substring; + bool next; + const char* at_file; + int at_line; + }; + + // Check that the objdump output contains given output. + // If next is true, it must be the next line. Otherwise lines are skipped. + void Check(const char* substr, bool next, const char* at_file, int at_line) { + expected_lines_.push_back(ExpectedLine {substr, next, at_file, at_line}); + } + + // Pretty-print the generated DWARF data using objdump. + template + std::vector Objdump(const char* args) { + // Write simple elf file with just the DWARF sections. + InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? 
kX86_64 : kX86; + ScratchFile file; + FileOutputStream output_stream(file.GetFile()); + ElfBuilder builder(isa, nullptr, &output_stream); + builder.Start(); + if (!debug_info_data_.empty()) { + builder.WriteSection(".debug_info", &debug_info_data_); + } + if (!debug_abbrev_data_.empty()) { + builder.WriteSection(".debug_abbrev", &debug_abbrev_data_); + } + if (!debug_str_data_.empty()) { + builder.WriteSection(".debug_str", &debug_str_data_); + } + if (!debug_line_data_.empty()) { + builder.WriteSection(".debug_line", &debug_line_data_); + } + if (!debug_frame_data_.empty()) { + builder.WriteSection(".debug_frame", &debug_frame_data_); + } + builder.End(); + EXPECT_TRUE(builder.Good()); + + // Read the elf file back using objdump. + std::vector lines; + std::string cmd = GetAndroidHostToolsDir(); + cmd = cmd + "objdump " + args + " " + file.GetFilename() + " 2>&1"; + FILE* output = popen(cmd.data(), "r"); + char buffer[1024]; + const char* line; + while ((line = fgets(buffer, sizeof(buffer), output)) != nullptr) { + if (kPrintObjdumpOutput) { + printf("%s", line); + } + if (line[0] != '\0' && line[0] != '\n') { + EXPECT_TRUE(strstr(line, "objdump: Error:") == nullptr) << line; + EXPECT_TRUE(strstr(line, "objdump: Warning:") == nullptr) << line; + std::string str(line); + if (str.back() == '\n') { + str.pop_back(); + } + lines.push_back(str); + } + } + pclose(output); + return lines; + } + + std::vector Objdump(bool is64bit, const char* args) { + if (is64bit) { + return Objdump(args); + } else { + return Objdump(args); + } + } + + // Compare objdump output to the recorded checks. + void CheckObjdumpOutput(bool is64bit, const char* args) { + std::vector actual_lines = Objdump(is64bit, args); + auto actual_line = actual_lines.begin(); + for (const ExpectedLine& expected_line : expected_lines_) { + const std::string& substring = expected_line.substring; + if (actual_line == actual_lines.end()) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Seen end of output."; + } else if (expected_line.next) { + if (actual_line->find(substring) == std::string::npos) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Seen '" << actual_line->data() << "'."; + } else { + // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); + } + actual_line++; + } else { + bool found = false; + for (auto it = actual_line; it < actual_lines.end(); it++) { + if (it->find(substring) != std::string::npos) { + actual_line = it; + found = true; + break; + } + } + if (!found) { + ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << + "Expected '" << substring << "'.\n" << + "Not found anywhere in the rest of the output."; + } else { + // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); + actual_line++; + } + } + } + } + + // Buffers which are going to assembled into ELF file and passed to objdump. + std::vector debug_frame_data_; + std::vector debug_info_data_; + std::vector debug_abbrev_data_; + std::vector debug_str_data_; + std::vector debug_line_data_; + + // The expected output of objdump. 
+ std::vector expected_lines_; +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ diff --git a/compiler/debug/dwarf/expression.h b/compiler/debug/dwarf/expression.h new file mode 100644 index 000000000..fafc0462d --- /dev/null +++ b/compiler/debug/dwarf/expression.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_ +#define ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_ + +#include +#include + +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/writer.h" + +namespace art { +namespace dwarf { + +// Writer for DWARF expressions which are used in .debug_info and .debug_loc sections. +// See the DWARF specification for the precise meaning of the opcodes. +// If multiple equivalent encodings are possible, it will choose the most compact one. +// The writer is not exhaustive - it only implements opcodes we have needed so far. +class Expression : private Writer<> { + public: + using Writer<>::data; + using Writer<>::size; + + // Push signed integer on the stack. + void WriteOpConsts(int32_t value) { + if (0 <= value && value < 32) { + PushUint8(DW_OP_lit0 + value); + } else { + PushUint8(DW_OP_consts); + PushSleb128(value); + } + } + + // Push unsigned integer on the stack. + void WriteOpConstu(uint32_t value) { + if (value < 32) { + PushUint8(DW_OP_lit0 + value); + } else { + PushUint8(DW_OP_constu); + PushUleb128(value); + } + } + + // Variable is stored in given register. + void WriteOpReg(uint32_t dwarf_reg_num) { + if (dwarf_reg_num < 32) { + PushUint8(DW_OP_reg0 + dwarf_reg_num); + } else { + PushUint8(DW_OP_regx); + PushUleb128(dwarf_reg_num); + } + } + + // Variable is stored on stack. Also see DW_AT_frame_base. + void WriteOpFbreg(int32_t stack_offset) { + PushUint8(DW_OP_fbreg); + PushSleb128(stack_offset); + } + + // The variable is stored in multiple locations (pieces). + void WriteOpPiece(uint32_t num_bytes) { + PushUint8(DW_OP_piece); + PushUleb128(num_bytes); + } + + // Loads 32-bit or 64-bit value depending on architecture. + void WriteOpDeref() { PushUint8(DW_OP_deref); } + + // Loads value of given byte size. + void WriteOpDerefSize(uint8_t num_bytes) { + PushUint8(DW_OP_deref_size); + PushUint8(num_bytes); + } + + // Pop two values and push their sum. + void WriteOpPlus() { PushUint8(DW_OP_plus); } + + // Add constant value to value on top of stack. + void WriteOpPlusUconst(uint32_t offset) { + PushUint8(DW_OP_plus_uconst); + PushUleb128(offset); + } + + // Negate top of stack. + void WriteOpNeg() { PushUint8(DW_OP_neg); } + + // Pop two values and push their bitwise-AND. + void WriteOpAnd() { PushUint8(DW_OP_and); } + + // Push stack base pointer as determined from .debug_frame. + void WriteOpCallFrameCfa() { PushUint8(DW_OP_call_frame_cfa); } + + // Push address of the variable we are working with. 
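+  // Used, for example, when computing DW_AT_count of an array: the debugger
+  // pushes the array object's address before evaluating the expression.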
+  void WriteOpPushObjectAddress() { PushUint8(DW_OP_push_object_address); }
+
+  // Return the top of stack as the value of the variable.
+  // Otherwise, the top of stack is the variable's location.
+  void WriteOpStackValue() { PushUint8(DW_OP_stack_value); }
+
+  explicit Expression(std::vector<uint8_t>* buffer) : Writer<>(buffer) {
+    buffer->clear();
+  }
+};
+}  // namespace dwarf
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
new file mode 100644
index 000000000..146d9fddf
--- /dev/null
+++ b/compiler/debug/dwarf/headers.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_DWARF_HEADERS_H_
+#define ART_COMPILER_DEBUG_DWARF_HEADERS_H_
+
+#include <cstdint>
+
+#include "debug/dwarf/debug_frame_opcode_writer.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/dwarf/debug_line_opcode_writer.h"
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/dwarf/register.h"
+#include "debug/dwarf/writer.h"
+#include "utils/array_ref.h"
+
+namespace art {
+namespace dwarf {
+
+// Note that all headers start with a 32-bit length.
+// DWARF also supports 64-bit lengths, but we never use that.
+// It is intended to support very large debug sections (>4GB),
+// and compilers are expected *not* to use it by default.
+// In particular, it is not related to machine architecture.
+
+// Write common information entry (CIE) to .debug_frame or .eh_frame section.
+template <typename Vector>
+void WriteCIE(bool is64bit,
+              Reg return_address_register,
+              const DebugFrameOpCodeWriter<Vector>& opcodes,
+              CFIFormat format,
+              std::vector<uint8_t>* buffer) {
+  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
+  Writer<> writer(buffer);
+  size_t cie_header_start_ = writer.data()->size();
+  writer.PushUint32(0);  // Length placeholder.
+  writer.PushUint32((format == DW_EH_FRAME_FORMAT) ? 0 : 0xFFFFFFFF);  // CIE id.
+  writer.PushUint8(1);   // Version.
+  writer.PushString("zR");
+  writer.PushUleb128(DebugFrameOpCodeWriter<Vector>::kCodeAlignmentFactor);
+  writer.PushSleb128(DebugFrameOpCodeWriter<Vector>::kDataAlignmentFactor);
+  writer.PushUleb128(return_address_register.num());  // ubyte in DWARF2.
+  writer.PushUleb128(1);  // z: Augmentation data size.
+  if (is64bit) {
+    if (format == DW_EH_FRAME_FORMAT) {
+      writer.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata8);   // R: Pointer encoding.
+    } else {
+      DCHECK(format == DW_DEBUG_FRAME_FORMAT);
+      writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata8);  // R: Pointer encoding.
+    }
+  } else {
+    if (format == DW_EH_FRAME_FORMAT) {
+      writer.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4);   // R: Pointer encoding.
+    } else {
+      DCHECK(format == DW_DEBUG_FRAME_FORMAT);
+      writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata4);  // R: Pointer encoding.
+    }
+  }
+  writer.PushData(opcodes.data());
+  writer.Pad(is64bit ? 8 : 4);
+  writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
+}
+
+// Write frame description entry (FDE) to .debug_frame or .eh_frame section.
+inline
+void WriteFDE(bool is64bit,
+              uint64_t section_address,  // Absolute address of the section.
+              uint64_t cie_address,  // Absolute address of last CIE.
+              uint64_t code_address,
+              uint64_t code_size,
+              const ArrayRef<const uint8_t>& opcodes,
+              CFIFormat format,
+              uint64_t buffer_address,  // Address of buffer in linked application.
+              std::vector<uint8_t>* buffer,
+              std::vector<uintptr_t>* patch_locations) {
+  CHECK_GE(cie_address, section_address);
+  CHECK_GE(buffer_address, section_address);
+
+  Writer<> writer(buffer);
+  size_t fde_header_start = writer.data()->size();
+  writer.PushUint32(0);  // Length placeholder.
+  if (format == DW_EH_FRAME_FORMAT) {
+    uint32_t cie_pointer = (buffer_address + buffer->size()) - cie_address;
+    writer.PushUint32(cie_pointer);
+  } else {
+    DCHECK(format == DW_DEBUG_FRAME_FORMAT);
+    uint32_t cie_pointer = cie_address - section_address;
+    writer.PushUint32(cie_pointer);
+  }
+  if (format == DW_EH_FRAME_FORMAT) {
+    // .eh_frame encodes the location as a relative address.
+    code_address -= buffer_address + buffer->size();
+  } else {
+    DCHECK(format == DW_DEBUG_FRAME_FORMAT);
+    // Relocate code_address if it has an absolute value.
+    patch_locations->push_back(buffer_address + buffer->size() - section_address);
+  }
+  if (is64bit) {
+    writer.PushUint64(code_address);
+    writer.PushUint64(code_size);
+  } else {
+    writer.PushUint32(code_address);
+    writer.PushUint32(code_size);
+  }
+  writer.PushUleb128(0);  // Augmentation data size.
+  writer.PushData(opcodes.data(), opcodes.size());
+  writer.Pad(is64bit ? 8 : 4);
+  writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
+}
+
+// Write compilation unit (CU) to .debug_info section.
+template <typename Vector>
+void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
+                      const DebugInfoEntryWriter<Vector>& entries,
+                      size_t debug_info_offset,  // offset from start of .debug_info.
+                      std::vector<uint8_t>* debug_info,
+                      std::vector<uintptr_t>* debug_info_patches) {
+  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
+  Writer<> writer(debug_info);
+  size_t start = writer.data()->size();
+  writer.PushUint32(0);  // Length placeholder.
+  writer.PushUint16(4);  // Version.
+  writer.PushUint32(debug_abbrev_offset);
+  writer.PushUint8(entries.Is64bit() ? 8 : 4);
+  size_t entries_offset = writer.data()->size();
+  DCHECK_EQ(entries_offset, DebugInfoEntryWriter<Vector>::kCompilationUnitHeaderSize);
+  writer.PushData(entries.data());
+  writer.UpdateUint32(start, writer.data()->size() - start - 4);
+  // Copy patch locations and make them relative to .debug_info section.
+  for (uintptr_t patch_location : entries.GetPatchLocations()) {
+    debug_info_patches->push_back(debug_info_offset + entries_offset + patch_location);
+  }
+}
+
+struct FileEntry {
+  std::string file_name;
+  int directory_index;
+  int modification_time;
+  int file_size;
+};
+
+// Write line table to .debug_line section.
+template <typename Vector>
+void WriteDebugLineTable(const std::vector<std::string>& include_directories,
+                         const std::vector<FileEntry>& files,
+                         const DebugLineOpCodeWriter<Vector>& opcodes,
+                         size_t debug_line_offset,  // offset from start of .debug_line.
+                         std::vector<uint8_t>* debug_line,
+                         std::vector<uintptr_t>* debug_line_patches) {
+  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
+  Writer<> writer(debug_line);
+  size_t header_start = writer.data()->size();
+  writer.PushUint32(0);  // Section-length placeholder.
+  writer.PushUint16(3);  // .debug_line version.
+  size_t header_length_pos = writer.data()->size();
+  writer.PushUint32(0);  // Header-length placeholder.
+  writer.PushUint8(1 << opcodes.GetCodeFactorBits());
+  writer.PushUint8(DebugLineOpCodeWriter<Vector>::kDefaultIsStmt ? 1 : 0);
+  writer.PushInt8(DebugLineOpCodeWriter<Vector>::kLineBase);
+  writer.PushUint8(DebugLineOpCodeWriter<Vector>::kLineRange);
+  writer.PushUint8(DebugLineOpCodeWriter<Vector>::kOpcodeBase);
+  static const int opcode_lengths[DebugLineOpCodeWriter<Vector>::kOpcodeBase] = {
+      0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 };
+  for (int i = 1; i < DebugLineOpCodeWriter<Vector>::kOpcodeBase; i++) {
+    writer.PushUint8(opcode_lengths[i]);
+  }
+  for (const std::string& directory : include_directories) {
+    writer.PushData(directory.data(), directory.size() + 1);
+  }
+  writer.PushUint8(0);  // Terminate include_directories list.
+  for (const FileEntry& file : files) {
+    writer.PushData(file.file_name.data(), file.file_name.size() + 1);
+    writer.PushUleb128(file.directory_index);
+    writer.PushUleb128(file.modification_time);
+    writer.PushUleb128(file.file_size);
+  }
+  writer.PushUint8(0);  // Terminate file list.
+  writer.UpdateUint32(header_length_pos, writer.data()->size() - header_length_pos - 4);
+  size_t opcodes_offset = writer.data()->size();
+  writer.PushData(opcodes.data());
+  writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
+  // Copy patch locations and make them relative to .debug_line section.
+  for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
+    debug_line_patches->push_back(debug_line_offset + opcodes_offset + patch_location);
+  }
+}
+
+}  // namespace dwarf
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_DWARF_HEADERS_H_
diff --git a/compiler/debug/dwarf/register.h b/compiler/debug/dwarf/register.h
new file mode 100644
index 000000000..24bacac29
--- /dev/null
+++ b/compiler/debug/dwarf/register.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_DWARF_REGISTER_H_
+#define ART_COMPILER_DEBUG_DWARF_REGISTER_H_
+
+namespace art {
+namespace dwarf {
+
+// Represents a DWARF register.
+class Reg {
+ public:
+  explicit Reg(int reg_num) : num_(reg_num) { }
+  int num() const { return num_; }
+
+  // TODO: The Arm S0-S31 register mapping is obsolescent.
+  // We should use the VFP-v3/Neon D0-D31 mapping instead.
+  // However, D0 is aliased to the pair of S0 and S1, so using that
+  // mapping we cannot easily say S0 is spilled and S1 is not.
+  // There are ways around this in DWARF but they are complex.
+  // It would be much simpler to always spill whole D registers.
+  // The Arm64 mapping is correct since we already do this there.
+  // libunwind might struggle with the new mapping as well.
+
+  static Reg ArmCore(int num) { return Reg(num); }  // R0-R15.
+  static Reg ArmFp(int num) { return Reg(64 + num); }  // S0-S31.
+  static Reg ArmDp(int num) { return Reg(256 + num); }  // D0-D31.
+  static Reg Arm64Core(int num) { return Reg(num); }  // X0-X31.
+  static Reg Arm64Fp(int num) { return Reg(64 + num); }  // V0-V31.
+  static Reg MipsCore(int num) { return Reg(num); }
+  static Reg Mips64Core(int num) { return Reg(num); }
+  static Reg MipsFp(int num) { return Reg(32 + num); }
+  static Reg Mips64Fp(int num) { return Reg(32 + num); }
+  static Reg X86Core(int num) { return Reg(num); }
+  static Reg X86Fp(int num) { return Reg(21 + num); }
+  static Reg X86_64Core(int num) {
+    static const int map[8] = {0, 2, 1, 3, 7, 6, 4, 5};
+    return Reg(num < 8 ? map[num] : num);
+  }
+  static Reg X86_64Fp(int num) { return Reg(17 + num); }
+
+ private:
+  int num_;
+};
+
+}  // namespace dwarf
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_DWARF_REGISTER_H_
diff --git a/compiler/debug/dwarf/writer.h b/compiler/debug/dwarf/writer.h
new file mode 100644
index 000000000..95912ad6c
--- /dev/null
+++ b/compiler/debug/dwarf/writer.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_DWARF_WRITER_H_
+#define ART_COMPILER_DEBUG_DWARF_WRITER_H_
+
+#include <type_traits>
+#include <vector>
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "leb128.h"
+
+namespace art {
+namespace dwarf {
+
+// The base class for all DWARF writers.
+template <typename Vector = std::vector<uint8_t>>
+class Writer {
+  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
+ public:
+  void PushUint8(int value) {
+    DCHECK_GE(value, 0);
+    DCHECK_LE(value, UINT8_MAX);
+    data_->push_back(value & 0xff);
+  }
+
+  void PushUint16(int value) {
+    DCHECK_GE(value, 0);
+    DCHECK_LE(value, UINT16_MAX);
+    data_->push_back((value >> 0) & 0xff);
+    data_->push_back((value >> 8) & 0xff);
+  }
+
+  void PushUint32(uint32_t value) {
+    data_->push_back((value >> 0) & 0xff);
+    data_->push_back((value >> 8) & 0xff);
+    data_->push_back((value >> 16) & 0xff);
+    data_->push_back((value >> 24) & 0xff);
+  }
+
+  void PushUint32(int value) {
+    DCHECK_GE(value, 0);
+    PushUint32(static_cast<uint32_t>(value));
+  }
+
+  void PushUint32(uint64_t value) {
+    DCHECK_LE(value, UINT32_MAX);
+    PushUint32(static_cast<uint32_t>(value));
+  }
+
+  void PushUint64(uint64_t value) {
+    data_->push_back((value >> 0) & 0xff);
+    data_->push_back((value >> 8) & 0xff);
+    data_->push_back((value >> 16) & 0xff);
+    data_->push_back((value >> 24) & 0xff);
+    data_->push_back((value >> 32) & 0xff);
+    data_->push_back((value >> 40) & 0xff);
+    data_->push_back((value >> 48) & 0xff);
+    data_->push_back((value >> 56) & 0xff);
+  }
+
+  void PushInt8(int value) {
+    DCHECK_GE(value, INT8_MIN);
+    DCHECK_LE(value, INT8_MAX);
+    PushUint8(static_cast<uint8_t>(value));
+  }
+
+  void PushInt16(int value) {
+    DCHECK_GE(value, INT16_MIN);
+    DCHECK_LE(value, INT16_MAX);
+    PushUint16(static_cast<uint16_t>(value));
+  }
+
+  void PushInt32(int value) {
+    PushUint32(static_cast<uint32_t>(value));
+  }
+
+  void PushInt64(int64_t value) {
+    PushUint64(static_cast<uint64_t>(value));
+  }
+
+  // Variable-length encoders.
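+  // ULEB128/SLEB128 store seven payload bits per byte and set the top bit on
+  // every byte except the last, so values below 128 fit in a single byte.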
+ + void PushUleb128(uint32_t value) { + EncodeUnsignedLeb128(data_, value); + } + + void PushUleb128(int value) { + DCHECK_GE(value, 0); + EncodeUnsignedLeb128(data_, value); + } + + void PushSleb128(int value) { + EncodeSignedLeb128(data_, value); + } + + // Miscellaneous functions. + + void PushString(const char* value) { + data_->insert(data_->end(), value, value + strlen(value) + 1); + } + + void PushData(const uint8_t* ptr, size_t num_bytes) { + data_->insert(data_->end(), ptr, ptr + num_bytes); + } + + void PushData(const char* ptr, size_t num_bytes) { + data_->insert(data_->end(), ptr, ptr + num_bytes); + } + + void PushData(const Vector* buffer) { + data_->insert(data_->end(), buffer->begin(), buffer->end()); + } + + void UpdateUint32(size_t offset, uint32_t value) { + DCHECK_LT(offset + 3, data_->size()); + (*data_)[offset + 0] = (value >> 0) & 0xFF; + (*data_)[offset + 1] = (value >> 8) & 0xFF; + (*data_)[offset + 2] = (value >> 16) & 0xFF; + (*data_)[offset + 3] = (value >> 24) & 0xFF; + } + + void UpdateUint64(size_t offset, uint64_t value) { + DCHECK_LT(offset + 7, data_->size()); + (*data_)[offset + 0] = (value >> 0) & 0xFF; + (*data_)[offset + 1] = (value >> 8) & 0xFF; + (*data_)[offset + 2] = (value >> 16) & 0xFF; + (*data_)[offset + 3] = (value >> 24) & 0xFF; + (*data_)[offset + 4] = (value >> 32) & 0xFF; + (*data_)[offset + 5] = (value >> 40) & 0xFF; + (*data_)[offset + 6] = (value >> 48) & 0xFF; + (*data_)[offset + 7] = (value >> 56) & 0xFF; + } + + void UpdateUleb128(size_t offset, uint32_t value) { + DCHECK_LE(offset + UnsignedLeb128Size(value), data_->size()); + UpdateUnsignedLeb128(data_->data() + offset, value); + } + + void Pop() { + return data_->pop_back(); + } + + void Pad(int alignment) { + DCHECK_NE(alignment, 0); + data_->resize(RoundUp(data_->size(), alignment), 0); + } + + const Vector* data() const { + return data_; + } + + size_t size() const { + return data_->size(); + } + + explicit Writer(Vector* buffer) : data_(buffer) { } + + private: + Vector* const data_; + + DISALLOW_COPY_AND_ASSIGN(Writer); +}; + +} // namespace dwarf +} // namespace art + +#endif // ART_COMPILER_DEBUG_DWARF_WRITER_H_ diff --git a/compiler/debug/elf_compilation_unit.h b/compiler/debug/elf_compilation_unit.h new file mode 100644 index 000000000..b1d89ebeb --- /dev/null +++ b/compiler/debug/elf_compilation_unit.h @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ +#define ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ + +#include + +#include "debug/method_debug_info.h" + +namespace art { +namespace debug { + +struct ElfCompilationUnit { + std::vector methods; + size_t debug_line_offset = 0; + bool is_code_address_text_relative; // Is the address offset from start of .text section? 
+ uint64_t code_address = std::numeric_limits::max(); + uint64_t code_end = 0; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ + diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h new file mode 100644 index 000000000..f9d33c1c3 --- /dev/null +++ b/compiler/debug/elf_debug_frame_writer.h @@ -0,0 +1,284 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_ + +#include + +#include "arch/instruction_set.h" +#include "debug/dwarf/debug_frame_opcode_writer.h" +#include "debug/dwarf/dwarf_constants.h" +#include "debug/dwarf/headers.h" +#include "debug/method_debug_info.h" +#include "elf_builder.h" + +namespace art { +namespace debug { + +static void WriteCIE(InstructionSet isa, + dwarf::CFIFormat format, + std::vector* buffer) { + using Reg = dwarf::Reg; + // Scratch registers should be marked as undefined. This tells the + // debugger that its value in the previous frame is not recoverable. + bool is64bit = Is64BitInstructionSet(isa); + switch (isa) { + case kArm: + case kThumb2: { + dwarf::DebugFrameOpCodeWriter<> opcodes; + opcodes.DefCFA(Reg::ArmCore(13), 0); // R13(SP). + // core registers. + for (int reg = 0; reg < 13; reg++) { + if (reg < 4 || reg == 12) { + opcodes.Undefined(Reg::ArmCore(reg)); + } else { + opcodes.SameValue(Reg::ArmCore(reg)); + } + } + // fp registers. + for (int reg = 0; reg < 32; reg++) { + if (reg < 16) { + opcodes.Undefined(Reg::ArmFp(reg)); + } else { + opcodes.SameValue(Reg::ArmFp(reg)); + } + } + auto return_reg = Reg::ArmCore(14); // R14(LR). + WriteCIE(is64bit, return_reg, opcodes, format, buffer); + return; + } + case kArm64: { + dwarf::DebugFrameOpCodeWriter<> opcodes; + opcodes.DefCFA(Reg::Arm64Core(31), 0); // R31(SP). + // core registers. + for (int reg = 0; reg < 30; reg++) { + if (reg < 8 || reg == 16 || reg == 17) { + opcodes.Undefined(Reg::Arm64Core(reg)); + } else { + opcodes.SameValue(Reg::Arm64Core(reg)); + } + } + // fp registers. + for (int reg = 0; reg < 32; reg++) { + if (reg < 8 || reg >= 16) { + opcodes.Undefined(Reg::Arm64Fp(reg)); + } else { + opcodes.SameValue(Reg::Arm64Fp(reg)); + } + } + auto return_reg = Reg::Arm64Core(30); // R30(LR). + WriteCIE(is64bit, return_reg, opcodes, format, buffer); + return; + } + case kMips: + case kMips64: { + dwarf::DebugFrameOpCodeWriter<> opcodes; + opcodes.DefCFA(Reg::MipsCore(29), 0); // R29(SP). + // core registers. + for (int reg = 1; reg < 26; reg++) { + if (reg < 16 || reg == 24 || reg == 25) { // AT, V*, A*, T*. + opcodes.Undefined(Reg::MipsCore(reg)); + } else { + opcodes.SameValue(Reg::MipsCore(reg)); + } + } + // fp registers. 
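+ // Only $f24-$f31 are treated as callee-saved below; the lower fp registers
+ // are scratch and therefore marked as undefined.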
+ for (int reg = 0; reg < 32; reg++) { + if (reg < 24) { + opcodes.Undefined(Reg::Mips64Fp(reg)); + } else { + opcodes.SameValue(Reg::Mips64Fp(reg)); + } + } + auto return_reg = Reg::MipsCore(31); // R31(RA). + WriteCIE(is64bit, return_reg, opcodes, format, buffer); + return; + } + case kX86: { + // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296 + constexpr bool generate_opcodes_for_x86_fp = false; + dwarf::DebugFrameOpCodeWriter<> opcodes; + opcodes.DefCFA(Reg::X86Core(4), 4); // R4(ESP). + opcodes.Offset(Reg::X86Core(8), -4); // R8(EIP). + // core registers. + for (int reg = 0; reg < 8; reg++) { + if (reg <= 3) { + opcodes.Undefined(Reg::X86Core(reg)); + } else if (reg == 4) { + // Stack pointer. + } else { + opcodes.SameValue(Reg::X86Core(reg)); + } + } + // fp registers. + if (generate_opcodes_for_x86_fp) { + for (int reg = 0; reg < 8; reg++) { + opcodes.Undefined(Reg::X86Fp(reg)); + } + } + auto return_reg = Reg::X86Core(8); // R8(EIP). + WriteCIE(is64bit, return_reg, opcodes, format, buffer); + return; + } + case kX86_64: { + dwarf::DebugFrameOpCodeWriter<> opcodes; + opcodes.DefCFA(Reg::X86_64Core(4), 8); // R4(RSP). + opcodes.Offset(Reg::X86_64Core(16), -8); // R16(RIP). + // core registers. + for (int reg = 0; reg < 16; reg++) { + if (reg == 4) { + // Stack pointer. + } else if (reg < 12 && reg != 3 && reg != 5) { // except EBX and EBP. + opcodes.Undefined(Reg::X86_64Core(reg)); + } else { + opcodes.SameValue(Reg::X86_64Core(reg)); + } + } + // fp registers. + for (int reg = 0; reg < 16; reg++) { + if (reg < 12) { + opcodes.Undefined(Reg::X86_64Fp(reg)); + } else { + opcodes.SameValue(Reg::X86_64Fp(reg)); + } + } + auto return_reg = Reg::X86_64Core(16); // R16(RIP). + WriteCIE(is64bit, return_reg, opcodes, format, buffer); + return; + } + case kNone: + break; + } + LOG(FATAL) << "Cannot write CIE frame for ISA " << isa; + UNREACHABLE(); +} + +template +void WriteCFISection(ElfBuilder* builder, + const ArrayRef& method_infos, + dwarf::CFIFormat format, + bool write_oat_patches) { + CHECK(format == dwarf::DW_DEBUG_FRAME_FORMAT || format == dwarf::DW_EH_FRAME_FORMAT); + typedef typename ElfTypes::Addr Elf_Addr; + + // The methods can be written in any order. + // Let's therefore sort them in the lexicographical order of the opcodes. + // This has no effect on its own. However, if the final .debug_frame section is + // compressed it reduces the size since similar opcodes sequences are grouped. + std::vector sorted_method_infos; + sorted_method_infos.reserve(method_infos.size()); + for (size_t i = 0; i < method_infos.size(); i++) { + if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) { + sorted_method_infos.push_back(&method_infos[i]); + } + } + if (sorted_method_infos.empty()) { + return; + } + std::stable_sort( + sorted_method_infos.begin(), + sorted_method_infos.end(), + [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) { + ArrayRef l = lhs->cfi; + ArrayRef r = rhs->cfi; + return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end()); + }); + + std::vector binary_search_table; + std::vector patch_locations; + if (format == dwarf::DW_EH_FRAME_FORMAT) { + binary_search_table.reserve(2 * sorted_method_infos.size()); + } else { + patch_locations.reserve(sorted_method_infos.size()); + } + + // Write .eh_frame/.debug_frame section. + auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT + ? 
builder->GetDebugFrame() + : builder->GetEhFrame()); + { + cfi_section->Start(); + const bool is64bit = Is64BitInstructionSet(builder->GetIsa()); + const Elf_Addr cfi_address = cfi_section->GetAddress(); + const Elf_Addr cie_address = cfi_address; + Elf_Addr buffer_address = cfi_address; + std::vector buffer; // Small temporary buffer. + WriteCIE(builder->GetIsa(), format, &buffer); + cfi_section->WriteFully(buffer.data(), buffer.size()); + buffer_address += buffer.size(); + buffer.clear(); + for (const MethodDebugInfo* mi : sorted_method_infos) { + DCHECK(!mi->deduped); + DCHECK(!mi->cfi.empty()); + const Elf_Addr code_address = mi->code_address + + (mi->is_code_address_text_relative ? builder->GetText()->GetAddress() : 0); + if (format == dwarf::DW_EH_FRAME_FORMAT) { + binary_search_table.push_back(dchecked_integral_cast(code_address)); + binary_search_table.push_back(dchecked_integral_cast(buffer_address)); + } + WriteFDE(is64bit, cfi_address, cie_address, + code_address, mi->code_size, + mi->cfi, format, buffer_address, &buffer, + &patch_locations); + cfi_section->WriteFully(buffer.data(), buffer.size()); + buffer_address += buffer.size(); + buffer.clear(); + } + cfi_section->End(); + } + + if (format == dwarf::DW_EH_FRAME_FORMAT) { + auto* header_section = builder->GetEhFrameHdr(); + header_section->Start(); + uint32_t header_address = dchecked_integral_cast(header_section->GetAddress()); + // Write .eh_frame_hdr section. + std::vector buffer; + dwarf::Writer<> header(&buffer); + header.PushUint8(1); // Version. + // Encoding of .eh_frame pointer - libunwind does not honor datarel here, + // so we have to use pcrel which means relative to the pointer's location. + header.PushUint8(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4); + // Encoding of binary search table size. + header.PushUint8(dwarf::DW_EH_PE_udata4); + // Encoding of binary search table addresses - libunwind supports only this + // specific combination, which means relative to the start of .eh_frame_hdr. + header.PushUint8(dwarf::DW_EH_PE_datarel | dwarf::DW_EH_PE_sdata4); + // .eh_frame pointer + header.PushInt32(cfi_section->GetAddress() - (header_address + 4u)); + // Binary search table size (number of entries). + header.PushUint32(dchecked_integral_cast(binary_search_table.size()/2)); + header_section->WriteFully(buffer.data(), buffer.size()); + // Binary search table. + for (size_t i = 0; i < binary_search_table.size(); i++) { + // Make addresses section-relative since we know the header address now. + binary_search_table[i] -= header_address; + } + header_section->WriteFully(binary_search_table.data(), binary_search_table.size()); + header_section->End(); + } else { + if (write_oat_patches) { + builder->WritePatches(".debug_frame.oat_patches", + ArrayRef(patch_locations)); + } + } +} + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_ + diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h new file mode 100644 index 000000000..e8e278d97 --- /dev/null +++ b/compiler/debug/elf_debug_info_writer.h @@ -0,0 +1,671 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
+#include <map>
+#include <unordered_set>
+#include <vector>
+
+#include "debug/dwarf/debug_abbrev_writer.h"
+#include "debug/dwarf/debug_info_entry_writer.h"
+#include "debug/elf_compilation_unit.h"
+#include "debug/elf_debug_loc_writer.h"
+#include "debug/method_debug_info.h"
+#include "dex_file-inl.h"
+#include "dex_file.h"
+#include "elf_builder.h"
+#include "linear_alloc.h"
+#include "mirror/array.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
+
+namespace art {
+namespace debug {
+
+typedef std::vector<DexFile::LocalInfo> LocalInfos;
+
+static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
+  static_cast<LocalInfos*>(ctx)->push_back(entry);
+}
+
+static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
+  std::vector<const char*> names;
+  if (mi->code_item != nullptr) {
+    DCHECK(mi->dex_file != nullptr);
+    const uint8_t* stream = mi->dex_file->GetDebugInfoStream(mi->code_item);
+    if (stream != nullptr) {
+      DecodeUnsignedLeb128(&stream);  // line.
+      uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+      for (uint32_t i = 0; i < parameters_size; ++i) {
+        uint32_t id = DecodeUnsignedLeb128P1(&stream);
+        names.push_back(mi->dex_file->StringDataByIdx(id));
+      }
+    }
+  }
+  return names;
+}
+
+// Helper class to write .debug_info and its supporting sections.
+template <typename ElfTypes>
+class ElfDebugInfoWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugInfoWriter(ElfBuilder<ElfTypes>* builder)
+      : builder_(builder),
+        debug_abbrev_(&debug_abbrev_buffer_) {
+  }
+
+  void Start() {
+    builder_->GetDebugInfo()->Start();
+  }
+
+  void End(bool write_oat_patches) {
+    builder_->GetDebugInfo()->End();
+    if (write_oat_patches) {
+      builder_->WritePatches(".debug_info.oat_patches",
+                             ArrayRef<const uintptr_t>(debug_info_patches_));
+    }
+    builder_->WriteSection(".debug_abbrev", &debug_abbrev_buffer_);
+    if (!debug_loc_.empty()) {
+      builder_->WriteSection(".debug_loc", &debug_loc_);
+    }
+    if (!debug_ranges_.empty()) {
+      builder_->WriteSection(".debug_ranges", &debug_ranges_);
+    }
+  }
+
+ private:
+  ElfBuilder<ElfTypes>* builder_;
+  std::vector<uintptr_t> debug_info_patches_;
+  std::vector<uint8_t> debug_abbrev_buffer_;
+  dwarf::DebugAbbrevWriter<> debug_abbrev_;
+  std::vector<uint8_t> debug_loc_;
+  std::vector<uint8_t> debug_ranges_;
+
+  std::unordered_set<const char*> defined_dex_classes_;  // For CHECKs only.
+
+  template <typename ElfTypes2>
+  friend class ElfCompilationUnitWriter;
+};
+
+// Helper class to write one compilation unit.
+// It holds helper methods and temporary state.
+template <typename ElfTypes>
+class ElfCompilationUnitWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfCompilationUnitWriter(ElfDebugInfoWriter<ElfTypes>* owner)
+      : owner_(owner),
+        info_(Is64BitInstructionSet(owner_->builder_->GetIsa()), &owner->debug_abbrev_) {
+  }
+
+  void Write(const ElfCompilationUnit& compilation_unit) {
+    CHECK(!compilation_unit.methods.empty());
+    const Elf_Addr base_address = compilation_unit.is_code_address_text_relative
+        ? owner_->builder_->GetText()->GetAddress()
+        : 0;
+    const uint64_t cu_size = compilation_unit.code_end - compilation_unit.code_address;
+    using namespace dwarf;  // NOLINT.
For easy access to DWARF constants. + + info_.StartTag(DW_TAG_compile_unit); + info_.WriteString(DW_AT_producer, "Android dex2oat"); + info_.WriteData1(DW_AT_language, DW_LANG_Java); + info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT"); + info_.WriteAddr(DW_AT_low_pc, base_address + compilation_unit.code_address); + info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast(cu_size)); + info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset); + + const char* last_dex_class_desc = nullptr; + for (auto mi : compilation_unit.methods) { + DCHECK(mi->dex_file != nullptr); + const DexFile* dex = mi->dex_file; + const DexFile::CodeItem* dex_code = mi->code_item; + const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index); + const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method); + const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto); + const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method); + const bool is_static = (mi->access_flags & kAccStatic) != 0; + + // Enclose the method in correct class definition. + if (last_dex_class_desc != dex_class_desc) { + if (last_dex_class_desc != nullptr) { + EndClassTag(); + } + // Write reference tag for the class we are about to declare. + size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type); + type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset); + size_t type_attrib_offset = info_.size(); + info_.WriteRef4(DW_AT_type, 0); + info_.EndTag(); + // Declare the class that owns this method. + size_t class_offset = StartClassTag(dex_class_desc); + info_.UpdateUint32(type_attrib_offset, class_offset); + info_.WriteFlagPresent(DW_AT_declaration); + // Check that each class is defined only once. + bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second; + CHECK(unique) << "Redefinition of " << dex_class_desc; + last_dex_class_desc = dex_class_desc; + } + + int start_depth = info_.Depth(); + info_.StartTag(DW_TAG_subprogram); + WriteName(dex->GetMethodName(dex_method)); + info_.WriteAddr(DW_AT_low_pc, base_address + mi->code_address); + info_.WriteUdata(DW_AT_high_pc, mi->code_size); + std::vector expr_buffer; + Expression expr(&expr_buffer); + expr.WriteOpCallFrameCfa(); + info_.WriteExprLoc(DW_AT_frame_base, expr); + WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto)); + + // Decode dex register locations for all stack maps. + // It might be expensive, so do it just once and reuse the result. + std::vector dex_reg_maps; + if (mi->code_info != nullptr) { + const CodeInfo code_info(mi->code_info); + CodeInfoEncoding encoding = code_info.ExtractEncoding(); + for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) { + const StackMap& stack_map = code_info.GetStackMapAt(s, encoding); + dex_reg_maps.push_back(code_info.GetDexRegisterMapOf( + stack_map, encoding, dex_code->registers_size_)); + } + } + + // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not + // guarantee order or uniqueness so it is safer to iterate over them manually. + // DecodeDebugLocalInfo might not also be available if there is no debug info. + std::vector param_names = GetParamNames(mi); + uint32_t arg_reg = 0; + if (!is_static) { + info_.StartTag(DW_TAG_formal_parameter); + WriteName("this"); + info_.WriteFlagPresent(DW_AT_artificial); + WriteLazyType(dex_class_desc); + if (dex_code != nullptr) { + // Write the stack location of the parameter. 
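+ // Incoming arguments occupy the last ins_size_ registers of the frame,
+ // so the first argument lives in vreg (registers_size_ - ins_size_).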
+ const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; + const bool is64bitValue = false; + WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); + } + arg_reg++; + info_.EndTag(); + } + if (dex_params != nullptr) { + for (uint32_t i = 0; i < dex_params->Size(); ++i) { + info_.StartTag(DW_TAG_formal_parameter); + // Parameter names may not be always available. + if (i < param_names.size()) { + WriteName(param_names[i]); + } + // Write the type. + const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_); + WriteLazyType(type_desc); + const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J'; + if (dex_code != nullptr) { + // Write the stack location of the parameter. + const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; + WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); + } + arg_reg += is64bitValue ? 2 : 1; + info_.EndTag(); + } + if (dex_code != nullptr) { + DCHECK_EQ(arg_reg, dex_code->ins_size_); + } + } + + // Write local variables. + LocalInfos local_infos; + if (dex->DecodeDebugLocalInfo(dex_code, + is_static, + mi->dex_method_index, + LocalInfoCallback, + &local_infos)) { + for (const DexFile::LocalInfo& var : local_infos) { + if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) { + info_.StartTag(DW_TAG_variable); + WriteName(var.name_); + WriteLazyType(var.descriptor_); + bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J'; + WriteRegLocation(mi, + dex_reg_maps, + var.reg_, + is64bitValue, + compilation_unit.code_address, + var.start_address_, + var.end_address_); + info_.EndTag(); + } + } + } + + info_.EndTag(); + CHECK_EQ(info_.Depth(), start_depth); // Balanced start/end. + } + if (last_dex_class_desc != nullptr) { + EndClassTag(); + } + FinishLazyTypes(); + CloseNamespacesAboveDepth(0); + info_.EndTag(); // DW_TAG_compile_unit + CHECK_EQ(info_.Depth(), 0); + std::vector buffer; + buffer.reserve(info_.data()->size() + KB); + const size_t offset = owner_->builder_->GetDebugInfo()->GetSize(); + // All compilation units share single table which is at the start of .debug_abbrev. + const size_t debug_abbrev_offset = 0; + WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_); + owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size()); + } + + void Write(const ArrayRef& types) SHARED_REQUIRES(Locks::mutator_lock_) { + using namespace dwarf; // NOLINT. For easy access to DWARF constants. + + info_.StartTag(DW_TAG_compile_unit); + info_.WriteString(DW_AT_producer, "Android dex2oat"); + info_.WriteData1(DW_AT_language, DW_LANG_Java); + + // Base class references to be patched at the end. + std::map base_class_references; + + // Already written declarations or definitions. + std::map class_declarations; + + std::vector expr_buffer; + for (mirror::Class* type : types) { + if (type->IsPrimitive()) { + // For primitive types the definition and the declaration is the same. 
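+ // Void is the exception: it has no run-time values, so it is skipped.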
+ if (type->GetPrimitiveType() != Primitive::kPrimVoid) { + WriteTypeDeclaration(type->GetDescriptor(nullptr)); + } + } else if (type->IsArrayClass()) { + mirror::Class* element_type = type->GetComponentType(); + uint32_t component_size = type->GetComponentSize(); + uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value(); + uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value(); + + CloseNamespacesAboveDepth(0); // Declare in root namespace. + info_.StartTag(DW_TAG_array_type); + std::string descriptor_string; + WriteLazyType(element_type->GetDescriptor(&descriptor_string)); + WriteLinkageName(type); + info_.WriteUdata(DW_AT_data_member_location, data_offset); + info_.StartTag(DW_TAG_subrange_type); + Expression count_expr(&expr_buffer); + count_expr.WriteOpPushObjectAddress(); + count_expr.WriteOpPlusUconst(length_offset); + count_expr.WriteOpDerefSize(4); // Array length is always 32-bit wide. + info_.WriteExprLoc(DW_AT_count, count_expr); + info_.EndTag(); // DW_TAG_subrange_type. + info_.EndTag(); // DW_TAG_array_type. + } else if (type->IsInterface()) { + // Skip. Variables cannot have an interface as a dynamic type. + // We do not expose the interface information to the debugger in any way. + } else { + std::string descriptor_string; + const char* desc = type->GetDescriptor(&descriptor_string); + size_t class_offset = StartClassTag(desc); + class_declarations.emplace(type, class_offset); + + if (!type->IsVariableSize()) { + info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize()); + } + + WriteLinkageName(type); + + if (type->IsObjectClass()) { + // Generate artificial member which is used to get the dynamic type of variable. + // The run-time value of this field will correspond to linkage name of some type. + // We need to do it only once in j.l.Object since all other types inherit it. + info_.StartTag(DW_TAG_member); + WriteName(".dynamic_type"); + WriteLazyType(sizeof(uintptr_t) == 8 ? "J" : "I"); + info_.WriteFlagPresent(DW_AT_artificial); + // Create DWARF expression to get the value of the methods_ field. + Expression expr(&expr_buffer); + // The address of the object has been implicitly pushed on the stack. + // Dereference the klass_ field of Object (32-bit; possibly poisoned). + DCHECK_EQ(type->ClassOffset().Uint32Value(), 0u); + DCHECK_EQ(sizeof(mirror::HeapReference), 4u); + expr.WriteOpDerefSize(4); + if (kPoisonHeapReferences) { + expr.WriteOpNeg(); + // DWARF stack is pointer sized. Ensure that the high bits are clear. + expr.WriteOpConstu(0xFFFFFFFF); + expr.WriteOpAnd(); + } + // Add offset to the methods_ field. + expr.WriteOpPlusUconst(mirror::Class::MethodsOffset().Uint32Value()); + // Top of stack holds the location of the field now. + info_.WriteExprLoc(DW_AT_data_member_location, expr); + info_.EndTag(); // DW_TAG_member. + } + + // Base class. + mirror::Class* base_class = type->GetSuperClass(); + if (base_class != nullptr) { + info_.StartTag(DW_TAG_inheritance); + base_class_references.emplace(info_.size(), base_class); + info_.WriteRef4(DW_AT_type, 0); + info_.WriteUdata(DW_AT_data_member_location, 0); + info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public); + info_.EndTag(); // DW_TAG_inheritance. + } + + // Member variables. 
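+ // One DW_TAG_member per instance field, carrying its name, type, byte
+ // offset within the object, and accessibility derived from Java access flags.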
+      for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) {
+        ArtField* field = type->GetInstanceField(i);
+        info_.StartTag(DW_TAG_member);
+        WriteName(field->GetName());
+        WriteLazyType(field->GetTypeDescriptor());
+        info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value());
+        uint32_t access_flags = field->GetAccessFlags();
+        if (access_flags & kAccPublic) {
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
+        } else if (access_flags & kAccProtected) {
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected);
+        } else if (access_flags & kAccPrivate) {
+          info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+        }
+        info_.EndTag();  // DW_TAG_member.
+      }
+
+      if (type->IsStringClass()) {
+        // Emit debug info about an artificial class member for java.lang.String which represents
+        // the first element of the data stored in a string instance. Consumers of the debug
+        // info will be able to read the content of java.lang.String based on the count (real
+        // field) and based on the location of this data member.
+        info_.StartTag(DW_TAG_member);
+        WriteName("value");
+        // We don't support fields with C-like array types so we just say its type is java char.
+        WriteLazyType("C");  // char.
+        info_.WriteUdata(DW_AT_data_member_location,
+                         mirror::String::ValueOffset().Uint32Value());
+        info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+        info_.EndTag();  // DW_TAG_member.
+      }
+
+      EndClassTag();
+    }
+  }
+
+  // Write base class declarations.
+  for (const auto& base_class_reference : base_class_references) {
+    size_t reference_offset = base_class_reference.first;
+    mirror::Class* base_class = base_class_reference.second;
+    const auto& it = class_declarations.find(base_class);
+    if (it != class_declarations.end()) {
+      info_.UpdateUint32(reference_offset, it->second);
+    } else {
+      // Declare the base class. We cannot use the standard WriteLazyType
+      // since we want to avoid the DW_TAG_reference_type wrapping.
+      std::string tmp_storage;
+      const char* base_class_desc = base_class->GetDescriptor(&tmp_storage);
+      size_t base_class_declaration_offset = StartClassTag(base_class_desc);
+      info_.WriteFlagPresent(DW_AT_declaration);
+      WriteLinkageName(base_class);
+      EndClassTag();
+      class_declarations.emplace(base_class, base_class_declaration_offset);
+      info_.UpdateUint32(reference_offset, base_class_declaration_offset);
+    }
+  }
+
+  FinishLazyTypes();
+  CloseNamespacesAboveDepth(0);
+  info_.EndTag();  // DW_TAG_compile_unit.
+  CHECK_EQ(info_.Depth(), 0);
+  std::vector<uint8_t> buffer;
+  buffer.reserve(info_.data()->size() + KB);
+  const size_t offset = owner_->builder_->GetDebugInfo()->GetSize();
+  // All compilation units share a single table which is at the start of .debug_abbrev.
+  const size_t debug_abbrev_offset = 0;
+  WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_);
+  owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
+  }
+
+  // Write table into .debug_loc which describes location of dex register.
+  // The dex register might be valid only at some points and it might
+  // move between machine registers and stack.
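+  // Each call emits a DWARF location list: (start, end, expression) entries,
+  // one per live range of the register.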
+ void WriteRegLocation(const MethodDebugInfo* method_info, + const std::vector& dex_register_maps, + uint16_t vreg, + bool is64bitValue, + uint64_t compilation_unit_code_address, + uint32_t dex_pc_low = 0, + uint32_t dex_pc_high = 0xFFFFFFFF) { + WriteDebugLocEntry(method_info, + dex_register_maps, + vreg, + is64bitValue, + compilation_unit_code_address, + dex_pc_low, + dex_pc_high, + owner_->builder_->GetIsa(), + &info_, + &owner_->debug_loc_, + &owner_->debug_ranges_); + } + + // Linkage name uniquely identifies type. + // It is used to determine the dynamic type of objects. + // We use the methods_ field of class since it is unique and it is not moved by the GC. + void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) { + auto* methods_ptr = type->GetMethodsPtr(); + if (methods_ptr == nullptr) { + // Some types might have no methods. Allocate empty array instead. + LinearAlloc* allocator = Runtime::Current()->GetLinearAlloc(); + void* storage = allocator->Alloc(Thread::Current(), sizeof(LengthPrefixedArray)); + methods_ptr = new (storage) LengthPrefixedArray(0); + type->SetMethodsPtr(methods_ptr, 0, 0); + DCHECK(type->GetMethodsPtr() != nullptr); + } + char name[32]; + snprintf(name, sizeof(name), "0x%" PRIXPTR, reinterpret_cast(methods_ptr)); + info_.WriteString(dwarf::DW_AT_linkage_name, name); + } + + // Some types are difficult to define as we go since they need + // to be enclosed in the right set of namespaces. Therefore we + // just define all types lazily at the end of compilation unit. + void WriteLazyType(const char* type_descriptor) { + if (type_descriptor != nullptr && type_descriptor[0] != 'V') { + lazy_types_.emplace(std::string(type_descriptor), info_.size()); + info_.WriteRef4(dwarf::DW_AT_type, 0); + } + } + + void FinishLazyTypes() { + for (const auto& lazy_type : lazy_types_) { + info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first)); + } + lazy_types_.clear(); + } + + private: + void WriteName(const char* name) { + if (name != nullptr) { + info_.WriteString(dwarf::DW_AT_name, name); + } + } + + // Convert dex type descriptor to DWARF. + // Returns offset in the compilation unit. + size_t WriteTypeDeclaration(const std::string& desc) { + using namespace dwarf; // NOLINT. For easy access to DWARF constants. + + DCHECK(!desc.empty()); + const auto& it = type_cache_.find(desc); + if (it != type_cache_.end()) { + return it->second; + } + + size_t offset; + if (desc[0] == 'L') { + // Class type. For example: Lpackage/name; + size_t class_offset = StartClassTag(desc.c_str()); + info_.WriteFlagPresent(DW_AT_declaration); + EndClassTag(); + // Reference to the class type. + offset = info_.StartTag(DW_TAG_reference_type); + info_.WriteRef(DW_AT_type, class_offset); + info_.EndTag(); + } else if (desc[0] == '[') { + // Array type. + size_t element_type = WriteTypeDeclaration(desc.substr(1)); + CloseNamespacesAboveDepth(0); // Declare in root namespace. + size_t array_type = info_.StartTag(DW_TAG_array_type); + info_.WriteFlagPresent(DW_AT_declaration); + info_.WriteRef(DW_AT_type, element_type); + info_.EndTag(); + offset = info_.StartTag(DW_TAG_reference_type); + info_.WriteRef4(DW_AT_type, array_type); + info_.EndTag(); + } else { + // Primitive types. 
+ DCHECK_EQ(desc.size(), 1u); + + const char* name; + uint32_t encoding; + uint32_t byte_size; + switch (desc[0]) { + case 'B': + name = "byte"; + encoding = DW_ATE_signed; + byte_size = 1; + break; + case 'C': + name = "char"; + encoding = DW_ATE_UTF; + byte_size = 2; + break; + case 'D': + name = "double"; + encoding = DW_ATE_float; + byte_size = 8; + break; + case 'F': + name = "float"; + encoding = DW_ATE_float; + byte_size = 4; + break; + case 'I': + name = "int"; + encoding = DW_ATE_signed; + byte_size = 4; + break; + case 'J': + name = "long"; + encoding = DW_ATE_signed; + byte_size = 8; + break; + case 'S': + name = "short"; + encoding = DW_ATE_signed; + byte_size = 2; + break; + case 'Z': + name = "boolean"; + encoding = DW_ATE_boolean; + byte_size = 1; + break; + case 'V': + LOG(FATAL) << "Void type should not be encoded"; + UNREACHABLE(); + default: + LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\""; + UNREACHABLE(); + } + CloseNamespacesAboveDepth(0); // Declare in root namespace. + offset = info_.StartTag(DW_TAG_base_type); + WriteName(name); + info_.WriteData1(DW_AT_encoding, encoding); + info_.WriteData1(DW_AT_byte_size, byte_size); + info_.EndTag(); + } + + type_cache_.emplace(desc, offset); + return offset; + } + + // Start DW_TAG_class_type tag nested in DW_TAG_namespace tags. + // Returns offset of the class tag in the compilation unit. + size_t StartClassTag(const char* desc) { + std::string name = SetNamespaceForClass(desc); + size_t offset = info_.StartTag(dwarf::DW_TAG_class_type); + WriteName(name.c_str()); + return offset; + } + + void EndClassTag() { + info_.EndTag(); + } + + // Set the current namespace nesting to one required by the given class. + // Returns the class name with namespaces, 'L', and ';' stripped. + std::string SetNamespaceForClass(const char* desc) { + DCHECK(desc != nullptr && desc[0] == 'L'); + desc++; // Skip the initial 'L'. + size_t depth = 0; + for (const char* end; (end = strchr(desc, '/')) != nullptr; desc = end + 1, ++depth) { + // Check whether the name at this depth is already what we need. + if (depth < current_namespace_.size()) { + const std::string& name = current_namespace_[depth]; + if (name.compare(0, name.size(), desc, end - desc) == 0) { + continue; + } + } + // Otherwise we need to open a new namespace tag at this depth. + CloseNamespacesAboveDepth(depth); + info_.StartTag(dwarf::DW_TAG_namespace); + std::string name(desc, end - desc); + WriteName(name.c_str()); + current_namespace_.push_back(std::move(name)); + } + CloseNamespacesAboveDepth(depth); + return std::string(desc, strchr(desc, ';') - desc); + } + + // Close namespace tags to reach the given nesting depth. + void CloseNamespacesAboveDepth(size_t depth) { + DCHECK_LE(depth, current_namespace_.size()); + while (current_namespace_.size() > depth) { + info_.EndTag(); + current_namespace_.pop_back(); + } + } + + // For access to the ELF sections. + ElfDebugInfoWriter* owner_; + // Temporary buffer to create and store the entries. + dwarf::DebugInfoEntryWriter<> info_; + // Cache of already translated type descriptors. + std::map type_cache_; // type_desc -> definition_offset. + // 32-bit references which need to be resolved to a type later. + // Given type may be used multiple times. Therefore we need a multimap. + std::multimap lazy_types_; // type_desc -> patch_offset. + // The current set of open namespace tags which are active and not closed yet. 
+  std::vector<std::string> current_namespace_;
+};
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_
+
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
new file mode 100644
index 000000000..3db73064f
--- /dev/null
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_
+
+#include <unordered_set>
+#include <vector>
+
+#include "compiled_method.h"
+#include "debug/dwarf/debug_line_opcode_writer.h"
+#include "debug/dwarf/headers.h"
+#include "debug/elf_compilation_unit.h"
+#include "dex_file-inl.h"
+#include "elf_builder.h"
+#include "stack_map.h"
+
+namespace art {
+namespace debug {
+
+typedef std::vector<DexFile::PositionInfo> PositionInfos;
+
+static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
+  static_cast<PositionInfos*>(ctx)->push_back(entry);
+  return false;
+}
+
+template <typename ElfTypes>
+class ElfDebugLineWriter {
+  using Elf_Addr = typename ElfTypes::Addr;
+
+ public:
+  explicit ElfDebugLineWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder) {
+  }
+
+  void Start() {
+    builder_->GetDebugLine()->Start();
+  }
+
+  // Write a line table for the given set of methods.
+  // Returns the number of bytes written.
+  size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) {
+    const bool is64bit = Is64BitInstructionSet(builder_->GetIsa());
+    const Elf_Addr base_address = compilation_unit.is_code_address_text_relative
+        ? builder_->GetText()->GetAddress()
+        : 0;
+
+    compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetSize();
+
+    std::vector<dwarf::FileEntry> files;
+    std::unordered_map<std::string, size_t> files_map;
+    std::vector<std::string> directories;
+    std::unordered_map<std::string, size_t> directories_map;
+    int code_factor_bits_ = 0;
+    int dwarf_isa = -1;
+    switch (builder_->GetIsa()) {
+      case kArm:  // arm actually means thumb2.
+      case kThumb2:
+        code_factor_bits_ = 1;  // 16-bit instructions.
+        dwarf_isa = 1;  // DW_ISA_ARM_thumb.
+        break;
+      case kArm64:
+      case kMips:
+      case kMips64:
+        code_factor_bits_ = 2;  // 32-bit instructions.
+        break;
+      case kNone:
+      case kX86:
+      case kX86_64:
+        break;
+    }
+    std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size());
+    dwarf::DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_);
+    for (const MethodDebugInfo* mi : compilation_unit.methods) {
+      // Ignore the function if we have already generated a line table for the same address.
+      // It would confuse the debugger, and the DWARF specification forbids it.
+      // We allow the line table for a method to be replicated in different compilation units.
+      // This ensures that each compilation unit contains line tables for all its methods.
+      if (!seen_addresses.insert(mi->code_address).second) {
+        continue;
+      }
+
+      uint32_t prologue_end = std::numeric_limits<uint32_t>::max();
+      std::vector<SrcMapElem> pc2dex_map;
+      if (mi->code_info != nullptr) {
+        // Use stack maps to create a mapping table from pc to dex.
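+        // Illustrative example (made-up values): stack maps at native pc offsets
+        // 0x04, 0x0c and 0x1c for dex pcs 0, 3 and 7 would yield
+        //   pc2dex_map = {{0x04, 0}, {0x0c, 3}, {0x1c, 7}}
+        // once the map has been sorted by native pc below.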
+        const CodeInfo code_info(mi->code_info);
+        const CodeInfoEncoding encoding = code_info.ExtractEncoding();
+        pc2dex_map.reserve(code_info.GetNumberOfStackMaps(encoding));
+        for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
+          StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+          DCHECK(stack_map.IsValid());
+          const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+          const int32_t dex = stack_map.GetDexPc(encoding.stack_map_encoding);
+          pc2dex_map.push_back({pc, dex});
+          if (stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+            // Guess that the first map with local variables is the end of the prologue.
+            prologue_end = std::min(prologue_end, pc);
+          }
+        }
+        std::sort(pc2dex_map.begin(), pc2dex_map.end());
+      }
+
+      if (pc2dex_map.empty()) {
+        continue;
+      }
+
+      // Compensate for the compiler's off-by-one-instruction error.
+      //
+      // The compiler generates a stackmap with the PC *after* the branch instruction
+      // (because this is the PC which is easier to obtain when unwinding).
+      //
+      // However, the debugger is more clever and it will ask us for the line-number
+      // mapping at the location of the branch instruction (since the following
+      // instruction could belong to another line; this is the correct thing to do).
+      //
+      // So we really want to just decrement the PC by one instruction so that the
+      // branch instruction is covered as well. However, we do not know the size
+      // of the previous instruction, and we cannot subtract just a fixed amount
+      // (the debugger would trust us that the PC is valid; it might try to set a
+      // breakpoint there at some point, and setting a breakpoint mid-instruction
+      // would make the process crash in a spectacular way).
+      //
+      // Therefore, we say that the PC which the compiler gave us for the stackmap
+      // is the end of its associated address range, and we use the PC from the
+      // previous stack map as the start of the range. This ensures that the PC is
+      // valid and that the branch instruction is covered.
+      //
+      // This gives us correct line number mapping at call sites (which is
+      // important for backtraces), but there is nothing we can do for non-call
+      // sites (so stepping through optimized code in the debugger is not possible).
+      //
+      // We do not adjust the stackmaps if the code was compiled as debuggable.
+      // In that case, the stackmaps should accurately cover all instructions.
+      if (!mi->is_native_debuggable) {
+        for (size_t i = pc2dex_map.size() - 1; i > 0; --i) {
+          pc2dex_map[i].from_ = pc2dex_map[i - 1].from_;
+        }
+        pc2dex_map[0].from_ = 0;
+      }
+
+      Elf_Addr method_address = base_address + mi->code_address;
+
+      PositionInfos dex2line_map;
+      DCHECK(mi->dex_file != nullptr);
+      const DexFile* dex = mi->dex_file;
+      if (!dex->DecodeDebugPositionInfo(mi->code_item, PositionInfoCallback, &dex2line_map)) {
+        continue;
+      }
+
+      if (dex2line_map.empty()) {
+        continue;
+      }
+
+      opcodes.SetAddress(method_address);
+      if (dwarf_isa != -1) {
+        opcodes.SetISA(dwarf_isa);
+      }
+
+      // Get and deduplicate the directory and filename.
+      int file_index = 0;  // 0 - primary source file of the compilation.
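+      // Hypothetical example: for the class descriptor "Lcom/example/Foo;" with
+      // source file "Foo.java", the code below guesses the directory
+      // "com/example" and records the file as "com/example/Foo.java".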
+      auto& dex_class_def = dex->GetClassDef(mi->class_def_index);
+      const char* source_file = dex->GetSourceFile(dex_class_def);
+      if (source_file != nullptr) {
+        std::string file_name(source_file);
+        size_t file_name_slash = file_name.find_last_of('/');
+        std::string class_name(dex->GetClassDescriptor(dex_class_def));
+        size_t class_name_slash = class_name.find_last_of('/');
+        std::string full_path(file_name);
+
+        // Guess the directory from the package name.
+        int directory_index = 0;  // 0 - current directory of the compilation.
+        if (file_name_slash == std::string::npos &&  // Just the filename.
+            class_name.front() == 'L' &&  // Type descriptor for a class.
+            class_name_slash != std::string::npos) {  // Has a package name.
+          std::string package_name = class_name.substr(1, class_name_slash - 1);
+          auto it = directories_map.find(package_name);
+          if (it == directories_map.end()) {
+            directory_index = 1 + directories.size();
+            directories_map.emplace(package_name, directory_index);
+            directories.push_back(package_name);
+          } else {
+            directory_index = it->second;
+          }
+          full_path = package_name + "/" + file_name;
+        }
+
+        // Add the file entry.
+        auto it2 = files_map.find(full_path);
+        if (it2 == files_map.end()) {
+          file_index = 1 + files.size();
+          files_map.emplace(full_path, file_index);
+          files.push_back(dwarf::FileEntry {
+            file_name,
+            directory_index,
+            0,  // Modification time - NA.
+            0,  // File size - NA.
+          });
+        } else {
+          file_index = it2->second;
+        }
+      }
+      opcodes.SetFile(file_index);
+
+      // Generate mapping opcodes from PC to Java lines.
+      if (file_index != 0) {
+        // If the method was not compiled as native-debuggable, we still generate all available
+        // lines, but we try to prevent the debugger from stepping and setting breakpoints since
+        // the information is too inaccurate for that (breakpoints would be set after the calls).
+        const bool default_is_stmt = mi->is_native_debuggable;
+        bool first = true;
+        for (SrcMapElem pc2dex : pc2dex_map) {
+          uint32_t pc = pc2dex.from_;
+          int dex_pc = pc2dex.to_;
+          // Find the mapping whose address is greater than our dex pc, then go back one step.
+          auto dex2line = std::upper_bound(
+              dex2line_map.begin(),
+              dex2line_map.end(),
+              dex_pc,
+              [](uint32_t address, const DexFile::PositionInfo& entry) {
+                return address < entry.address_;
+              });
+          // Look for the first valid mapping after the prologue.
+          if (dex2line != dex2line_map.begin() && pc >= prologue_end) {
+            int line = (--dex2line)->line_;
+            if (first) {
+              first = false;
+              if (pc > 0) {
+                // Assume that any preceding code is prologue.
+                int first_line = dex2line_map.front().line_;
+                // The prologue is not a sensible place for a breakpoint.
+                opcodes.SetIsStmt(false);
+                opcodes.AddRow(method_address, first_line);
+                opcodes.SetPrologueEnd();
+              }
+              opcodes.SetIsStmt(default_is_stmt);
+              opcodes.AddRow(method_address + pc, line);
+            } else if (line != opcodes.CurrentLine()) {
+              opcodes.SetIsStmt(default_is_stmt);
+              opcodes.AddRow(method_address + pc, line);
+            }
+          }
+        }
+      } else {
+        // Line 0 - instruction cannot be attributed to any source line.
+ opcodes.AddRow(method_address, 0); + } + + opcodes.AdvancePC(method_address + mi->code_size); + opcodes.EndSequence(); + } + std::vector buffer; + buffer.reserve(opcodes.data()->size() + KB); + size_t offset = builder_->GetDebugLine()->GetSize(); + WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches_); + builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size()); + return buffer.size(); + } + + void End(bool write_oat_patches) { + builder_->GetDebugLine()->End(); + if (write_oat_patches) { + builder_->WritePatches(".debug_line.oat_patches", + ArrayRef(debug_line_patches_)); + } + } + + private: + ElfBuilder* builder_; + std::vector debug_line_patches_; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ + diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h new file mode 100644 index 000000000..9645643ed --- /dev/null +++ b/compiler/debug/elf_debug_loc_writer.h @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ + +#include +#include + +#include "arch/instruction_set.h" +#include "compiled_method.h" +#include "debug/dwarf/debug_info_entry_writer.h" +#include "debug/dwarf/register.h" +#include "debug/method_debug_info.h" +#include "stack_map.h" + +namespace art { +namespace debug { +using Reg = dwarf::Reg; + +static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) { + switch (isa) { + case kArm: + case kThumb2: + return Reg::ArmCore(machine_reg); + case kArm64: + return Reg::Arm64Core(machine_reg); + case kX86: + return Reg::X86Core(machine_reg); + case kX86_64: + return Reg::X86_64Core(machine_reg); + case kMips: + return Reg::MipsCore(machine_reg); + case kMips64: + return Reg::Mips64Core(machine_reg); + case kNone: + LOG(FATAL) << "No instruction set"; + } + UNREACHABLE(); +} + +static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) { + switch (isa) { + case kArm: + case kThumb2: + return Reg::ArmFp(machine_reg); + case kArm64: + return Reg::Arm64Fp(machine_reg); + case kX86: + return Reg::X86Fp(machine_reg); + case kX86_64: + return Reg::X86_64Fp(machine_reg); + case kMips: + return Reg::MipsFp(machine_reg); + case kMips64: + return Reg::Mips64Fp(machine_reg); + case kNone: + LOG(FATAL) << "No instruction set"; + } + UNREACHABLE(); +} + +struct VariableLocation { + uint32_t low_pc; // Relative to compilation unit. + uint32_t high_pc; // Relative to compilation unit. + DexRegisterLocation reg_lo; // May be None if the location is unknown. + DexRegisterLocation reg_hi; // Most significant bits of 64-bit value. +}; + +// Get the location of given dex register (e.g. stack or machine register). +// Note that the location might be different based on the current pc. +// The result will cover all ranges where the variable is in scope. 
+// PCs corresponding to stackmaps with a dex register map are accurate;
+// all other PCs are best-effort only.
+static std::vector<VariableLocation> GetVariableLocations(
+    const MethodDebugInfo* method_info,
+    const std::vector<DexRegisterMap>& dex_register_maps,
+    uint16_t vreg,
+    bool is64bitValue,
+    uint64_t compilation_unit_code_address,
+    uint32_t dex_pc_low,
+    uint32_t dex_pc_high) {
+  std::vector<VariableLocation> variable_locations;
+
+  // Get stack maps sorted by pc (they might not be sorted internally).
+  // TODO(dsrbecky) Remove this once stackmaps get sorted by pc.
+  const CodeInfo code_info(method_info->code_info);
+  const CodeInfoEncoding encoding = code_info.ExtractEncoding();
+  std::map<uint32_t, uint32_t> stack_maps;  // low_pc -> stack_map_index.
+  for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
+    StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+    DCHECK(stack_map.IsValid());
+    if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
+      // The compiler creates stackmaps without register maps at the start of
+      // basic blocks in order to keep instruction-accurate line number mapping.
+      // However, we never stop at those (breakpoint locations always have a map).
+      // Therefore, for the purpose of local variables, we ignore them.
+      // The main reason for this is to save space by avoiding undefined gaps.
+      continue;
+    }
+    const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+    DCHECK_LE(pc_offset, method_info->code_size);
+    DCHECK_LE(compilation_unit_code_address, method_info->code_address);
+    const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
+        method_info->code_address + pc_offset - compilation_unit_code_address);
+    stack_maps.emplace(low_pc, s);
+  }
+
+  // Create entries for the requested register based on stack map data.
+  for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) {
+    const uint32_t low_pc = it->first;
+    const uint32_t stack_map_index = it->second;
+    const StackMap& stack_map = code_info.GetStackMapAt(stack_map_index, encoding);
+    auto next_it = it;
+    next_it++;
+    const uint32_t high_pc = next_it != stack_maps.end()
+        ? next_it->first
+        : method_info->code_address + method_info->code_size - compilation_unit_code_address;
+    DCHECK_LE(low_pc, high_pc);
+    if (low_pc == high_pc) {
+      continue;  // Ignore if the address range is empty.
+    }
+
+    // Check that the stack map is in the requested range.
+    uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map_encoding);
+    if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
+      // The variable is not in scope at this PC, so omit the entry.
+      // Note that this differs from a None() entry, which means in scope
+      // but with an unknown location.
+      continue;
+    }
+
+    // Find the location of the dex register.
+    DexRegisterLocation reg_lo = DexRegisterLocation::None();
+    DexRegisterLocation reg_hi = DexRegisterLocation::None();
+    DCHECK_LT(stack_map_index, dex_register_maps.size());
+    DexRegisterMap dex_register_map = dex_register_maps[stack_map_index];
+    DCHECK(dex_register_map.IsValid());
+    reg_lo = dex_register_map.GetDexRegisterLocation(
+        vreg, method_info->code_item->registers_size_, code_info, encoding);
+    if (is64bitValue) {
+      reg_hi = dex_register_map.GetDexRegisterLocation(
+          vreg + 1, method_info->code_item->registers_size_, code_info, encoding);
+    }
+
+    // Add a location entry for this address range.
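+    // For example (illustrative values only): two consecutive ranges
+    // [0x00, 0x10) and [0x10, 0x20) that map the vreg to the same location
+    // are merged below into a single [0x00, 0x20) entry.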
+ if (!variable_locations.empty() && + variable_locations.back().reg_lo == reg_lo && + variable_locations.back().reg_hi == reg_hi && + variable_locations.back().high_pc == low_pc) { + // Merge with the previous entry (extend its range). + variable_locations.back().high_pc = high_pc; + } else { + variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi}); + } + } + + return variable_locations; +} + +// Write table into .debug_loc which describes location of dex register. +// The dex register might be valid only at some points and it might +// move between machine registers and stack. +static void WriteDebugLocEntry(const MethodDebugInfo* method_info, + const std::vector& dex_register_maps, + uint16_t vreg, + bool is64bitValue, + uint64_t compilation_unit_code_address, + uint32_t dex_pc_low, + uint32_t dex_pc_high, + InstructionSet isa, + dwarf::DebugInfoEntryWriter<>* debug_info, + std::vector* debug_loc_buffer, + std::vector* debug_ranges_buffer) { + using Kind = DexRegisterLocation::Kind; + if (method_info->code_info == nullptr || dex_register_maps.empty()) { + return; + } + + std::vector variable_locations = GetVariableLocations( + method_info, + dex_register_maps, + vreg, + is64bitValue, + compilation_unit_code_address, + dex_pc_low, + dex_pc_high); + + // Write .debug_loc entries. + dwarf::Writer<> debug_loc(debug_loc_buffer); + const size_t debug_loc_offset = debug_loc.size(); + const bool is64bit = Is64BitInstructionSet(isa); + std::vector expr_buffer; + for (const VariableLocation& variable_location : variable_locations) { + // Translate dex register location to DWARF expression. + // Note that 64-bit value might be split to two distinct locations. + // (for example, two 32-bit machine registers, or even stack and register) + dwarf::Expression expr(&expr_buffer); + DexRegisterLocation reg_lo = variable_location.reg_lo; + DexRegisterLocation reg_hi = variable_location.reg_hi; + for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) { + DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi); + const Kind kind = reg_loc.GetKind(); + const int32_t value = reg_loc.GetValue(); + if (kind == Kind::kInStack) { + // The stack offset is relative to SP. Make it relative to CFA. + expr.WriteOpFbreg(value - method_info->frame_size_in_bytes); + if (piece == 0 && reg_hi.GetKind() == Kind::kInStack && + reg_hi.GetValue() == value + 4) { + break; // the high word is correctly implied by the low word. + } + } else if (kind == Kind::kInRegister) { + expr.WriteOpReg(GetDwarfCoreReg(isa, value).num()); + if (piece == 0 && reg_hi.GetKind() == Kind::kInRegisterHigh && + reg_hi.GetValue() == value) { + break; // the high word is correctly implied by the low word. + } + } else if (kind == Kind::kInFpuRegister) { + if ((isa == kArm || isa == kThumb2) && + piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister && + reg_hi.GetValue() == value + 1 && value % 2 == 0) { + // Translate S register pair to D register (e.g. S4+S5 to D2). + expr.WriteOpReg(Reg::ArmDp(value / 2).num()); + break; + } + expr.WriteOpReg(GetDwarfFpReg(isa, value).num()); + if (piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegisterHigh && + reg_hi.GetValue() == reg_lo.GetValue()) { + break; // the high word is correctly implied by the low word. + } + } else if (kind == Kind::kConstant) { + expr.WriteOpConsts(value); + expr.WriteOpStackValue(); + } else if (kind == Kind::kNone) { + break; + } else { + // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind(). 
+ // kInRegisterHigh and kInFpuRegisterHigh should be handled by + // the special cases above and they should not occur alone. + LOG(ERROR) << "Unexpected register location kind: " << kind; + break; + } + if (is64bitValue) { + // Write the marker which is needed by split 64-bit values. + // This code is skipped by the special cases. + expr.WriteOpPiece(4); + } + } + + if (expr.size() > 0) { + if (is64bit) { + debug_loc.PushUint64(variable_location.low_pc); + debug_loc.PushUint64(variable_location.high_pc); + } else { + debug_loc.PushUint32(variable_location.low_pc); + debug_loc.PushUint32(variable_location.high_pc); + } + // Write the expression. + debug_loc.PushUint16(expr.size()); + debug_loc.PushData(expr.data()); + } else { + // Do not generate .debug_loc if the location is not known. + } + } + // Write end-of-list entry. + if (is64bit) { + debug_loc.PushUint64(0); + debug_loc.PushUint64(0); + } else { + debug_loc.PushUint32(0); + debug_loc.PushUint32(0); + } + + // Write .debug_ranges entries. + // This includes ranges where the variable is in scope but the location is not known. + dwarf::Writer<> debug_ranges(debug_ranges_buffer); + size_t debug_ranges_offset = debug_ranges.size(); + for (size_t i = 0; i < variable_locations.size(); i++) { + uint32_t low_pc = variable_locations[i].low_pc; + uint32_t high_pc = variable_locations[i].high_pc; + while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) { + // Merge address range with the next entry. + high_pc = variable_locations[++i].high_pc; + } + if (is64bit) { + debug_ranges.PushUint64(low_pc); + debug_ranges.PushUint64(high_pc); + } else { + debug_ranges.PushUint32(low_pc); + debug_ranges.PushUint32(high_pc); + } + } + // Write end-of-list entry. + if (is64bit) { + debug_ranges.PushUint64(0); + debug_ranges.PushUint64(0); + } else { + debug_ranges.PushUint32(0); + debug_ranges.PushUint32(0); + } + + // Simple de-duplication - check whether this entry is same as the last one (or tail of it). + size_t debug_ranges_entry_size = debug_ranges.size() - debug_ranges_offset; + if (debug_ranges_offset >= debug_ranges_entry_size) { + size_t previous_offset = debug_ranges_offset - debug_ranges_entry_size; + if (memcmp(debug_ranges_buffer->data() + previous_offset, + debug_ranges_buffer->data() + debug_ranges_offset, + debug_ranges_entry_size) == 0) { + // Remove what we have just written and use the last entry instead. + debug_ranges_buffer->resize(debug_ranges_offset); + debug_ranges_offset = previous_offset; + } + } + + // Write attributes to .debug_info. + debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc_offset); + debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges_offset); +} + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ + diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc new file mode 100644 index 000000000..b7e000aeb --- /dev/null +++ b/compiler/debug/elf_debug_writer.cc @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_debug_writer.h"
+
+#include <vector>
+
+#include "debug/dwarf/dwarf_constants.h"
+#include "debug/elf_compilation_unit.h"
+#include "debug/elf_debug_frame_writer.h"
+#include "debug/elf_debug_info_writer.h"
+#include "debug/elf_debug_line_writer.h"
+#include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_gnu_debugdata_writer.h"
+#include "debug/elf_symtab_writer.h"
+#include "debug/method_debug_info.h"
+#include "elf_builder.h"
+#include "linker/vector_output_stream.h"
+#include "utils/array_ref.h"
+
+namespace art {
+namespace debug {
+
+template <typename ElfTypes>
+void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
+                    const ArrayRef<const MethodDebugInfo>& method_infos,
+                    dwarf::CFIFormat cfi_format,
+                    bool write_oat_patches) {
+  // Write .strtab and .symtab.
+  WriteDebugSymbols(builder, method_infos, true /* with_signature */);
+
+  // Write .debug_frame.
+  WriteCFISection(builder, method_infos, cfi_format, write_oat_patches);
+
+  // Group the methods into compilation units based on the source file.
+  std::vector<ElfCompilationUnit> compilation_units;
+  const char* last_source_file = nullptr;
+  for (const MethodDebugInfo& mi : method_infos) {
+    if (mi.dex_file != nullptr) {
+      auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
+      const char* source_file = mi.dex_file->GetSourceFile(dex_class_def);
+      if (compilation_units.empty() || source_file != last_source_file) {
+        compilation_units.push_back(ElfCompilationUnit());
+      }
+      ElfCompilationUnit& cu = compilation_units.back();
+      cu.methods.push_back(&mi);
+      // All methods must have the same addressing mode, otherwise the min/max below do not work.
+      DCHECK_EQ(cu.methods.front()->is_code_address_text_relative,
+                mi.is_code_address_text_relative);
+      cu.is_code_address_text_relative = mi.is_code_address_text_relative;
+      cu.code_address = std::min(cu.code_address, mi.code_address);
+      cu.code_end = std::max(cu.code_end, mi.code_address + mi.code_size);
+      last_source_file = source_file;
+    }
+  }
+
+  // Write the .debug_line section.
+  if (!compilation_units.empty()) {
+    ElfDebugLineWriter<ElfTypes> line_writer(builder);
+    line_writer.Start();
+    for (auto& compilation_unit : compilation_units) {
+      line_writer.WriteCompilationUnit(compilation_unit);
+    }
+    line_writer.End(write_oat_patches);
+  }
+
+  // Write the .debug_info section.
+  if (!compilation_units.empty()) {
+    ElfDebugInfoWriter<ElfTypes> info_writer(builder);
+    info_writer.Start();
+    for (const auto& compilation_unit : compilation_units) {
+      ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
+      cu_writer.Write(compilation_unit);
+    }
+    info_writer.End(write_oat_patches);
+  }
+}
+
+std::vector<uint8_t> MakeMiniDebugInfo(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    size_t rodata_size,
+    size_t text_size,
+    const ArrayRef<const MethodDebugInfo>& method_infos) {
+  if (Is64BitInstructionSet(isa)) {
+    return MakeMiniDebugInfoInternal<ElfTypes64>(isa,
+                                                 features,
+                                                 rodata_size,
+                                                 text_size,
+                                                 method_infos);
+  } else {
+    return MakeMiniDebugInfoInternal<ElfTypes32>(isa,
+                                                 features,
+                                                 rodata_size,
+                                                 text_size,
+                                                 method_infos);
+  }
+}
+
+template <typename ElfTypes>
+static std::vector<uint8_t> WriteDebugElfFileForMethodsInternal(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    const ArrayRef<const MethodDebugInfo>& method_infos) {
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Debug ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out));
+  // No program headers since the ELF file is not linked and has no allocated sections.
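+  // (Such a file is consumed by tools such as debuggers; without program
+  // headers it can never be loaded by the dynamic linker.)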
+ builder->Start(false /* write_program_headers */); + WriteDebugInfo(builder.get(), + method_infos, + dwarf::DW_DEBUG_FRAME_FORMAT, + false /* write_oat_patches */); + builder->End(); + CHECK(builder->Good()); + return buffer; +} + +std::vector WriteDebugElfFileForMethods( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& method_infos) { + if (Is64BitInstructionSet(isa)) { + return WriteDebugElfFileForMethodsInternal(isa, features, method_infos); + } else { + return WriteDebugElfFileForMethodsInternal(isa, features, method_infos); + } +} + +template +static std::vector WriteDebugElfFileForClassesInternal( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& types) + SHARED_REQUIRES(Locks::mutator_lock_) { + std::vector buffer; + buffer.reserve(KB); + VectorOutputStream out("Debug ELF file", &buffer); + std::unique_ptr> builder(new ElfBuilder(isa, features, &out)); + // No program headers since the ELF file is not linked and has no allocated sections. + builder->Start(false /* write_program_headers */); + ElfDebugInfoWriter info_writer(builder.get()); + info_writer.Start(); + ElfCompilationUnitWriter cu_writer(&info_writer); + cu_writer.Write(types); + info_writer.End(false /* write_oat_patches */); + + builder->End(); + CHECK(builder->Good()); + return buffer; +} + +std::vector WriteDebugElfFileForClasses(InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& types) { + if (Is64BitInstructionSet(isa)) { + return WriteDebugElfFileForClassesInternal(isa, features, types); + } else { + return WriteDebugElfFileForClassesInternal(isa, features, types); + } +} + +std::vector MakeTrampolineInfos(const OatHeader& header) { + std::map trampolines = { + { "interpreterToInterpreterBridge", header.GetInterpreterToInterpreterBridgeOffset() }, + { "interpreterToCompiledCodeBridge", header.GetInterpreterToCompiledCodeBridgeOffset() }, + { "jniDlsymLookup", header.GetJniDlsymLookupOffset() }, + { "quickGenericJniTrampoline", header.GetQuickGenericJniTrampolineOffset() }, + { "quickImtConflictTrampoline", header.GetQuickImtConflictTrampolineOffset() }, + { "quickResolutionTrampoline", header.GetQuickResolutionTrampolineOffset() }, + { "quickToInterpreterBridge", header.GetQuickToInterpreterBridgeOffset() }, + }; + std::vector result; + for (const auto& it : trampolines) { + if (it.second != 0) { + MethodDebugInfo info = MethodDebugInfo(); + info.trampoline_name = it.first; + info.isa = header.GetInstructionSet(); + info.is_code_address_text_relative = true; + info.code_address = it.second - header.GetExecutableOffset(); + info.code_size = 0; // The symbol lasts until the next symbol. + result.push_back(std::move(info)); + } + } + return result; +} + +// Explicit instantiations +template void WriteDebugInfo( + ElfBuilder* builder, + const ArrayRef& method_infos, + dwarf::CFIFormat cfi_format, + bool write_oat_patches); +template void WriteDebugInfo( + ElfBuilder* builder, + const ArrayRef& method_infos, + dwarf::CFIFormat cfi_format, + bool write_oat_patches); + +} // namespace debug +} // namespace art diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h new file mode 100644 index 000000000..6f52249c1 --- /dev/null +++ b/compiler/debug/elf_debug_writer.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ + +#include + +#include "base/macros.h" +#include "base/mutex.h" +#include "debug/dwarf/dwarf_constants.h" +#include "elf_builder.h" +#include "utils/array_ref.h" + +namespace art { +class OatHeader; +namespace mirror { +class Class; +} +namespace debug { +struct MethodDebugInfo; + +template +void WriteDebugInfo( + ElfBuilder* builder, + const ArrayRef& method_infos, + dwarf::CFIFormat cfi_format, + bool write_oat_patches); + +std::vector MakeMiniDebugInfo( + InstructionSet isa, + const InstructionSetFeatures* features, + size_t rodata_section_size, + size_t text_section_size, + const ArrayRef& method_infos); + +std::vector WriteDebugElfFileForMethods( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& method_infos); + +std::vector WriteDebugElfFileForClasses( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef& types) + SHARED_REQUIRES(Locks::mutator_lock_); + +std::vector MakeTrampolineInfos(const OatHeader& oat_header); + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h new file mode 100644 index 000000000..fb63d6257 --- /dev/null +++ b/compiler/debug/elf_gnu_debugdata_writer.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_ +#define ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_ + +#include + +#include "arch/instruction_set.h" +#include "elf_builder.h" +#include "linker/vector_output_stream.h" + +// liblzma. +#include "7zCrc.h" +#include "XzCrc64.h" +#include "XzEnc.h" + +namespace art { +namespace debug { + +static void XzCompress(const std::vector* src, std::vector* dst) { + // Configure the compression library. + CrcGenerateTable(); + Crc64GenerateTable(); + CLzma2EncProps lzma2Props; + Lzma2EncProps_Init(&lzma2Props); + lzma2Props.lzmaProps.level = 1; // Fast compression. + Lzma2EncProps_Normalize(&lzma2Props); + CXzProps props; + XzProps_Init(&props); + props.lzma2Props = &lzma2Props; + // Implement the required interface for communication (written in C so no virtual methods). 
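+  // liblzma expects plain C structs of function pointers (ISeqInStream and
+  // friends), so the struct below provides static callback functions and
+  // carries the buffer state in plain fields instead of virtual dispatch.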
+  struct XzCallbacks : public ISeqInStream, public ISeqOutStream, public ICompressProgress {
+    static SRes ReadImpl(void* p, void* buf, size_t* size) {
+      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqInStream*>(p));
+      *size = std::min(*size, ctx->src_->size() - ctx->src_pos_);
+      memcpy(buf, ctx->src_->data() + ctx->src_pos_, *size);
+      ctx->src_pos_ += *size;
+      return SZ_OK;
+    }
+    static size_t WriteImpl(void* p, const void* buf, size_t size) {
+      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqOutStream*>(p));
+      const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
+      ctx->dst_->insert(ctx->dst_->end(), buffer, buffer + size);
+      return size;
+    }
+    static SRes ProgressImpl(void*, UInt64, UInt64) {
+      return SZ_OK;
+    }
+    size_t src_pos_;
+    const std::vector<uint8_t>* src_;
+    std::vector<uint8_t>* dst_;
+  };
+  XzCallbacks callbacks;
+  callbacks.Read = XzCallbacks::ReadImpl;
+  callbacks.Write = XzCallbacks::WriteImpl;
+  callbacks.Progress = XzCallbacks::ProgressImpl;
+  callbacks.src_pos_ = 0;
+  callbacks.src_ = src;
+  callbacks.dst_ = dst;
+  // Compress.
+  SRes res = Xz_Encode(&callbacks, &callbacks, &props, &callbacks);
+  CHECK_EQ(res, SZ_OK);
+}
+
+template <typename ElfTypes>
+static std::vector<uint8_t> MakeMiniDebugInfoInternal(
+    InstructionSet isa,
+    const InstructionSetFeatures* features,
+    size_t rodata_section_size,
+    size_t text_section_size,
+    const ArrayRef<const MethodDebugInfo>& method_infos) {
+  std::vector<uint8_t> buffer;
+  buffer.reserve(KB);
+  VectorOutputStream out("Mini-debug-info ELF file", &buffer);
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out));
+  builder->Start();
+  // Mirror .rodata and .text as NOBITS sections.
+  // This is needed to detect relocations after compression.
+  builder->GetRoData()->WriteNoBitsSection(rodata_section_size);
+  builder->GetText()->WriteNoBitsSection(text_section_size);
+  WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
+  WriteCFISection(builder.get(),
+                  method_infos,
+                  dwarf::DW_DEBUG_FRAME_FORMAT,
+                  false /* write_oat_patches */);
+  builder->End();
+  CHECK(builder->Good());
+  std::vector<uint8_t> compressed_buffer;
+  compressed_buffer.reserve(buffer.size() / 4);
+  XzCompress(&buffer, &compressed_buffer);
+  return compressed_buffer;
+}
+
+}  // namespace debug
+}  // namespace art
+
+#endif  // ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
+
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
new file mode 100644
index 000000000..045edddd7
--- /dev/null
+++ b/compiler/debug/elf_symtab_writer.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+
+#include <unordered_set>
+
+#include "debug/method_debug_info.h"
+#include "elf_builder.h"
+#include "utils.h"
+
+namespace art {
+namespace debug {
+
+// The ARM specification defines three special mapping symbols
+// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
+// These symbols can be used by tools, for example, to pretty
+// print instructions correctly.
Objdump will use them if they +// exist, but it will still work well without them. +// However, these extra symbols take space, so let's just generate +// one symbol which marks the whole .text section as code. +constexpr bool kGenerateSingleArmMappingSymbol = true; + +template +static void WriteDebugSymbols(ElfBuilder* builder, + const ArrayRef& method_infos, + bool with_signature) { + uint64_t mapping_symbol_address = std::numeric_limits::max(); + auto* strtab = builder->GetStrTab(); + auto* symtab = builder->GetSymTab(); + + if (method_infos.empty()) { + return; + } + + // Find all addresses which contain deduped methods. + // The first instance of method is not marked deduped_, but the rest is. + std::unordered_set deduped_addresses; + for (const MethodDebugInfo& info : method_infos) { + if (info.deduped) { + deduped_addresses.insert(info.code_address); + } + } + + strtab->Start(); + strtab->Write(""); // strtab should start with empty string. + std::string last_name; + size_t last_name_offset = 0; + for (const MethodDebugInfo& info : method_infos) { + if (info.deduped) { + continue; // Add symbol only for the first instance. + } + size_t name_offset; + if (info.trampoline_name != nullptr) { + name_offset = strtab->Write(info.trampoline_name); + } else { + DCHECK(info.dex_file != nullptr); + std::string name = PrettyMethod(info.dex_method_index, *info.dex_file, with_signature); + if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) { + name += " [DEDUPED]"; + } + // If we write method names without signature, we might see the same name multiple times. + name_offset = (name == last_name ? last_name_offset : strtab->Write(name)); + last_name = std::move(name); + last_name_offset = name_offset; + } + + const auto* text = info.is_code_address_text_relative ? builder->GetText() : nullptr; + uint64_t address = info.code_address + (text != nullptr ? text->GetAddress() : 0); + // Add in code delta, e.g., thumb bit 0 for Thumb2 code. + address += CompiledMethod::CodeDelta(info.isa); + symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC); + + // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2 + // instructions, so that disassembler tools can correctly disassemble. + // Note that even if we generate just a single mapping symbol, ARM's Streamline + // requires it to match function symbol. Just address 0 does not work. + if (info.isa == kThumb2) { + if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) { + symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE); + mapping_symbol_address = address; + } + } + } + strtab->End(); + + // Symbols are buffered and written after names (because they are smaller). + // We could also do two passes in this function to avoid the buffering. + symtab->Start(); + symtab->Write(); + symtab->End(); +} + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_ + diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h new file mode 100644 index 000000000..ed1da2c26 --- /dev/null +++ b/compiler/debug/method_debug_info.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ +#define ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ + +#include "compiled_method.h" +#include "dex_file.h" + +namespace art { +namespace debug { + +struct MethodDebugInfo { + const char* trampoline_name; + const DexFile* dex_file; // Native methods (trampolines) do not reference dex file. + size_t class_def_index; + uint32_t dex_method_index; + uint32_t access_flags; + const DexFile::CodeItem* code_item; + InstructionSet isa; + bool deduped; + bool is_native_debuggable; + bool is_optimized; + bool is_code_address_text_relative; // Is the address offset from start of .text section? + uint64_t code_address; + uint32_t code_size; + uint32_t frame_size_in_bytes; + const void* code_info; + ArrayRef cfi; +}; + +} // namespace debug +} // namespace art + +#endif // ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h new file mode 100644 index 000000000..8800e4b08 --- /dev/null +++ b/compiler/dex/compiler_enums.h @@ -0,0 +1,677 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEX_COMPILER_ENUMS_H_ +#define ART_COMPILER_DEX_COMPILER_ENUMS_H_ + +#include "dex_instruction.h" + +namespace art { + +enum RegisterClass { + kInvalidRegClass, + kCoreReg, + kFPReg, + kRefReg, + kAnyReg, +}; +std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs); + +enum BitsUsed { + kSize32Bits, + kSize64Bits, + kSize128Bits, + kSize256Bits, + kSize512Bits, + kSize1024Bits, +}; +std::ostream& operator<<(std::ostream& os, const BitsUsed& rhs); + +enum SpecialTargetRegister { + kSelf, // Thread pointer. + kSuspend, // Used to reduce suspend checks for some targets. 
+ kLr, + kPc, + kSp, + kArg0, + kArg1, + kArg2, + kArg3, + kArg4, + kArg5, + kArg6, + kArg7, + kFArg0, + kFArg1, + kFArg2, + kFArg3, + kFArg4, + kFArg5, + kFArg6, + kFArg7, + kFArg8, + kFArg9, + kFArg10, + kFArg11, + kFArg12, + kFArg13, + kFArg14, + kFArg15, + kRet0, + kRet1, + kInvokeTgt, + kHiddenArg, + kHiddenFpArg, + kCount +}; +std::ostream& operator<<(std::ostream& os, const SpecialTargetRegister& code); + +enum RegLocationType { + kLocDalvikFrame = 0, // Normal Dalvik register + kLocPhysReg, + kLocCompilerTemp, + kLocInvalid +}; +std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs); + +enum BBType { + kNullBlock, + kEntryBlock, + kDalvikByteCode, + kExitBlock, + kExceptionHandling, + kDead, +}; +std::ostream& operator<<(std::ostream& os, const BBType& code); + +// Shared pseudo opcodes - must be < 0. +enum LIRPseudoOpcode { + kPseudoPrologueBegin = -18, + kPseudoPrologueEnd = -17, + kPseudoEpilogueBegin = -16, + kPseudoEpilogueEnd = -15, + kPseudoExportedPC = -14, + kPseudoSafepointPC = -13, + kPseudoIntrinsicRetry = -12, + kPseudoSuspendTarget = -11, + kPseudoThrowTarget = -10, + kPseudoCaseLabel = -9, + kPseudoBarrier = -8, + kPseudoEntryBlock = -7, + kPseudoExitBlock = -6, + kPseudoTargetLabel = -5, + kPseudoDalvikByteCodeBoundary = -4, + kPseudoPseudoAlign4 = -3, + kPseudoEHBlockLabel = -2, + kPseudoNormalBlockLabel = -1, +}; +std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs); + +enum ExtendedMIROpcode { + kMirOpFirst = kNumPackedOpcodes, + kMirOpPhi = kMirOpFirst, + + // @brief Copy from one VR to another. + // @details + // vA: destination VR + // vB: source VR + kMirOpCopy, + + // @brief Used to do float comparison with less-than bias. + // @details Unlike cmpl-float, this does not store result of comparison in VR. + // vA: left-hand side VR for comparison. + // vB: right-hand side VR for comparison. + kMirOpFusedCmplFloat, + + // @brief Used to do float comparison with greater-than bias. + // @details Unlike cmpg-float, this does not store result of comparison in VR. + // vA: left-hand side VR for comparison. + // vB: right-hand side VR for comparison. + kMirOpFusedCmpgFloat, + + // @brief Used to do double comparison with less-than bias. + // @details Unlike cmpl-double, this does not store result of comparison in VR. + // vA: left-hand side wide VR for comparison. + // vB: right-hand side wide VR for comparison. + kMirOpFusedCmplDouble, + + // @brief Used to do double comparison with greater-than bias. + // @details Unlike cmpl-double, this does not store result of comparison in VR. + // vA: left-hand side wide VR for comparison. + // vB: right-hand side wide VR for comparison. + kMirOpFusedCmpgDouble, + + // @brief Used to do comparison of 64-bit long integers. + // @details Unlike cmp-long, this does not store result of comparison in VR. + // vA: left-hand side wide VR for comparison. + // vB: right-hand side wide VR for comparison. + kMirOpFusedCmpLong, + + // @brief This represents no-op. + kMirOpNop, + + // @brief Do a null check on the object register. + // @details The backends may implement this implicitly or explicitly. This MIR is guaranteed + // to have the correct offset as an exception thrower. + // vA: object register + kMirOpNullCheck, + + kMirOpRangeCheck, + kMirOpDivZeroCheck, + kMirOpCheck, + kMirOpSelect, + + // Vector opcodes: + // TypeSize is an encoded field giving the element type and the vector size. 
+ // It is encoded as OpSize << 16 | (number of bits in vector) + // + // Destination and source are integers that will be interpreted by the + // backend that supports Vector operations. Backends are permitted to support only + // certain vector register sizes. + // + // At this point, only two operand instructions are supported. Three operand instructions + // could be supported by using a bit in TypeSize and arg[0] where needed. + + // @brief MIR to move constant data to a vector register + // vA: destination + // vB: number of bits in register + // args[0]~args[3]: up to 128 bits of data for initialization + kMirOpConstVector, + + // @brief MIR to move a vectorized register to another + // vA: destination + // vB: source + // vC: TypeSize + kMirOpMoveVector, + + // @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedMultiply, + + // @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedAddition, + + // @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedSubtract, + + // @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector. + // vA: destination and source + // vB: amount to shift + // vC: TypeSize + kMirOpPackedShiftLeft, + + // @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector. + // vA: destination and source + // vB: amount to shift + // vC: TypeSize + kMirOpPackedSignedShiftRight, + + // @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector. + // vA: destination and source + // vB: amount to shift + // vC: TypeSize + kMirOpPackedUnsignedShiftRight, + + // @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedAnd, + + // @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedOr, + + // @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector. + // vA: destination and source + // vB: source + // vC: TypeSize + kMirOpPackedXor, + + // @brief Reduce a 128-bit packed element into a single VR by taking lower bits + // @details Instruction does a horizontal addition of the packed elements and then adds it to VR + // vA: destination and source VR (not vector register) + // vB: source (vector register) + // vC: TypeSize + kMirOpPackedAddReduce, + + // @brief Extract a packed element into a single VR. + // vA: destination VR (not vector register) + // vB: source (vector register) + // vC: TypeSize + // arg[0]: The index to use for extraction from vector register (which packed element) + kMirOpPackedReduce, + + // @brief Create a vector value, with all TypeSize values equal to vC + // vA: destination vector register + // vB: source VR (not vector register) + // vC: TypeSize + kMirOpPackedSet, + + // @brief Reserve a range of vector registers. 
+ // vA: Start vector register to reserve. + // vB: Inclusive end vector register to reserve. + // @note: The backend may choose to map vector numbers used in vector opcodes. + // Reserved registers are removed from the list of backend temporary pool. + kMirOpReserveVectorRegisters, + + // @brief Free a range of reserved vector registers + // vA: Start vector register to unreserve. + // vB: Inclusive end vector register to unreserve. + // @note: All currently reserved vector registers are returned to the temporary pool. + kMirOpReturnVectorRegisters, + + // @brief Create a memory barrier. + // vA: a constant defined by enum MemBarrierKind. + kMirOpMemBarrier, + + // @brief Used to fill a vector register with array values. + // @details Just as with normal arrays, access on null object register must ensure NullPointerException + // and invalid index must ensure ArrayIndexOutOfBoundsException. Exception behavior must be the same + // as the aget it replaced and must happen at same index. Therefore, it is generally recommended that + // before using this MIR, it is proven that exception is guaranteed to not be thrown and marked with + // MIR_IGNORE_NULL_CHECK and MIR_IGNORE_RANGE_CHECK. + // vA: destination vector register + // vB: array register + // vC: index register + // arg[0]: TypeSize (most other vector opcodes have this in vC) + kMirOpPackedArrayGet, + + // @brief Used to store a vector register into array. + // @details Just as with normal arrays, access on null object register must ensure NullPointerException + // and invalid index must ensure ArrayIndexOutOfBoundsException. Exception behavior must be the same + // as the aget it replaced and must happen at same index. Therefore, it is generally recommended that + // before using this MIR, it is proven that exception is guaranteed to not be thrown and marked with + // MIR_IGNORE_NULL_CHECK and MIR_IGNORE_RANGE_CHECK. + // vA: source vector register + // vB: array register + // vC: index register + // arg[0]: TypeSize (most other vector opcodes have this in vC) + kMirOpPackedArrayPut, + + // @brief Multiply-add integer. + // vA: destination + // vB: multiplicand + // vC: multiplier + // arg[0]: addend + kMirOpMaddInt, + + // @brief Multiply-subtract integer. + // vA: destination + // vB: multiplicand + // vC: multiplier + // arg[0]: minuend + kMirOpMsubInt, + + // @brief Multiply-add long. + // vA: destination + // vB: multiplicand + // vC: multiplier + // arg[0]: addend + kMirOpMaddLong, + + // @brief Multiply-subtract long. + // vA: destination + // vB: multiplicand + // vC: multiplier + // arg[0]: minuend + kMirOpMsubLong, + + kMirOpLast, +}; + +enum MIROptimizationFlagPositions { + kMIRIgnoreNullCheck = 0, + kMIRIgnoreRangeCheck, + kMIRIgnoreCheckCast, + kMIRStoreNonNullValue, // Storing non-null value, always mark GC card. + kMIRClassIsInitialized, + kMIRClassIsInDexCache, + kMirIgnoreDivZeroCheck, + kMIRInlined, // Invoke is inlined (ie dead). + kMIRInlinedPred, // Invoke is inlined via prediction. + kMIRCallee, // Instruction is inlined from callee. + kMIRIgnoreSuspendCheck, + kMIRDup, + kMIRMark, // Temporary node mark can be used by + // opt passes for their private needs. + kMIRStoreNonTemporal, + kMIRLastMIRFlag, +}; + +// For successor_block_list. 
+enum BlockListType { + kNotUsed = 0, + kCatch, + kPackedSwitch, + kSparseSwitch, +}; +std::ostream& operator<<(std::ostream& os, const BlockListType& rhs); + +enum AssemblerStatus { + kSuccess, + kRetryAll, +}; +std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs); + +enum OpSize { + kWord, // Natural word size of target (32/64). + k32, + k64, + kReference, // Object reference; compressed on 64-bit targets. + kSingle, + kDouble, + kUnsignedHalf, + kSignedHalf, + kUnsignedByte, + kSignedByte, +}; +std::ostream& operator<<(std::ostream& os, const OpSize& kind); + +enum OpKind { + kOpMov, + kOpCmov, + kOpMvn, + kOpCmp, + kOpLsl, + kOpLsr, + kOpAsr, + kOpRor, + kOpNot, + kOpAnd, + kOpOr, + kOpXor, + kOpNeg, + kOpAdd, + kOpAdc, + kOpSub, + kOpSbc, + kOpRsub, + kOpMul, + kOpDiv, + kOpRem, + kOpBic, + kOpCmn, + kOpTst, + kOpRev, + kOpRevsh, + kOpBkpt, + kOpBlx, + kOpPush, + kOpPop, + kOp2Char, + kOp2Short, + kOp2Byte, + kOpCondBr, + kOpUncondBr, + kOpBx, + kOpInvalid, +}; +std::ostream& operator<<(std::ostream& os, const OpKind& rhs); + +enum MoveType { + kMov8GP, // Move 8-bit general purpose register. + kMov16GP, // Move 16-bit general purpose register. + kMov32GP, // Move 32-bit general purpose register. + kMov64GP, // Move 64-bit general purpose register. + kMov32FP, // Move 32-bit FP register. + kMov64FP, // Move 64-bit FP register. + kMovLo64FP, // Move low 32-bits of 64-bit FP register. + kMovHi64FP, // Move high 32-bits of 64-bit FP register. + kMovU128FP, // Move 128-bit FP register to/from possibly unaligned region. + kMov128FP = kMovU128FP, + kMovA128FP, // Move 128-bit FP register to/from region surely aligned to 16-bytes. + kMovLo128FP, // Move low 64-bits of 128-bit FP register. + kMovHi128FP, // Move high 64-bits of 128-bit FP register. 
+};
+std::ostream& operator<<(std::ostream& os, const MoveType& kind);
+
+enum ConditionCode {
+ kCondEq, // equal
+ kCondNe, // not equal
+ kCondCs, // carry set
+ kCondCc, // carry clear
+ kCondUlt, // unsigned less than
+ kCondUge, // unsigned greater than or same
+ kCondMi, // minus
+ kCondPl, // plus, positive or zero
+ kCondVs, // overflow
+ kCondVc, // no overflow
+ kCondHi, // unsigned greater than
+ kCondLs, // unsigned lower or same
+ kCondGe, // signed greater than or equal
+ kCondLt, // signed less than
+ kCondGt, // signed greater than
+ kCondLe, // signed less than or equal
+ kCondAl, // always
+ kCondNv, // never
+};
+std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
+
+// Target specific condition encodings
+enum ArmConditionCode {
+ kArmCondEq = 0x0, // 0000
+ kArmCondNe = 0x1, // 0001
+ kArmCondCs = 0x2, // 0010
+ kArmCondCc = 0x3, // 0011
+ kArmCondMi = 0x4, // 0100
+ kArmCondPl = 0x5, // 0101
+ kArmCondVs = 0x6, // 0110
+ kArmCondVc = 0x7, // 0111
+ kArmCondHi = 0x8, // 1000
+ kArmCondLs = 0x9, // 1001
+ kArmCondGe = 0xa, // 1010
+ kArmCondLt = 0xb, // 1011
+ kArmCondGt = 0xc, // 1100
+ kArmCondLe = 0xd, // 1101
+ kArmCondAl = 0xe, // 1110
+ kArmCondNv = 0xf, // 1111
+};
+std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
+
+enum X86ConditionCode {
+ kX86CondO = 0x0, // overflow
+ kX86CondNo = 0x1, // not overflow
+
+ kX86CondB = 0x2, // below
+ kX86CondNae = kX86CondB, // not-above-equal
+ kX86CondC = kX86CondB, // carry
+
+ kX86CondNb = 0x3, // not-below
+ kX86CondAe = kX86CondNb, // above-equal
+ kX86CondNc = kX86CondNb, // not-carry
+
+ kX86CondZ = 0x4, // zero
+ kX86CondEq = kX86CondZ, // equal
+
+ kX86CondNz = 0x5, // not-zero
+ kX86CondNe = kX86CondNz, // not-equal
+
+ kX86CondBe = 0x6, // below-equal
+ kX86CondNa = kX86CondBe, // not-above
+
+ kX86CondNbe = 0x7, // not-below-equal
+ kX86CondA = kX86CondNbe, // above
+
+ kX86CondS = 0x8, // sign
+ kX86CondNs = 0x9, // not-sign
+
+ kX86CondP = 0xa, // 8-bit parity even
+ kX86CondPE = kX86CondP,
+
+ kX86CondNp = 0xb, // 8-bit parity odd
+ kX86CondPo = kX86CondNp,
+
+ kX86CondL = 0xc, // less-than
+ kX86CondNge = kX86CondL, // not-greater-equal
+
+ kX86CondNl = 0xd, // not-less-than
+ kX86CondGe = kX86CondNl, // greater-equal
+
+ kX86CondLe = 0xe, // less-than-equal
+ kX86CondNg = kX86CondLe, // not-greater
+
+ kX86CondNle = 0xf, // not-less-than-equal
+ kX86CondG = kX86CondNle, // greater
+};
+std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
+
+enum DividePattern {
+ DivideNone,
+ Divide3,
+ Divide5,
+ Divide7,
+};
+std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
+
+/**
+ * @brief Memory barrier types (see "The JSR-133 Cookbook for Compiler Writers").
+ * @details We define the combined barrier types that are actually required
+ * by the Java Memory Model, rather than using exactly the terminology from
+ * the JSR-133 cookbook. These should, in many cases, be replaced by acquire/release
+ * primitives. Note that the JSR-133 cookbook generally does not deal with
+ * store atomicity issues, and the recipes there are not always entirely sufficient.
+ * The current recipe is as follows:
+ * -# Use AnyStore ~= (LoadStore | StoreStore) ~= release barrier before volatile store.
+ * -# Use AnyAny barrier after volatile store. (StoreLoad is as expensive.)
+ * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrier after each volatile load.
+ * -# Use StoreStore barrier after all stores but before return from any constructor whose
+ * class has final fields.
+ * -# Use NTStoreStore to order non-temporal stores with respect to all later
+ * store-to-memory instructions. Only generated together with non-temporal stores.
+ */
+enum MemBarrierKind {
+ kAnyStore,
+ kLoadAny,
+ kStoreStore,
+ kAnyAny,
+ kNTStoreStore,
+ kLastBarrierKind = kNTStoreStore
+};
+std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
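+
+// A hedged illustration of how the recipe above maps onto these kinds for a
+// volatile field (GenMemBarrier/StoreWordDisp/LoadWordDisp are assumed
+// backend-style pseudo-code here, not declarations from this header):
+//   GenMemBarrier(kAnyStore);          // release barrier before the volatile store
+//   StoreWordDisp(obj, offset, src);   // the volatile store itself
+//   GenMemBarrier(kAnyAny);            // full barrier after the volatile store
+//   LoadWordDisp(obj, offset, dest);   // a volatile load
+//   GenMemBarrier(kLoadAny);           // acquire barrier after the volatile load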
+
+enum OpFeatureFlags {
+ kIsBranch = 0,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsQuinOp,
+ kIsSextupleOp,
+ kIsIT,
+ kIsMoveOp,
+ kMemLoad,
+ kMemStore,
+ kMemVolatile,
+ kMemScaledx0,
+ kMemScaledx2,
+ kMemScaledx4,
+ kPCRelFixup, // x86 FIXME: add NEEDS_FIXUP to instruction attributes.
+ kRegDef0,
+ kRegDef1,
+ kRegDef2,
+ kRegDefA,
+ kRegDefD,
+ kRegDefFPCSList0,
+ kRegDefFPCSList2,
+ kRegDefList0,
+ kRegDefList1,
+ kRegDefList2,
+ kRegDefLR,
+ kRegDefSP,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUse4,
+ kRegUseA,
+ kRegUseC,
+ kRegUseD,
+ kRegUseB,
+ kRegUseFPCSList0,
+ kRegUseFPCSList2,
+ kRegUseList0,
+ kRegUseList1,
+ kRegUseLR,
+ kRegUsePC,
+ kRegUseSP,
+ kSetsCCodes,
+ kUsesCCodes,
+ kUseFpStack,
+ kUseHi,
+ kUseLo,
+ kDefHi,
+ kDefLo
+};
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& rhs);
+
+enum SelectInstructionKind {
+ kSelectNone,
+ kSelectConst,
+ kSelectMove,
+ kSelectGoto
+};
+std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
+
+// LIR fixup kinds for Arm and X86.
+enum FixupKind {
+ kFixupNone,
+ kFixupLabel, // For labels we just adjust the offset.
+ kFixupLoad, // Mostly for immediates.
+ kFixupVLoad, // FP load which *may* be pc-relative.
+ kFixupCBxZ, // Cbz, Cbnz.
+ kFixupTBxZ, // Tbz, Tbnz.
+ kFixupCondBranch, // Conditional branch.
+ kFixupT1Branch, // Thumb1 unconditional branch.
+ kFixupT2Branch, // Thumb2 unconditional branch.
+ kFixupBlx1, // Blx1 (start of Blx1/Blx2 pair).
+ kFixupBl1, // Bl1 (start of Bl1/Bl2 pair).
+ kFixupAdr, // Adr.
+ kFixupMovImmLST, // kThumb2MovImm16LST.
+ kFixupMovImmHST, // kThumb2MovImm16HST.
+ kFixupAlign4, // Align to 4-byte boundary.
+ kFixupA53Erratum835769, // Cortex A53 Erratum 835769.
+ kFixupSwitchTable, // X86_64 packed switch table.
+};
+std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
+
+enum VolatileKind {
+ kNotVolatile, // Load/Store is not volatile.
+ kVolatile // Load/Store is volatile.
+};
+std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
+
+enum WideKind {
+ kNotWide, // Non-wide view.
+ kWide, // Wide view.
+ kRef // Ref width.
+};
+std::ostream& operator<<(std::ostream& os, const WideKind& kind);
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_COMPILER_ENUMS_H_
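Each enum above only declares its stream operator; the definitions live elsewhere in the tree. For orientation, a minimal hand-written sketch of the expected shape (illustrative only; the real definitions may be macro-generated):

    std::ostream& operator<<(std::ostream& os, const VolatileKind& kind) {
      switch (kind) {
        case kNotVolatile: return os << "kNotVolatile";
        case kVolatile:    return os << "kVolatile";
      }
      return os << "VolatileKind[" << static_cast<int>(kind) << "]";  // unexpected value
    }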
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
new file mode 100644
index 000000000..3ce786e00
--- /dev/null
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_to_dex_compiler.h"
+
+#include "art_field-inl.h"
+#include "art_method-inl.h"
+#include "base/logging.h"
+#include "base/mutex.h"
+#include "compiled_method.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "driver/compiler_driver.h"
+#include "driver/dex_compilation_unit.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace optimizer {
+
+// Controls quickening activation.
+const bool kEnableQuickening = true;
+// Controls check-cast elision.
+const bool kEnableCheckCastElision = true;
+
+struct QuickenedInfo {
+ QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+
+ uint32_t dex_pc;
+ uint16_t dex_member_index;
+};
+
+class DexCompiler {
+ public:
+ DexCompiler(art::CompilerDriver& compiler,
+ const DexCompilationUnit& unit,
+ DexToDexCompilationLevel dex_to_dex_compilation_level)
+ : driver_(compiler),
+ unit_(unit),
+ dex_to_dex_compilation_level_(dex_to_dex_compilation_level) {}
+
+ ~DexCompiler() {}
+
+ void Compile();
+
+ const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
+ return quickened_info_;
+ }
+
+ private:
+ const DexFile& GetDexFile() const {
+ return *unit_.GetDexFile();
+ }
+
+ bool PerformOptimizations() const {
+ return dex_to_dex_compilation_level_ >= DexToDexCompilationLevel::kOptimize;
+ }
+
+ // Compiles a RETURN-VOID into a RETURN-VOID-NO-BARRIER when the enclosing
+ // method does not require a constructor barrier.
+ void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In
+ // this case, returns the second NOP instruction pointer. Otherwise, returns
+ // the given "inst".
+ Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a field access into a quick field access.
+ // The field index is replaced by an offset within an Object where we can read
+ // from / write to this field. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the field index is encoded with 16 bits, we can replace it only if the
+ // field offset can be encoded with 16 bits too.
+ void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_put);
+
+ // Compiles a virtual method invocation into a quick virtual method invocation.
+ // The method index is replaced by the vtable index where the corresponding
+ // AbstractMethod can be found. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the method index is encoded with 16 bits, we can replace it only if the
+ // vtable index can be encoded with 16 bits too.
+ void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_range);
+
+ CompilerDriver& driver_;
+ const DexCompilationUnit& unit_;
+ const DexToDexCompilationLevel dex_to_dex_compilation_level_;
+
+ // Filled by the compiler when quickening, in order to encode that information
+ // in the .oat file. The runtime will use that information to get back to the
+ // original opcodes.
+ std::vector<QuickenedInfo> quickened_info_;
+
+ DISALLOW_COPY_AND_ASSIGN(DexCompiler);
+};
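+
+// For reference (an informal sketch of the layout, not a formal specification):
+// GetQuickenedInfo() is later flattened into the vmap table as interleaved
+// ULEB128 pairs, one pair per quickened instruction:
+//   [dex_pc_0, member_index_0, dex_pc_1, member_index_1, ...]
+// so a consumer holding the decoded entries could walk them as
+//   for (size_t i = 0; i + 1 < entries.size(); i += 2) {
+//     uint32_t dex_pc = entries[i];            // where the rewrite happened
+//     uint32_t member_index = entries[i + 1];  // original field/method index
+//   }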
+
+void DexCompiler::Compile() {
+ DCHECK_GE(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kRequired);
+ const DexFile::CodeItem* code_item = unit_.GetCodeItem();
+ const uint16_t* insns = code_item->insns_;
+ const uint32_t insns_size = code_item->insns_size_in_code_units_;
+ Instruction* inst = const_cast<Instruction*>(Instruction::At(insns));
+
+ for (uint32_t dex_pc = 0; dex_pc < insns_size;
+ inst = const_cast<Instruction*>(inst->Next()), dex_pc = inst->GetDexPc(insns)) {
+ switch (inst->Opcode()) {
+ case Instruction::RETURN_VOID:
+ CompileReturnVoid(inst, dex_pc);
+ break;
+
+ case Instruction::CHECK_CAST:
+ inst = CompileCheckCast(inst, dex_pc);
+ break;
+
+ case Instruction::IGET:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false);
+ break;
+
+ case Instruction::IGET_WIDE:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false);
+ break;
+
+ case Instruction::IGET_OBJECT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false);
+ break;
+
+ case Instruction::IGET_BOOLEAN:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN_QUICK, false);
+ break;
+
+ case Instruction::IGET_BYTE:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE_QUICK, false);
+ break;
+
+ case Instruction::IGET_CHAR:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR_QUICK, false);
+ break;
+
+ case Instruction::IGET_SHORT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT_QUICK, false);
+ break;
+
+ case Instruction::IPUT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
+ break;
+
+ case Instruction::IPUT_BOOLEAN:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true);
+ break;
+
+ case Instruction::IPUT_BYTE:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true);
+ break;
+
+ case Instruction::IPUT_CHAR:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true);
+ break;
+
+ case Instruction::IPUT_SHORT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true);
+ break;
+
+ case Instruction::IPUT_WIDE:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE_QUICK, true);
+ break;
+
+ case Instruction::IPUT_OBJECT:
+ CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT_QUICK, true);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL:
+ CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_QUICK, false);
+ break;
+
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, true);
+ break;
+
+ default:
+ // Nothing to do.
+ break;
+ }
+ }
+}
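+
+// To make the rewrites below concrete, an informal before/after for the
+// instance-field path (mnemonics shown loosely; the 22c instruction format is
+// unchanged, only the opcode and the vC operand are rewritten in place):
+//   before: iget v0, v1, field_idx   // resolves the field at runtime
+//   after:  iget-quick v0, v1, off   // direct load from a 16-bit object offset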
+
+void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
+ DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
+ if (unit_.IsConstructor()) {
+ // Are we compiling a non-clinit constructor which needs a barrier?
+ if (!unit_.IsStatic() &&
+ driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
+ unit_.GetClassDefIndex())) {
+ return;
+ }
+ }
+ // Replace RETURN_VOID by RETURN_VOID_NO_BARRIER.
+ VLOG(compiler) << "Replacing " << Instruction::Name(inst->Opcode())
+ << " by " << Instruction::Name(Instruction::RETURN_VOID_NO_BARRIER)
+ << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+ << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+ inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
+}
+
+Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
+ if (!kEnableCheckCastElision || !PerformOptimizations()) {
+ return inst;
+ }
+ if (!driver_.IsSafeCast(&unit_, dex_pc)) {
+ return inst;
+ }
+ // OK, this is a safe cast. Since the "check-cast" instruction size is 2 code
+ // units and a "nop" instruction size is 1 code unit, we need to replace it by
+ // 2 consecutive NOP instructions.
+ // Because the caller loops over instructions by calling Instruction::Next on
+ // the current instruction, we need to return the 2nd NOP instruction. Indeed,
+ // its next instruction is the former check-cast's next instruction.
+ VLOG(compiler) << "Removing " << Instruction::Name(inst->Opcode())
+ << " by replacing it with 2 NOPs at dex pc "
+ << StringPrintf("0x%x", dex_pc) << " in method "
+ << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ // Get to the next instruction, which is the second half of the check-cast,
+ // and replace it by a NOP.
+ inst = const_cast<Instruction*>(inst->Next());
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ return inst;
+}
+
+void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
+ uint32_t dex_pc,
+ Instruction::Code new_opcode,
+ bool is_put) {
+ if (!kEnableQuickening || !PerformOptimizations()) {
+ return;
+ }
+ uint32_t field_idx = inst->VRegC_22c();
+ MemberOffset field_offset(0u);
+ bool is_volatile;
+ bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
+ &field_offset, &is_volatile);
+ if (fast_path && !is_volatile && IsUint<16>(field_offset.Int32Value())) {
+ VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
+ << " to " << Instruction::Name(new_opcode)
+ << " by replacing field index " << field_idx
+ << " by field offset " << field_offset.Int32Value()
+ << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+ << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace the field index by the field offset.
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx));
+ }
+}
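+
+// The IsUint<16>() guards in this file exist because the rewritten operand must
+// fit in a 16-bit vreg slot. An illustrative bound check (values are made up):
+//   IsUint<16>(124);    // true: offset 124 can be quickened
+//   IsUint<16>(70000);  // false: 70000 > 65535, so the access is left untouched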
+
+void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_range) {
+ if (!kEnableQuickening || !PerformOptimizations()) {
+ return;
+ }
+ uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ MethodReference target_method(&GetDexFile(), method_idx);
+ InvokeType invoke_type = kVirtual;
+ InvokeType original_invoke_type = invoke_type;
+ int vtable_idx;
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ // TODO: support devirtualization.
+ const bool kEnableDevirtualization = false;
+ bool fast_path = driver_.ComputeInvokeInfo(&unit_, dex_pc,
+ false, kEnableDevirtualization,
+ &invoke_type,
+ &target_method, &vtable_idx,
+ &direct_code, &direct_method);
+ if (fast_path && original_invoke_type == invoke_type) {
+ if (vtable_idx >= 0 && IsUint<16>(vtable_idx)) {
+ VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
+ << "(" << PrettyMethod(method_idx, GetDexFile(), true) << ")"
+ << " to " << Instruction::Name(new_opcode)
+ << " by replacing method index " << method_idx
+ << " by vtable index " << vtable_idx
+ << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
+ << PrettyMethod(unit_.GetDexMethodIndex(), GetDexFile(), true);
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace the method index by the vtable index.
+ if (is_range) {
+ inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
+ } else {
+ inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+ }
+ quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
+ }
+ }
+}
+
+CompiledMethod* ArtCompileDEX(
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file,
+ DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ DCHECK(driver != nullptr);
+ if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ art::DexCompilationUnit unit(
+ class_loader,
+ class_linker,
+ dex_file,
+ code_item,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ driver->GetVerifiedMethod(&dex_file, method_idx),
+ hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
+ art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level);
+ dex_compiler.Compile();
+ if (dex_compiler.GetQuickenedInfo().empty()) {
+ // No need to create a CompiledMethod if there are no quickened opcodes.
+ return nullptr;
+ }
+
+ // Create a `CompiledMethod`, with the quickened information in the vmap table.
+ Leb128EncodingVector<> builder;
+ for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
+ builder.PushBackUnsigned(info.dex_pc);
+ builder.PushBackUnsigned(info.dex_member_index);
+ }
+ InstructionSet instruction_set = driver->GetInstructionSet();
+ if (instruction_set == kThumb2) {
+ // Don't use the thumb2 instruction set to avoid the one-off code delta.
+ instruction_set = kArm;
+ }
+ return CompiledMethod::SwapAllocCompiledMethod(
+ driver,
+ instruction_set,
+ ArrayRef<const uint8_t>(), // no code
+ 0,
+ 0,
+ 0,
+ ArrayRef<const SrcMapElem>(), // src_mapping_table
+ ArrayRef<const uint8_t>(builder.GetData()), // vmap_table
+ ArrayRef<const uint8_t>(), // cfi data
+ ArrayRef<const LinkerPatch>());
+ }
+ return nullptr;
+}
+
+} // namespace optimizer
+
+} // namespace art
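For orientation, a caller-side sketch of the entry point above (hypothetical wiring; the real call site lives in the compiler driver, which supplies all of these arguments):

    CompiledMethod* compiled = art::optimizer::ArtCompileDEX(
        driver, code_item, access_flags, invoke_type, class_def_idx,
        method_idx, class_loader, dex_file,
        art::optimizer::DexToDexCompilationLevel::kOptimize);
    if (compiled == nullptr) {
      // Either dex-to-dex compilation was disabled for this method or nothing
      // was quickened; the method keeps its original dex bytecode.
    }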
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
new file mode 100644
index 000000000..3fad6d4c9
--- /dev/null
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+
+#include "jni.h"
+
+#include "dex_file.h"
+#include "invoke_type.h"
+
+namespace art {
+
+class CompiledMethod;
+class CompilerDriver;
+
+namespace optimizer {
+
+enum class DexToDexCompilationLevel {
+ kDontDexToDexCompile, // Only meaningful for image-time interpretation.
+ kRequired, // Dex-to-dex compilation required for correctness.
+ kOptimize // Perform required transformations and peep-hole optimizations.
+};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
+
+CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file,
+ DexToDexCompilationLevel dex_to_dex_compilation_level);
+
+} // namespace optimizer
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
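The header above exposes a three-step ladder of compilation levels. A hedged sketch of a driver-side policy (ChooseLevel is a hypothetical helper; the actual selection lives in the compiler driver and weighs more inputs):

    using art::optimizer::DexToDexCompilationLevel;

    DexToDexCompilationLevel ChooseLevel(bool class_verified, bool debuggable) {
      if (debuggable) {
        // Keep the original bytecode so a debugger sees what was written.
        return DexToDexCompilationLevel::kDontDexToDexCompile;
      }
      // Unverified classes get only the transformations needed for correctness.
      return class_verified ? DexToDexCompilationLevel::kOptimize
                            : DexToDexCompilationLevel::kRequired;
    }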
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
new file mode 100644
index 000000000..4a98342bf
--- /dev/null
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -0,0 +1,863 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_file_method_inliner.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex-inl.h"
+#include "driver/compiler_driver.h"
+#include "thread-inl.h"
+#include "dex_instruction-inl.h"
+#include "driver/dex_compilation_unit.h"
+#include "verifier/method_verifier-inl.h"
+
+namespace art {
+
+namespace { // anonymous namespace
+
+static constexpr bool kIntrinsicIsStatic[] = {
+ true, // kIntrinsicDoubleCvt
+ true, // kIntrinsicFloatCvt
+ true, // kIntrinsicFloat2Int
+ true, // kIntrinsicDouble2Long
+ true, // kIntrinsicFloatIsInfinite
+ true, // kIntrinsicDoubleIsInfinite
+ true, // kIntrinsicFloatIsNaN
+ true, // kIntrinsicDoubleIsNaN
+ true, // kIntrinsicReverseBits
+ true, // kIntrinsicReverseBytes
+ true, // kIntrinsicBitCount
+ true, // kIntrinsicCompare
+ true, // kIntrinsicHighestOneBit
+ true, // kIntrinsicLowestOneBit
+ true, // kIntrinsicNumberOfLeadingZeros
+ true, // kIntrinsicNumberOfTrailingZeros
+ true, // kIntrinsicRotateRight
+ true, // kIntrinsicRotateLeft
+ true, // kIntrinsicSignum
+ true, // kIntrinsicAbsInt
+ true, // kIntrinsicAbsLong
+ true, // kIntrinsicAbsFloat
+ true, // kIntrinsicAbsDouble
+ true, // kIntrinsicMinMaxInt
+ true, // kIntrinsicMinMaxLong
+ true, // kIntrinsicMinMaxFloat
+ true, // kIntrinsicMinMaxDouble
+ true, // kIntrinsicCos
+ true, // kIntrinsicSin
+ true, // kIntrinsicAcos
+ true, // kIntrinsicAsin
+ true, // kIntrinsicAtan
+ true, // kIntrinsicAtan2
+ true, // kIntrinsicCbrt
+ true, // kIntrinsicCosh
+ true, // kIntrinsicExp
+ true, // kIntrinsicExpm1
+ true, // kIntrinsicHypot
+ true, // kIntrinsicLog
+ true, // kIntrinsicLog10
+ true, // kIntrinsicNextAfter
+ true, // kIntrinsicSinh
+ true, // kIntrinsicTan
+ true, // kIntrinsicTanh
+ true, // kIntrinsicSqrt
+ true, // kIntrinsicCeil
+ true, // kIntrinsicFloor
+ true, // kIntrinsicRint
+ true, // kIntrinsicRoundFloat
+ true, // kIntrinsicRoundDouble
+ false, // kIntrinsicReferenceGetReferent
+ false, // kIntrinsicCharAt
+ false, // kIntrinsicCompareTo
+ false, // kIntrinsicEquals
+ false, // kIntrinsicGetCharsNoCheck
+ false, // kIntrinsicIsEmptyOrLength
+ false, // kIntrinsicIndexOf
+ true, // kIntrinsicNewStringFromBytes
+ true, // kIntrinsicNewStringFromChars
+ true, // kIntrinsicNewStringFromString
+ true, // kIntrinsicCurrentThread
+ true, // kIntrinsicPeek
+ true, // kIntrinsicPoke
+ false, // kIntrinsicCas
+ false, // kIntrinsicUnsafeGet
+ false, // kIntrinsicUnsafePut
+ false, // kIntrinsicUnsafeGetAndAddInt
+ false, // kIntrinsicUnsafeGetAndAddLong
+ false, // kIntrinsicUnsafeGetAndSetInt
+ false, // kIntrinsicUnsafeGetAndSetLong
+ false, // kIntrinsicUnsafeGetAndSetObject
+ false, // kIntrinsicUnsafeLoadFence
+ false, // kIntrinsicUnsafeStoreFence
+ false, // kIntrinsicUnsafeFullFence
+ true, // kIntrinsicSystemArrayCopyCharArray
+ true, // kIntrinsicSystemArrayCopy
+};
+static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
+ "arraysize of kIntrinsicIsStatic unexpected");
+static_assert(kIntrinsicIsStatic[kIntrinsicDoubleCvt], "DoubleCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatCvt], "FloatCvt must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloat2Int], "Float2Int must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicDouble2Long], "Double2Long must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatIsInfinite], "FloatIsInfinite must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicDoubleIsInfinite], "DoubleIsInfinite must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicFloatIsNaN], "FloatIsNaN must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicDoubleIsNaN], "DoubleIsNaN must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicReverseBits], "ReverseBits must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicReverseBytes], "ReverseBytes must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicBitCount], "BitCount must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCompare], "Compare must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicHighestOneBit], "HighestOneBit must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicLowestOneBit], "LowestOneBit must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfLeadingZeros], + "NumberOfLeadingZeros must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNumberOfTrailingZeros], + "NumberOfTrailingZeros must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicRotateRight], "RotateRight must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicRotateLeft], "RotateLeft must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSignum], "Signum must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAbsInt], "AbsInt must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAbsLong], "AbsLong must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAbsFloat], "AbsFloat must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAbsDouble], "AbsDouble must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxInt], "MinMaxInt must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxLong], "MinMaxLong must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxFloat], "MinMaxFloat must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicMinMaxDouble], "MinMaxDouble must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCos], "Cos must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSin], "Sin must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAcos], "Acos must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAsin], "Asin must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAtan], "Atan must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicAtan2], "Atan2 must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCbrt], "Cbrt must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCosh], "Cosh must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicExp], "Exp must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicExpm1], "Expm1 must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicHypot], "Hypot must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicLog], "Log must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicLog10], "Log10 must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNextAfter], "NextAfter must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSinh], "Sinh must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicTan], "Tan must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicTanh], "Tanh must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSqrt], "Sqrt must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCeil], "Ceil must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicFloor], "Floor must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicRint], "Rint must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicRoundFloat], "RoundFloat must be static"); 
+static_assert(kIntrinsicIsStatic[kIntrinsicRoundDouble], "RoundDouble must be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicReferenceGetReferent], "Get must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicCharAt], "CharAt must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicCompareTo], "CompareTo must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicEquals], "String equals must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicGetCharsNoCheck], "GetCharsNoCheck must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], "IsEmptyOrLength must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicIndexOf], "IndexOf must not be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromBytes], + "NewStringFromBytes must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromChars], + "NewStringFromChars must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicNewStringFromString], + "NewStringFromString must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicCurrentThread], "CurrentThread must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicPeek], "Peek must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicCas], "Cas must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndAddInt], "UnsafeGetAndAddInt must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndAddLong], "UnsafeGetAndAddLong must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetInt], "UnsafeGetAndSetInt must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetLong], "UnsafeGetAndSetLong must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetObject], "UnsafeGetAndSetObject must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeLoadFence], "UnsafeLoadFence must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeStoreFence], "UnsafeStoreFence must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeFullFence], "UnsafeFullFence must not be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray], + "SystemArrayCopyCharArray must be static"); +static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopy], + "SystemArrayCopy must be static"); + +} // anonymous namespace + +const uint32_t DexFileMethodInliner::kIndexUnresolved; +const char* const DexFileMethodInliner::kClassCacheNames[] = { + "Z", // kClassCacheBoolean + "B", // kClassCacheByte + "C", // kClassCacheChar + "S", // kClassCacheShort + "I", // kClassCacheInt + "J", // kClassCacheLong + "F", // kClassCacheFloat + "D", // kClassCacheDouble + "V", // kClassCacheVoid + "[B", // kClassCacheJavaLangByteArray + "[C", // kClassCacheJavaLangCharArray + "[I", // kClassCacheJavaLangIntArray + "Ljava/lang/Object;", // kClassCacheJavaLangObject + "Ljava/lang/ref/Reference;", // kClassCacheJavaLangRefReference + "Ljava/lang/String;", // kClassCacheJavaLangString + "Ljava/lang/StringBuffer;", // kClassCacheJavaLangStringBuffer + "Ljava/lang/StringBuilder;", // kClassCacheJavaLangStringBuilder + "Ljava/lang/StringFactory;", // kClassCacheJavaLangStringFactory + "Ljava/lang/Double;", // 
kClassCacheJavaLangDouble
+ "Ljava/lang/Float;", // kClassCacheJavaLangFloat
+ "Ljava/lang/Integer;", // kClassCacheJavaLangInteger
+ "Ljava/lang/Long;", // kClassCacheJavaLangLong
+ "Ljava/lang/Short;", // kClassCacheJavaLangShort
+ "Ljava/lang/Math;", // kClassCacheJavaLangMath
+ "Ljava/lang/StrictMath;", // kClassCacheJavaLangStrictMath
+ "Ljava/lang/Thread;", // kClassCacheJavaLangThread
+ "Ljava/nio/charset/Charset;", // kClassCacheJavaNioCharsetCharset
+ "Llibcore/io/Memory;", // kClassCacheLibcoreIoMemory
+ "Lsun/misc/Unsafe;", // kClassCacheSunMiscUnsafe
+ "Ljava/lang/System;", // kClassCacheJavaLangSystem
+};
+
+const char* const DexFileMethodInliner::kNameCacheNames[] = {
+ "reverse", // kNameCacheReverse
+ "reverseBytes", // kNameCacheReverseBytes
+ "doubleToRawLongBits", // kNameCacheDoubleToRawLongBits
+ "longBitsToDouble", // kNameCacheLongBitsToDouble
+ "floatToRawIntBits", // kNameCacheFloatToRawIntBits
+ "intBitsToFloat", // kNameCacheIntBitsToFloat
+ "abs", // kNameCacheAbs
+ "max", // kNameCacheMax
+ "min", // kNameCacheMin
+ "cos", // kNameCacheCos
+ "sin", // kNameCacheSin
+ "acos", // kNameCacheAcos
+ "asin", // kNameCacheAsin
+ "atan", // kNameCacheAtan
+ "atan2", // kNameCacheAtan2
+ "cbrt", // kNameCacheCbrt
+ "cosh", // kNameCacheCosh
+ "exp", // kNameCacheExp
+ "expm1", // kNameCacheExpm1
+ "hypot", // kNameCacheHypot
+ "log", // kNameCacheLog
+ "log10", // kNameCacheLog10
+ "nextAfter", // kNameCacheNextAfter
+ "sinh", // kNameCacheSinh
+ "tan", // kNameCacheTan
+ "tanh", // kNameCacheTanh
+ "sqrt", // kNameCacheSqrt
+ "ceil", // kNameCacheCeil
+ "floor", // kNameCacheFloor
+ "rint", // kNameCacheRint
+ "round", // kNameCacheRound
+ "getReferent", // kNameCacheReferenceGet
+ "charAt", // kNameCacheCharAt
+ "compareTo", // kNameCacheCompareTo
+ "equals", // kNameCacheEquals
+ "getCharsNoCheck", // kNameCacheGetCharsNoCheck
+ "isEmpty", // kNameCacheIsEmpty
+ "floatToIntBits", // kNameCacheFloatToIntBits
+ "doubleToLongBits", // kNameCacheDoubleToLongBits
+ "isInfinite", // kNameCacheIsInfinite
+ "isNaN", // kNameCacheIsNaN
+ "indexOf", // kNameCacheIndexOf
+ "length", // kNameCacheLength
+ "<init>", // kNameCacheInit
+ "newStringFromBytes", // kNameCacheNewStringFromBytes
+ "newStringFromChars", // kNameCacheNewStringFromChars
+ "newStringFromString", // kNameCacheNewStringFromString
+ "currentThread", // kNameCacheCurrentThread
+ "peekByte", // kNameCachePeekByte
+ "peekIntNative", // kNameCachePeekIntNative
+ "peekLongNative", // kNameCachePeekLongNative
+ "peekShortNative", // kNameCachePeekShortNative
+ "pokeByte", // kNameCachePokeByte
+ "pokeIntNative", // kNameCachePokeIntNative
+ "pokeLongNative", // kNameCachePokeLongNative
+ "pokeShortNative", // kNameCachePokeShortNative
+ "compareAndSwapInt", // kNameCacheCompareAndSwapInt
+ "compareAndSwapLong", // kNameCacheCompareAndSwapLong
+ "compareAndSwapObject", // kNameCacheCompareAndSwapObject
+ "getInt", // kNameCacheGetInt
+ "getIntVolatile", // kNameCacheGetIntVolatile
+ "putInt", // kNameCachePutInt
+ "putIntVolatile", // kNameCachePutIntVolatile
+ "putOrderedInt", // kNameCachePutOrderedInt
+ "getLong", // kNameCacheGetLong
+ "getLongVolatile", // kNameCacheGetLongVolatile
+ "putLong", // kNameCachePutLong
+ "putLongVolatile", // kNameCachePutLongVolatile
+ "putOrderedLong", // kNameCachePutOrderedLong
+ "getObject", // kNameCacheGetObject
+ "getObjectVolatile", // kNameCacheGetObjectVolatile
+ "putObject", // kNameCachePutObject
+ "putObjectVolatile", // kNameCachePutObjectVolatile
+ "putOrderedObject", //
kNameCachePutOrderedObject + "getAndAddInt", // kNameCacheGetAndAddInt, + "getAndAddLong", // kNameCacheGetAndAddLong, + "getAndSetInt", // kNameCacheGetAndSetInt, + "getAndSetLong", // kNameCacheGetAndSetLong, + "getAndSetObject", // kNameCacheGetAndSetObject, + "loadFence", // kNameCacheLoadFence, + "storeFence", // kNameCacheStoreFence, + "fullFence", // kNameCacheFullFence, + "arraycopy", // kNameCacheArrayCopy + "bitCount", // kNameCacheBitCount + "compare", // kNameCacheCompare + "highestOneBit", // kNameCacheHighestOneBit + "lowestOneBit", // kNameCacheLowestOneBit + "numberOfLeadingZeros", // kNameCacheNumberOfLeadingZeros + "numberOfTrailingZeros", // kNameCacheNumberOfTrailingZeros + "rotateRight", // kNameCacheRotateRight + "rotateLeft", // kNameCacheRotateLeft + "signum", // kNameCacheSignum +}; + +const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { + // kProtoCacheI_I + { kClassCacheInt, 1, { kClassCacheInt } }, + // kProtoCacheJ_J + { kClassCacheLong, 1, { kClassCacheLong } }, + // kProtoCacheS_S + { kClassCacheShort, 1, { kClassCacheShort } }, + // kProtoCacheD_D + { kClassCacheDouble, 1, { kClassCacheDouble } }, + // kProtoCacheDD_D + { kClassCacheDouble, 2, { kClassCacheDouble, kClassCacheDouble } }, + // kProtoCacheF_F + { kClassCacheFloat, 1, { kClassCacheFloat } }, + // kProtoCacheFF_F + { kClassCacheFloat, 2, { kClassCacheFloat, kClassCacheFloat } }, + // kProtoCacheD_J + { kClassCacheLong, 1, { kClassCacheDouble } }, + // kProtoCacheD_Z + { kClassCacheBoolean, 1, { kClassCacheDouble } }, + // kProtoCacheJ_D + { kClassCacheDouble, 1, { kClassCacheLong } }, + // kProtoCacheF_I + { kClassCacheInt, 1, { kClassCacheFloat } }, + // kProtoCacheF_Z + { kClassCacheBoolean, 1, { kClassCacheFloat } }, + // kProtoCacheI_F + { kClassCacheFloat, 1, { kClassCacheInt } }, + // kProtoCacheII_I + { kClassCacheInt, 2, { kClassCacheInt, kClassCacheInt } }, + // kProtoCacheI_C + { kClassCacheChar, 1, { kClassCacheInt } }, + // kProtoCacheString_I + { kClassCacheInt, 1, { kClassCacheJavaLangString } }, + // kProtoCache_Z + { kClassCacheBoolean, 0, { } }, + // kProtoCache_I + { kClassCacheInt, 0, { } }, + // kProtoCache_Object + { kClassCacheJavaLangObject, 0, { } }, + // kProtoCache_Thread + { kClassCacheJavaLangThread, 0, { } }, + // kProtoCacheJ_B + { kClassCacheByte, 1, { kClassCacheLong } }, + // kProtoCacheJ_I + { kClassCacheInt, 1, { kClassCacheLong } }, + // kProtoCacheJ_S + { kClassCacheShort, 1, { kClassCacheLong } }, + // kProtoCacheJB_V + { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheByte } }, + // kProtoCacheJI_V + { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheInt } }, + // kProtoCacheJJ_J + { kClassCacheLong, 2, { kClassCacheLong, kClassCacheLong } }, + // kProtoCacheJJ_I + { kClassCacheInt, 2, { kClassCacheLong, kClassCacheLong } }, + // kProtoCacheJJ_V + { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheLong } }, + // kProtoCacheJS_V + { kClassCacheVoid, 2, { kClassCacheLong, kClassCacheShort } }, + // kProtoCacheObject_Z + { kClassCacheBoolean, 1, { kClassCacheJavaLangObject } }, + // kProtoCacheJI_J + { kClassCacheLong, 2, { kClassCacheLong, kClassCacheInt } }, + // kProtoCacheObjectJII_Z + { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong, + kClassCacheInt, kClassCacheInt } }, + // kProtoCacheObjectJJJ_Z + { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, kClassCacheLong, + kClassCacheLong, kClassCacheLong } }, + // kProtoCacheObjectJObjectObject_Z + { kClassCacheBoolean, 4, { kClassCacheJavaLangObject, 
kClassCacheLong, + kClassCacheJavaLangObject, kClassCacheJavaLangObject } }, + // kProtoCacheObjectJ_I + { kClassCacheInt, 2, { kClassCacheJavaLangObject, kClassCacheLong } }, + // kProtoCacheObjectJI_I + { kClassCacheInt, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt } }, + // kProtoCacheObjectJI_V + { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt } }, + // kProtoCacheObjectJ_J + { kClassCacheLong, 2, { kClassCacheJavaLangObject, kClassCacheLong } }, + // kProtoCacheObjectJJ_J + { kClassCacheLong, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheLong } }, + // kProtoCacheObjectJJ_V + { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheLong } }, + // kProtoCacheObjectJ_Object + { kClassCacheJavaLangObject, 2, { kClassCacheJavaLangObject, kClassCacheLong } }, + // kProtoCacheObjectJObject_V + { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, + kClassCacheJavaLangObject } }, + // kProtoCacheObjectJObject_Object + { kClassCacheJavaLangObject, 3, { kClassCacheJavaLangObject, kClassCacheLong, + kClassCacheJavaLangObject } }, + // kProtoCacheCharArrayICharArrayII_V + { kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt, + kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} }, + // kProtoCacheObjectIObjectII_V + { kClassCacheVoid, 5, {kClassCacheJavaLangObject, kClassCacheInt, + kClassCacheJavaLangObject, kClassCacheInt, kClassCacheInt} }, + // kProtoCacheIICharArrayI_V + { kClassCacheVoid, 4, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray, + kClassCacheInt } }, + // kProtoCacheByteArrayIII_String + { kClassCacheJavaLangString, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt, + kClassCacheInt } }, + // kProtoCacheIICharArray_String + { kClassCacheJavaLangString, 3, { kClassCacheInt, kClassCacheInt, + kClassCacheJavaLangCharArray } }, + // kProtoCacheString_String + { kClassCacheJavaLangString, 1, { kClassCacheJavaLangString } }, + // kProtoCache_V + { kClassCacheVoid, 0, { } }, + // kProtoCacheByteArray_V + { kClassCacheVoid, 1, { kClassCacheJavaLangByteArray } }, + // kProtoCacheByteArrayI_V + { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheInt } }, + // kProtoCacheByteArrayII_V + { kClassCacheVoid, 3, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt } }, + // kProtoCacheByteArrayIII_V + { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt, + kClassCacheInt } }, + // kProtoCacheByteArrayIIString_V + { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt, + kClassCacheJavaLangString } }, + // kProtoCacheByteArrayString_V + { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaLangString } }, + // kProtoCacheByteArrayIICharset_V + { kClassCacheVoid, 4, { kClassCacheJavaLangByteArray, kClassCacheInt, kClassCacheInt, + kClassCacheJavaNioCharsetCharset } }, + // kProtoCacheByteArrayCharset_V + { kClassCacheVoid, 2, { kClassCacheJavaLangByteArray, kClassCacheJavaNioCharsetCharset } }, + // kProtoCacheCharArray_V + { kClassCacheVoid, 1, { kClassCacheJavaLangCharArray } }, + // kProtoCacheCharArrayII_V + { kClassCacheVoid, 3, { kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt } }, + // kProtoCacheIICharArray_V + { kClassCacheVoid, 3, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray } }, + // kProtoCacheIntArrayII_V + { kClassCacheVoid, 3, { kClassCacheJavaLangIntArray, kClassCacheInt, kClassCacheInt } }, + 
// kProtoCacheString_V + { kClassCacheVoid, 1, { kClassCacheJavaLangString } }, + // kProtoCacheStringBuffer_V + { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuffer } }, + // kProtoCacheStringBuilder_V + { kClassCacheVoid, 1, { kClassCacheJavaLangStringBuilder } }, +}; + +const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods[] = { +#define INTRINSIC(c, n, p, o, d) \ + { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineIntrinsic, { d } } } + + INTRINSIC(JavaLangDouble, DoubleToRawLongBits, D_J, kIntrinsicDoubleCvt, 0), + INTRINSIC(JavaLangDouble, LongBitsToDouble, J_D, kIntrinsicDoubleCvt, kIntrinsicFlagToFloatingPoint), + INTRINSIC(JavaLangFloat, FloatToRawIntBits, F_I, kIntrinsicFloatCvt, 0), + INTRINSIC(JavaLangFloat, IntBitsToFloat, I_F, kIntrinsicFloatCvt, kIntrinsicFlagToFloatingPoint), + + INTRINSIC(JavaLangFloat, FloatToIntBits, F_I, kIntrinsicFloat2Int, 0), + INTRINSIC(JavaLangDouble, DoubleToLongBits, D_J, kIntrinsicDouble2Long, 0), + + INTRINSIC(JavaLangFloat, IsInfinite, F_Z, kIntrinsicFloatIsInfinite, 0), + INTRINSIC(JavaLangDouble, IsInfinite, D_Z, kIntrinsicDoubleIsInfinite, 0), + INTRINSIC(JavaLangFloat, IsNaN, F_Z, kIntrinsicFloatIsNaN, 0), + INTRINSIC(JavaLangDouble, IsNaN, D_Z, kIntrinsicDoubleIsNaN, 0), + + INTRINSIC(JavaLangInteger, ReverseBytes, I_I, kIntrinsicReverseBytes, k32), + INTRINSIC(JavaLangLong, ReverseBytes, J_J, kIntrinsicReverseBytes, k64), + INTRINSIC(JavaLangShort, ReverseBytes, S_S, kIntrinsicReverseBytes, kSignedHalf), + INTRINSIC(JavaLangInteger, Reverse, I_I, kIntrinsicReverseBits, k32), + INTRINSIC(JavaLangLong, Reverse, J_J, kIntrinsicReverseBits, k64), + + INTRINSIC(JavaLangInteger, BitCount, I_I, kIntrinsicBitCount, k32), + INTRINSIC(JavaLangLong, BitCount, J_I, kIntrinsicBitCount, k64), + INTRINSIC(JavaLangInteger, Compare, II_I, kIntrinsicCompare, k32), + INTRINSIC(JavaLangLong, Compare, JJ_I, kIntrinsicCompare, k64), + INTRINSIC(JavaLangInteger, HighestOneBit, I_I, kIntrinsicHighestOneBit, k32), + INTRINSIC(JavaLangLong, HighestOneBit, J_J, kIntrinsicHighestOneBit, k64), + INTRINSIC(JavaLangInteger, LowestOneBit, I_I, kIntrinsicLowestOneBit, k32), + INTRINSIC(JavaLangLong, LowestOneBit, J_J, kIntrinsicLowestOneBit, k64), + INTRINSIC(JavaLangInteger, NumberOfLeadingZeros, I_I, kIntrinsicNumberOfLeadingZeros, k32), + INTRINSIC(JavaLangLong, NumberOfLeadingZeros, J_I, kIntrinsicNumberOfLeadingZeros, k64), + INTRINSIC(JavaLangInteger, NumberOfTrailingZeros, I_I, kIntrinsicNumberOfTrailingZeros, k32), + INTRINSIC(JavaLangLong, NumberOfTrailingZeros, J_I, kIntrinsicNumberOfTrailingZeros, k64), + INTRINSIC(JavaLangInteger, Signum, I_I, kIntrinsicSignum, k32), + INTRINSIC(JavaLangLong, Signum, J_I, kIntrinsicSignum, k64), + + INTRINSIC(JavaLangMath, Abs, I_I, kIntrinsicAbsInt, 0), + INTRINSIC(JavaLangStrictMath, Abs, I_I, kIntrinsicAbsInt, 0), + INTRINSIC(JavaLangMath, Abs, J_J, kIntrinsicAbsLong, 0), + INTRINSIC(JavaLangStrictMath, Abs, J_J, kIntrinsicAbsLong, 0), + INTRINSIC(JavaLangMath, Abs, F_F, kIntrinsicAbsFloat, 0), + INTRINSIC(JavaLangStrictMath, Abs, F_F, kIntrinsicAbsFloat, 0), + INTRINSIC(JavaLangMath, Abs, D_D, kIntrinsicAbsDouble, 0), + INTRINSIC(JavaLangStrictMath, Abs, D_D, kIntrinsicAbsDouble, 0), + INTRINSIC(JavaLangMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin), + INTRINSIC(JavaLangStrictMath, Min, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMin), + INTRINSIC(JavaLangMath, Max, II_I, kIntrinsicMinMaxInt, kIntrinsicFlagMax), + INTRINSIC(JavaLangStrictMath, Max, II_I, 
kIntrinsicMinMaxInt, kIntrinsicFlagMax), + INTRINSIC(JavaLangMath, Min, JJ_J, kIntrinsicMinMaxLong, kIntrinsicFlagMin), + INTRINSIC(JavaLangStrictMath, Min, JJ_J, kIntrinsicMinMaxLong, kIntrinsicFlagMin), + INTRINSIC(JavaLangMath, Max, JJ_J, kIntrinsicMinMaxLong, kIntrinsicFlagMax), + INTRINSIC(JavaLangStrictMath, Max, JJ_J, kIntrinsicMinMaxLong, kIntrinsicFlagMax), + INTRINSIC(JavaLangMath, Min, FF_F, kIntrinsicMinMaxFloat, kIntrinsicFlagMin), + INTRINSIC(JavaLangStrictMath, Min, FF_F, kIntrinsicMinMaxFloat, kIntrinsicFlagMin), + INTRINSIC(JavaLangMath, Max, FF_F, kIntrinsicMinMaxFloat, kIntrinsicFlagMax), + INTRINSIC(JavaLangStrictMath, Max, FF_F, kIntrinsicMinMaxFloat, kIntrinsicFlagMax), + INTRINSIC(JavaLangMath, Min, DD_D, kIntrinsicMinMaxDouble, kIntrinsicFlagMin), + INTRINSIC(JavaLangStrictMath, Min, DD_D, kIntrinsicMinMaxDouble, kIntrinsicFlagMin), + INTRINSIC(JavaLangMath, Max, DD_D, kIntrinsicMinMaxDouble, kIntrinsicFlagMax), + INTRINSIC(JavaLangStrictMath, Max, DD_D, kIntrinsicMinMaxDouble, kIntrinsicFlagMax), + + INTRINSIC(JavaLangMath, Cos, D_D, kIntrinsicCos, 0), + INTRINSIC(JavaLangMath, Sin, D_D, kIntrinsicSin, 0), + INTRINSIC(JavaLangMath, Acos, D_D, kIntrinsicAcos, 0), + INTRINSIC(JavaLangMath, Asin, D_D, kIntrinsicAsin, 0), + INTRINSIC(JavaLangMath, Atan, D_D, kIntrinsicAtan, 0), + INTRINSIC(JavaLangMath, Atan2, DD_D, kIntrinsicAtan2, 0), + INTRINSIC(JavaLangMath, Cbrt, D_D, kIntrinsicCbrt, 0), + INTRINSIC(JavaLangMath, Cosh, D_D, kIntrinsicCosh, 0), + INTRINSIC(JavaLangMath, Exp, D_D, kIntrinsicExp, 0), + INTRINSIC(JavaLangMath, Expm1, D_D, kIntrinsicExpm1, 0), + INTRINSIC(JavaLangMath, Hypot, DD_D, kIntrinsicHypot, 0), + INTRINSIC(JavaLangMath, Log, D_D, kIntrinsicLog, 0), + INTRINSIC(JavaLangMath, Log10, D_D, kIntrinsicLog10, 0), + INTRINSIC(JavaLangMath, NextAfter, DD_D, kIntrinsicNextAfter, 0), + INTRINSIC(JavaLangMath, Sinh, D_D, kIntrinsicSinh, 0), + INTRINSIC(JavaLangMath, Tan, D_D, kIntrinsicTan, 0), + INTRINSIC(JavaLangMath, Tanh, D_D, kIntrinsicTanh, 0), + INTRINSIC(JavaLangMath, Sqrt, D_D, kIntrinsicSqrt, 0), + INTRINSIC(JavaLangStrictMath, Sqrt, D_D, kIntrinsicSqrt, 0), + + INTRINSIC(JavaLangMath, Ceil, D_D, kIntrinsicCeil, 0), + INTRINSIC(JavaLangStrictMath, Ceil, D_D, kIntrinsicCeil, 0), + INTRINSIC(JavaLangMath, Floor, D_D, kIntrinsicFloor, 0), + INTRINSIC(JavaLangStrictMath, Floor, D_D, kIntrinsicFloor, 0), + INTRINSIC(JavaLangMath, Rint, D_D, kIntrinsicRint, 0), + INTRINSIC(JavaLangStrictMath, Rint, D_D, kIntrinsicRint, 0), + INTRINSIC(JavaLangMath, Round, F_I, kIntrinsicRoundFloat, 0), + INTRINSIC(JavaLangStrictMath, Round, F_I, kIntrinsicRoundFloat, 0), + INTRINSIC(JavaLangMath, Round, D_J, kIntrinsicRoundDouble, 0), + INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0), + + INTRINSIC(JavaLangRefReference, ReferenceGetReferent, _Object, kIntrinsicReferenceGetReferent, 0), + + INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0), + INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0), + INTRINSIC(JavaLangString, Equals, Object_Z, kIntrinsicEquals, 0), + INTRINSIC(JavaLangString, GetCharsNoCheck, IICharArrayI_V, kIntrinsicGetCharsNoCheck, 0), + INTRINSIC(JavaLangString, IsEmpty, _Z, kIntrinsicIsEmptyOrLength, kIntrinsicFlagIsEmpty), + INTRINSIC(JavaLangString, IndexOf, II_I, kIntrinsicIndexOf, kIntrinsicFlagNone), + INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0), + INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength), + + 
INTRINSIC(JavaLangStringFactory, NewStringFromBytes, ByteArrayIII_String, + kIntrinsicNewStringFromBytes, kIntrinsicFlagNone), + INTRINSIC(JavaLangStringFactory, NewStringFromChars, IICharArray_String, + kIntrinsicNewStringFromChars, kIntrinsicFlagNone), + INTRINSIC(JavaLangStringFactory, NewStringFromString, String_String, + kIntrinsicNewStringFromString, kIntrinsicFlagNone), + + INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0), + + INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte), + INTRINSIC(LibcoreIoMemory, PeekIntNative, J_I, kIntrinsicPeek, k32), + INTRINSIC(LibcoreIoMemory, PeekLongNative, J_J, kIntrinsicPeek, k64), + INTRINSIC(LibcoreIoMemory, PeekShortNative, J_S, kIntrinsicPeek, kSignedHalf), + INTRINSIC(LibcoreIoMemory, PokeByte, JB_V, kIntrinsicPoke, kSignedByte), + INTRINSIC(LibcoreIoMemory, PokeIntNative, JI_V, kIntrinsicPoke, k32), + INTRINSIC(LibcoreIoMemory, PokeLongNative, JJ_V, kIntrinsicPoke, k64), + INTRINSIC(LibcoreIoMemory, PokeShortNative, JS_V, kIntrinsicPoke, kSignedHalf), + + INTRINSIC(SunMiscUnsafe, CompareAndSwapInt, ObjectJII_Z, kIntrinsicCas, + kIntrinsicFlagNone), + INTRINSIC(SunMiscUnsafe, CompareAndSwapLong, ObjectJJJ_Z, kIntrinsicCas, + kIntrinsicFlagIsLong), + INTRINSIC(SunMiscUnsafe, CompareAndSwapObject, ObjectJObjectObject_Z, kIntrinsicCas, + kIntrinsicFlagIsObject), + +#define UNSAFE_GET_PUT(type, code, type_flags) \ + INTRINSIC(SunMiscUnsafe, Get ## type, ObjectJ_ ## code, kIntrinsicUnsafeGet, \ + type_flags), \ + INTRINSIC(SunMiscUnsafe, Get ## type ## Volatile, ObjectJ_ ## code, kIntrinsicUnsafeGet, \ + type_flags | kIntrinsicFlagIsVolatile), \ + INTRINSIC(SunMiscUnsafe, Put ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \ + type_flags), \ + INTRINSIC(SunMiscUnsafe, Put ## type ## Volatile, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \ + type_flags | kIntrinsicFlagIsVolatile), \ + INTRINSIC(SunMiscUnsafe, PutOrdered ## type, ObjectJ ## code ## _V, kIntrinsicUnsafePut, \ + type_flags | kIntrinsicFlagIsOrdered) + + UNSAFE_GET_PUT(Int, I, kIntrinsicFlagNone), + UNSAFE_GET_PUT(Long, J, kIntrinsicFlagIsLong), + UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject), +#undef UNSAFE_GET_PUT + + // 1.8 + INTRINSIC(SunMiscUnsafe, GetAndAddInt, ObjectJI_I, kIntrinsicUnsafeGetAndAddInt, 0), + INTRINSIC(SunMiscUnsafe, GetAndAddLong, ObjectJJ_J, kIntrinsicUnsafeGetAndAddLong, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetInt, ObjectJI_I, kIntrinsicUnsafeGetAndSetInt, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetLong, ObjectJJ_J, kIntrinsicUnsafeGetAndSetLong, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetObject, ObjectJObject_Object, kIntrinsicUnsafeGetAndSetObject, 0), + INTRINSIC(SunMiscUnsafe, LoadFence, _V, kIntrinsicUnsafeLoadFence, 0), + INTRINSIC(SunMiscUnsafe, StoreFence, _V, kIntrinsicUnsafeStoreFence, 0), + INTRINSIC(SunMiscUnsafe, FullFence, _V, kIntrinsicUnsafeFullFence, 0), + + INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray, + 0), + INTRINSIC(JavaLangSystem, ArrayCopy, ObjectIObjectII_V , kIntrinsicSystemArrayCopy, + 0), + + INTRINSIC(JavaLangInteger, RotateRight, II_I, kIntrinsicRotateRight, k32), + INTRINSIC(JavaLangLong, RotateRight, JI_J, kIntrinsicRotateRight, k64), + INTRINSIC(JavaLangInteger, RotateLeft, II_I, kIntrinsicRotateLeft, k32), + INTRINSIC(JavaLangLong, RotateLeft, JI_J, kIntrinsicRotateLeft, k64), + +#undef INTRINSIC + +#define SPECIAL(c, n, p, o, d) \ + { { kClassCache ## c, kNameCache ## n, kProtoCache ## p }, { o, kInlineSpecial, { d } } 
}
+
+ SPECIAL(JavaLangString, Init, _V, kInlineStringInit, 0),
+ SPECIAL(JavaLangString, Init, ByteArray_V, kInlineStringInit, 1),
+ SPECIAL(JavaLangString, Init, ByteArrayI_V, kInlineStringInit, 2),
+ SPECIAL(JavaLangString, Init, ByteArrayII_V, kInlineStringInit, 3),
+ SPECIAL(JavaLangString, Init, ByteArrayIII_V, kInlineStringInit, 4),
+ SPECIAL(JavaLangString, Init, ByteArrayIIString_V, kInlineStringInit, 5),
+ SPECIAL(JavaLangString, Init, ByteArrayString_V, kInlineStringInit, 6),
+ SPECIAL(JavaLangString, Init, ByteArrayIICharset_V, kInlineStringInit, 7),
+ SPECIAL(JavaLangString, Init, ByteArrayCharset_V, kInlineStringInit, 8),
+ SPECIAL(JavaLangString, Init, CharArray_V, kInlineStringInit, 9),
+ SPECIAL(JavaLangString, Init, CharArrayII_V, kInlineStringInit, 10),
+ SPECIAL(JavaLangString, Init, IICharArray_V, kInlineStringInit, 11),
+ SPECIAL(JavaLangString, Init, IntArrayII_V, kInlineStringInit, 12),
+ SPECIAL(JavaLangString, Init, String_V, kInlineStringInit, 13),
+ SPECIAL(JavaLangString, Init, StringBuffer_V, kInlineStringInit, 14),
+ SPECIAL(JavaLangString, Init, StringBuilder_V, kInlineStringInit, 15),
+
+#undef SPECIAL
+};
+
+DexFileMethodInliner::DexFileMethodInliner()
+ : lock_("DexFileMethodInliner lock", kDexFileMethodInlinerLock),
+ dex_file_(nullptr) {
+ static_assert(kClassCacheFirst == 0, "kClassCacheFirst not 0");
+ static_assert(arraysize(kClassCacheNames) == kClassCacheLast,
+ "bad arraysize for kClassCacheNames");
+ static_assert(kNameCacheFirst == 0, "kNameCacheFirst not 0");
+ static_assert(arraysize(kNameCacheNames) == kNameCacheLast,
+ "bad arraysize for kNameCacheNames");
+ static_assert(kProtoCacheFirst == 0, "kProtoCacheFirst not 0");
+ static_assert(arraysize(kProtoCacheDefs) == kProtoCacheLast,
+ "bad arraysize for kProtoCacheDefs");
+}
+
+DexFileMethodInliner::~DexFileMethodInliner() {
+}
+
+bool DexFileMethodInliner::AnalyseMethodCode(verifier::MethodVerifier* verifier) {
+ InlineMethod method;
+ bool success = InlineMethodAnalyser::AnalyseMethodCode(verifier, &method);
+ return success && AddInlineMethod(verifier->GetMethodReference().dex_method_index, method);
+}
+
+InlineMethodFlags DexFileMethodInliner::IsIntrinsicOrSpecial(uint32_t method_index) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ if (it != inline_methods_.end()) {
+ DCHECK_NE(it->second.flags & (kInlineIntrinsic | kInlineSpecial), 0);
+ return it->second.flags;
+ } else {
+ return kNoInlineMethodFlags;
+ }
+}
+
+bool DexFileMethodInliner::IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ bool res = (it != inline_methods_.end() && (it->second.flags & kInlineIntrinsic) != 0);
+ if (res && intrinsic != nullptr) {
+ *intrinsic = it->second;
+ }
+ return res;
+}
+
+bool DexFileMethodInliner::IsSpecial(uint32_t method_index) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ auto it = inline_methods_.find(method_index);
+ return it != inline_methods_.end() && (it->second.flags & kInlineSpecial) != 0;
+}
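+
+// The Find*Index() helpers below share one memoization protocol, summarized
+// here as an informal sketch (LookUpInDexFile stands in for the concrete
+// FindTypeId/FindStringId/FindProtoId lookup of each helper): every cache slot
+// starts out as kIndexUnresolved and is written exactly once, with either the
+// resolved index or kIndexNotFound:
+//   if (*slot != kIndexUnresolved) return *slot;  // hit: found or known-missing
+//   *slot = LookUpInDexFile(...);                 // miss: resolve and memoize
+// so repeated intrinsic lookups never re-scan the dex file.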
dex_file->GetIndexForTypeId(*type_id);
+  return *class_index;
+}
+
+uint32_t DexFileMethodInliner::FindNameIndex(const DexFile* dex_file, IndexCache* cache,
+                                             NameCacheIndex index) {
+  uint32_t* name_index = &cache->name_indexes[index];
+  if (*name_index != kIndexUnresolved) {
+    return *name_index;
+  }
+
+  const DexFile::StringId* string_id = dex_file->FindStringId(kNameCacheNames[index]);
+  if (string_id == nullptr) {
+    *name_index = kIndexNotFound;
+    return *name_index;
+  }
+  *name_index = dex_file->GetIndexForStringId(*string_id);
+  return *name_index;
+}
+
+uint32_t DexFileMethodInliner::FindProtoIndex(const DexFile* dex_file, IndexCache* cache,
+                                              ProtoCacheIndex index) {
+  uint32_t* proto_index = &cache->proto_indexes[index];
+  if (*proto_index != kIndexUnresolved) {
+    return *proto_index;
+  }
+
+  const ProtoDef& proto_def = kProtoCacheDefs[index];
+  uint32_t return_index = FindClassIndex(dex_file, cache, proto_def.return_type);
+  if (return_index == kIndexNotFound) {
+    *proto_index = kIndexNotFound;
+    return *proto_index;
+  }
+  uint16_t return_type = static_cast<uint16_t>(return_index);
+  DCHECK_EQ(static_cast<uint32_t>(return_type), return_index);
+
+  uint32_t signature_length = proto_def.param_count;
+  uint16_t signature_type_idxs[kProtoMaxParams];
+  for (uint32_t i = 0; i != signature_length; ++i) {
+    uint32_t param_index = FindClassIndex(dex_file, cache, proto_def.params[i]);
+    if (param_index == kIndexNotFound) {
+      *proto_index = kIndexNotFound;
+      return *proto_index;
+    }
+    signature_type_idxs[i] = static_cast<uint16_t>(param_index);
+    DCHECK_EQ(static_cast<uint32_t>(signature_type_idxs[i]), param_index);
+  }
+
+  const DexFile::ProtoId* proto_id = dex_file->FindProtoId(return_type, signature_type_idxs,
+                                                           signature_length);
+  if (proto_id == nullptr) {
+    *proto_index = kIndexNotFound;
+    return *proto_index;
+  }
+  *proto_index = dex_file->GetIndexForProtoId(*proto_id);
+  return *proto_index;
+}
+
+uint32_t DexFileMethodInliner::FindMethodIndex(const DexFile* dex_file, IndexCache* cache,
+                                               const MethodDef& method_def) {
+  uint32_t declaring_class_index = FindClassIndex(dex_file, cache, method_def.declaring_class);
+  if (declaring_class_index == kIndexNotFound) {
+    return kIndexNotFound;
+  }
+  uint32_t name_index = FindNameIndex(dex_file, cache, method_def.name);
+  if (name_index == kIndexNotFound) {
+    return kIndexNotFound;
+  }
+  uint32_t proto_index = FindProtoIndex(dex_file, cache, method_def.proto);
+  if (proto_index == kIndexNotFound) {
+    return kIndexNotFound;
+  }
+  const DexFile::MethodId* method_id =
+      dex_file->FindMethodId(dex_file->GetTypeId(declaring_class_index),
+                             dex_file->GetStringId(name_index),
+                             dex_file->GetProtoId(proto_index));
+  if (method_id == nullptr) {
+    return kIndexNotFound;
+  }
+  return dex_file->GetIndexForMethodId(*method_id);
+}
+
+DexFileMethodInliner::IndexCache::IndexCache() {
+  std::fill_n(class_indexes, arraysize(class_indexes), kIndexUnresolved);
+  std::fill_n(name_indexes, arraysize(name_indexes), kIndexUnresolved);
+  std::fill_n(proto_indexes, arraysize(proto_indexes), kIndexUnresolved);
+}
+
+void DexFileMethodInliner::FindIntrinsics(const DexFile* dex_file) {
+  DCHECK(dex_file != nullptr);
+  DCHECK(dex_file_ == nullptr);
+  IndexCache cache;
+  for (const IntrinsicDef& def : kIntrinsicMethods) {
+    uint32_t method_idx = FindMethodIndex(dex_file, &cache, def.method_def);
+    if (method_idx != kIndexNotFound) {
+      DCHECK(inline_methods_.find(method_idx) == inline_methods_.end());
+      inline_methods_.Put(method_idx, def.intrinsic);
+    }
+  }
+  dex_file_ = dex_file;
+}
+
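The Find*Index() helpers above all follow the same lazy-resolution pattern: a cache slot starts at kIndexUnresolved, and the first lookup stores either the real index or kIndexNotFound, so failed lookups are never repeated. A minimal self-contained sketch of the pattern; the std::map stands in for the much costlier DexFile queries, and the names here are illustrative, not ART API:

    #include <cstdint>
    #include <map>
    #include <string>

    static constexpr uint32_t kIndexNotFound = static_cast<uint32_t>(-1);
    static constexpr uint32_t kIndexUnresolved = static_cast<uint32_t>(-2);

    // Stand-in for DexFile::FindTypeId() and friends: kIndexNotFound on a miss.
    uint32_t SlowLookup(const std::map<std::string, uint32_t>& table,
                        const std::string& key) {
      auto it = table.find(key);
      return (it != table.end()) ? it->second : kIndexNotFound;
    }

    // Only the first call pays for SlowLookup(); later calls, including
    // repeated misses, are answered from the cache slot.
    uint32_t FindIndex(uint32_t* cache_slot,
                       const std::map<std::string, uint32_t>& table,
                       const std::string& key) {
      if (*cache_slot != kIndexUnresolved) {
        return *cache_slot;
      }
      *cache_slot = SlowLookup(table, key);
      return *cache_slot;
    }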
+bool DexFileMethodInliner::AddInlineMethod(int32_t method_idx, const InlineMethod& method) {
+  WriterMutexLock mu(Thread::Current(), lock_);
+  if (LIKELY(inline_methods_.find(method_idx) == inline_methods_.end())) {
+    inline_methods_.Put(method_idx, method);
+    return true;
+  } else {
+    if (PrettyMethod(method_idx, *dex_file_) == "int java.lang.String.length()") {
+      // TODO: String.length is both kIntrinsicIsEmptyOrLength and kInlineOpIGet.
+    } else {
+      LOG(WARNING) << "Inliner: " << PrettyMethod(method_idx, *dex_file_) << " already inline";
+    }
+    return false;
+  }
+}
+
+uint32_t DexFileMethodInliner::GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) {
+  ReaderMutexLock mu(Thread::Current(), lock_);
+  auto it = inline_methods_.find(method_index);
+  if (it != inline_methods_.end() && (it->second.opcode == kInlineStringInit)) {
+    uint32_t string_init_base_offset = Thread::QuickEntryPointOffsetWithSize(
+        OFFSETOF_MEMBER(QuickEntryPoints, pNewEmptyString), pointer_size);
+    return string_init_base_offset + it->second.d.data * pointer_size;
+  }
+  return 0;
+}
+
+bool DexFileMethodInliner::IsStringInitMethodIndex(uint32_t method_index) {
+  ReaderMutexLock mu(Thread::Current(), lock_);
+  auto it = inline_methods_.find(method_index);
+  return (it != inline_methods_.end()) && (it->second.opcode == kInlineStringInit);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
new file mode 100644
index 000000000..fbe403f59
--- /dev/null
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -0,0 +1,398 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
+#define ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
+
+#include <stdint.h>
+#include "base/mutex.h"
+#include "base/macros.h"
+#include "safe_map.h"
+#include "dex/compiler_enums.h"
+#include "dex_file.h"
+#include "quick/inline_method_analyser.h"
+
+namespace art {
+
+namespace verifier {
+class MethodVerifier;
+}  // namespace verifier
+
+/**
+ * Handles inlining of methods from a particular DexFile.
+ *
+ * Intrinsics are a special case of inline methods. The DexFile indices for
+ * all the supported intrinsic methods are looked up once by the FindIntrinsics
+ * function and cached by this class for quick lookup by the method index.
+ *
+ * TODO: Detect short methods (at least getters, setters and empty functions)
+ * from the verifier and mark them for inlining. Inline these methods early
+ * during compilation to allow further optimizations. Similarly, provide
+ * additional information about intrinsics to the early phases of compilation.
+ */
+class DexFileMethodInliner {
+  public:
+    DexFileMethodInliner();
+    ~DexFileMethodInliner();
+
+    /**
+     * Analyse method code to determine if the method is a candidate for inlining.
+     * If it is, record its data for later.
+ * + * @param verifier the method verifier holding data about the method to analyse. + * @return true if the method is a candidate for inlining, false otherwise. + */ + bool AnalyseMethodCode(verifier::MethodVerifier* verifier) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); + + /** + * Check whether a particular method index corresponds to an intrinsic or special function. + */ + InlineMethodFlags IsIntrinsicOrSpecial(uint32_t method_index) REQUIRES(!lock_); + + /** + * Check whether a particular method index corresponds to an intrinsic function. + */ + bool IsIntrinsic(uint32_t method_index, InlineMethod* intrinsic) REQUIRES(!lock_); + + /** + * Check whether a particular method index corresponds to a special function. + */ + bool IsSpecial(uint32_t method_index) REQUIRES(!lock_); + + /** + * Gets the thread pointer entrypoint offset for a string init method index and pointer size. + */ + uint32_t GetOffsetForStringInit(uint32_t method_index, size_t pointer_size) + REQUIRES(!lock_); + + /** + * Check whether a particular method index is a string init. + */ + bool IsStringInitMethodIndex(uint32_t method_index) REQUIRES(!lock_); + + /** + * To avoid multiple lookups of a class by its descriptor, we cache its + * type index in the IndexCache. These are the indexes into the IndexCache + * class_indexes array. + */ + enum ClassCacheIndex : uint8_t { // unit8_t to save space, make larger if needed + kClassCacheFirst = 0, + kClassCacheBoolean = kClassCacheFirst, + kClassCacheByte, + kClassCacheChar, + kClassCacheShort, + kClassCacheInt, + kClassCacheLong, + kClassCacheFloat, + kClassCacheDouble, + kClassCacheVoid, + kClassCacheJavaLangByteArray, + kClassCacheJavaLangCharArray, + kClassCacheJavaLangIntArray, + kClassCacheJavaLangObject, + kClassCacheJavaLangRefReference, + kClassCacheJavaLangString, + kClassCacheJavaLangStringBuffer, + kClassCacheJavaLangStringBuilder, + kClassCacheJavaLangStringFactory, + kClassCacheJavaLangDouble, + kClassCacheJavaLangFloat, + kClassCacheJavaLangInteger, + kClassCacheJavaLangLong, + kClassCacheJavaLangShort, + kClassCacheJavaLangMath, + kClassCacheJavaLangStrictMath, + kClassCacheJavaLangThread, + kClassCacheJavaNioCharsetCharset, + kClassCacheLibcoreIoMemory, + kClassCacheSunMiscUnsafe, + kClassCacheJavaLangSystem, + kClassCacheLast + }; + + /** + * To avoid multiple lookups of a method name string, we cache its string + * index in the IndexCache. These are the indexes into the IndexCache + * name_indexes array. 
+ */ + enum NameCacheIndex : uint8_t { // unit8_t to save space, make larger if needed + kNameCacheFirst = 0, + kNameCacheReverse = kNameCacheFirst, + kNameCacheReverseBytes, + kNameCacheDoubleToRawLongBits, + kNameCacheLongBitsToDouble, + kNameCacheFloatToRawIntBits, + kNameCacheIntBitsToFloat, + kNameCacheAbs, + kNameCacheMax, + kNameCacheMin, + kNameCacheCos, + kNameCacheSin, + kNameCacheAcos, + kNameCacheAsin, + kNameCacheAtan, + kNameCacheAtan2, + kNameCacheCbrt, + kNameCacheCosh, + kNameCacheExp, + kNameCacheExpm1, + kNameCacheHypot, + kNameCacheLog, + kNameCacheLog10, + kNameCacheNextAfter, + kNameCacheSinh, + kNameCacheTan, + kNameCacheTanh, + kNameCacheSqrt, + kNameCacheCeil, + kNameCacheFloor, + kNameCacheRint, + kNameCacheRound, + kNameCacheReferenceGetReferent, + kNameCacheCharAt, + kNameCacheCompareTo, + kNameCacheEquals, + kNameCacheGetCharsNoCheck, + kNameCacheIsEmpty, + kNameCacheFloatToIntBits, + kNameCacheDoubleToLongBits, + kNameCacheIsInfinite, + kNameCacheIsNaN, + kNameCacheIndexOf, + kNameCacheLength, + kNameCacheInit, + kNameCacheNewStringFromBytes, + kNameCacheNewStringFromChars, + kNameCacheNewStringFromString, + kNameCacheCurrentThread, + kNameCachePeekByte, + kNameCachePeekIntNative, + kNameCachePeekLongNative, + kNameCachePeekShortNative, + kNameCachePokeByte, + kNameCachePokeIntNative, + kNameCachePokeLongNative, + kNameCachePokeShortNative, + kNameCacheCompareAndSwapInt, + kNameCacheCompareAndSwapLong, + kNameCacheCompareAndSwapObject, + kNameCacheGetInt, + kNameCacheGetIntVolatile, + kNameCachePutInt, + kNameCachePutIntVolatile, + kNameCachePutOrderedInt, + kNameCacheGetLong, + kNameCacheGetLongVolatile, + kNameCachePutLong, + kNameCachePutLongVolatile, + kNameCachePutOrderedLong, + kNameCacheGetObject, + kNameCacheGetObjectVolatile, + kNameCachePutObject, + kNameCachePutObjectVolatile, + kNameCachePutOrderedObject, + kNameCacheGetAndAddInt, + kNameCacheGetAndAddLong, + kNameCacheGetAndSetInt, + kNameCacheGetAndSetLong, + kNameCacheGetAndSetObject, + kNameCacheLoadFence, + kNameCacheStoreFence, + kNameCacheFullFence, + kNameCacheArrayCopy, + kNameCacheBitCount, + kNameCacheCompare, + kNameCacheHighestOneBit, + kNameCacheLowestOneBit, + kNameCacheNumberOfLeadingZeros, + kNameCacheNumberOfTrailingZeros, + kNameCacheRotateRight, + kNameCacheRotateLeft, + kNameCacheSignum, + kNameCacheLast + }; + + /** + * To avoid multiple lookups of a method signature, we cache its proto + * index in the IndexCache. These are the indexes into the IndexCache + * proto_indexes array. 
+ */ + enum ProtoCacheIndex : uint8_t { // unit8_t to save space, make larger if needed + kProtoCacheFirst = 0, + kProtoCacheI_I = kProtoCacheFirst, + kProtoCacheJ_J, + kProtoCacheS_S, + kProtoCacheD_D, + kProtoCacheDD_D, + kProtoCacheF_F, + kProtoCacheFF_F, + kProtoCacheD_J, + kProtoCacheD_Z, + kProtoCacheJ_D, + kProtoCacheF_I, + kProtoCacheF_Z, + kProtoCacheI_F, + kProtoCacheII_I, + kProtoCacheI_C, + kProtoCacheString_I, + kProtoCache_Z, + kProtoCache_I, + kProtoCache_Object, + kProtoCache_Thread, + kProtoCacheJ_B, + kProtoCacheJ_I, + kProtoCacheJ_S, + kProtoCacheJB_V, + kProtoCacheJI_V, + kProtoCacheJJ_J, + kProtoCacheJJ_I, + kProtoCacheJJ_V, + kProtoCacheJS_V, + kProtoCacheObject_Z, + kProtoCacheJI_J, + kProtoCacheObjectJII_Z, + kProtoCacheObjectJJJ_Z, + kProtoCacheObjectJObjectObject_Z, + kProtoCacheObjectJ_I, + kProtoCacheObjectJI_I, + kProtoCacheObjectJI_V, + kProtoCacheObjectJ_J, + kProtoCacheObjectJJ_J, + kProtoCacheObjectJJ_V, + kProtoCacheObjectJ_Object, + kProtoCacheObjectJObject_V, + kProtoCacheObjectJObject_Object, + kProtoCacheCharArrayICharArrayII_V, + kProtoCacheObjectIObjectII_V, + kProtoCacheIICharArrayI_V, + kProtoCacheByteArrayIII_String, + kProtoCacheIICharArray_String, + kProtoCacheString_String, + kProtoCache_V, + kProtoCacheByteArray_V, + kProtoCacheByteArrayI_V, + kProtoCacheByteArrayII_V, + kProtoCacheByteArrayIII_V, + kProtoCacheByteArrayIIString_V, + kProtoCacheByteArrayString_V, + kProtoCacheByteArrayIICharset_V, + kProtoCacheByteArrayCharset_V, + kProtoCacheCharArray_V, + kProtoCacheCharArrayII_V, + kProtoCacheIICharArray_V, + kProtoCacheIntArrayII_V, + kProtoCacheString_V, + kProtoCacheStringBuffer_V, + kProtoCacheStringBuilder_V, + kProtoCacheLast + }; + + private: + /** + * The maximum number of method parameters we support in the ProtoDef. + */ + static constexpr uint32_t kProtoMaxParams = 6; + + /** + * The method signature (proto) definition using cached class indexes. + * The return_type and params are used with the IndexCache to look up + * appropriate class indexes to be passed to DexFile::FindProtoId(). + */ + struct ProtoDef { + ClassCacheIndex return_type; + uint8_t param_count; + ClassCacheIndex params[kProtoMaxParams]; + }; + + /** + * The method definition using cached class, name and proto indexes. + * The class index, method name index and proto index are used with + * IndexCache to look up appropriate parameters for DexFile::FindMethodId(). + */ + struct MethodDef { + ClassCacheIndex declaring_class; + NameCacheIndex name; + ProtoCacheIndex proto; + }; + + /** + * The definition of an intrinsic function binds the method definition + * to an Intrinsic. + */ + struct IntrinsicDef { + MethodDef method_def; + InlineMethod intrinsic; + }; + + /** + * Cache for class, method name and method signature indexes used during + * intrinsic function lookup to avoid multiple lookups of the same items. + * + * Many classes have multiple intrinsics and/or they are used in multiple + * method signatures and we want to avoid repeated lookups since they are + * not exactly cheap. The method names and method signatures are sometimes + * reused and therefore cached as well. 
+     */
+    struct IndexCache {
+      IndexCache();
+
+      uint32_t class_indexes[kClassCacheLast - kClassCacheFirst];
+      uint32_t name_indexes[kNameCacheLast - kNameCacheFirst];
+      uint32_t proto_indexes[kProtoCacheLast - kProtoCacheFirst];
+    };
+
+    static const char* const kClassCacheNames[];
+    static const char* const kNameCacheNames[];
+    static const ProtoDef kProtoCacheDefs[];
+    static const IntrinsicDef kIntrinsicMethods[];
+
+    static const uint32_t kIndexNotFound = static_cast<uint32_t>(-1);
+    static const uint32_t kIndexUnresolved = static_cast<uint32_t>(-2);
+
+    static uint32_t FindClassIndex(const DexFile* dex_file, IndexCache* cache,
+                                   ClassCacheIndex index);
+    static uint32_t FindNameIndex(const DexFile* dex_file, IndexCache* cache,
+                                  NameCacheIndex index);
+    static uint32_t FindProtoIndex(const DexFile* dex_file, IndexCache* cache,
+                                   ProtoCacheIndex index);
+    static uint32_t FindMethodIndex(const DexFile* dex_file, IndexCache* cache,
+                                    const MethodDef& method_def);
+
+    /**
+     * Find all known intrinsic methods in the dex_file and cache their indices.
+     *
+     * Only DexFileToMethodInlinerMap may call this function to initialize the inliner.
+     */
+    void FindIntrinsics(const DexFile* dex_file) REQUIRES(lock_);
+
+    friend class DexFileToMethodInlinerMap;
+
+    bool AddInlineMethod(int32_t method_idx, const InlineMethod& method) REQUIRES(!lock_);
+
+    ReaderWriterMutex lock_;
+    /*
+     * Maps method indexes (for the particular DexFile) to Intrinsic definitions.
+     */
+    SafeMap<uint32_t, InlineMethod> inline_methods_ GUARDED_BY(lock_);
+    const DexFile* dex_file_;
+
+    DISALLOW_COPY_AND_ASSIGN(DexFileMethodInliner);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_DEX_FILE_METHOD_INLINER_H_
diff --git a/compiler/dex/quick/dex_file_to_method_inliner_map.cc b/compiler/dex/quick/dex_file_to_method_inliner_map.cc
new file mode 100644
index 000000000..2fec18328
--- /dev/null
+++ b/compiler/dex/quick/dex_file_to_method_inliner_map.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utility>
+#include <vector>
+#include "thread.h"
+#include "thread-inl.h"
+#include "base/mutex.h"
+#include "base/mutex-inl.h"
+#include "base/logging.h"
+#include "driver/compiler_driver.h"
+
+#include "dex_file_to_method_inliner_map.h"
+
+namespace art {
+
+DexFileToMethodInlinerMap::DexFileToMethodInlinerMap()
+    : lock_("DexFileToMethodInlinerMap lock", kDexFileToMethodInlinerMapLock) {
+}
+
+DexFileToMethodInlinerMap::~DexFileToMethodInlinerMap() {
+  for (auto& entry : inliners_) {
+    delete entry.second;
+  }
+}
+
+DexFileMethodInliner* DexFileToMethodInlinerMap::GetMethodInliner(const DexFile* dex_file) {
+  Thread* self = Thread::Current();
+  {
+    ReaderMutexLock mu(self, lock_);
+    auto it = inliners_.find(dex_file);
+    if (it != inliners_.end()) {
+      return it->second;
+    }
+  }
+
+  // We need to acquire our lock_ to modify inliners_ but we want to release it
+  // before we initialize the new inliner. However, we need to acquire the
+  // new inliner's lock_ before we release our lock_ to prevent another thread
+  // from using the uninitialized inliner. This requires explicit calls to
+  // ExclusiveLock()/ExclusiveUnlock() on one of the locks, the other one
+  // can use WriterMutexLock.
+  DexFileMethodInliner* locked_inliner;
+  {
+    WriterMutexLock mu(self, lock_);
+    DexFileMethodInliner** inliner = &inliners_[dex_file];  // inserts new entry if not found
+    if (*inliner) {
+      return *inliner;
+    }
+    *inliner = new DexFileMethodInliner;
+    DCHECK(*inliner != nullptr);
+    locked_inliner = *inliner;
+    locked_inliner->lock_.ExclusiveLock(self);  // Acquire inliner's lock_ before releasing lock_.
+  }
+  locked_inliner->FindIntrinsics(dex_file);
+  locked_inliner->lock_.ExclusiveUnlock(self);
+  return locked_inliner;
+}
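The hand-off in GetMethodInliner() above is easy to get wrong, so here is the same idea reduced to portable C++; a sketch, not the ART implementation, with std::shared_mutex standing in for ART's ReaderWriterMutex and Init() for FindIntrinsics(). The placeholder is published under the map's writer lock, but its own lock is taken before the map lock is released, so any thread that finds the pointer and then takes the object lock (as all DexFileMethodInliner methods do) blocks until initialization finishes:

    #include <map>
    #include <mutex>
    #include <shared_mutex>

    struct Inliner {
      std::mutex lock;                  // Every accessor must take this first.
      void Init() { /* stand-in for FindIntrinsics() */ }
    };

    std::shared_mutex map_lock;
    std::map<int, Inliner*> inliners;

    Inliner* GetInliner(int key) {
      {
        std::shared_lock<std::shared_mutex> reader(map_lock);
        auto it = inliners.find(key);
        if (it != inliners.end()) {
          return it->second;
        }
      }
      Inliner* locked;
      {
        std::unique_lock<std::shared_mutex> writer(map_lock);
        Inliner*& slot = inliners[key];  // Inserts nullptr if absent.
        if (slot != nullptr) {
          return slot;                   // Another thread won the race.
        }
        slot = new Inliner;
        locked = slot;
        locked->lock.lock();             // Acquire before dropping map_lock.
      }
      locked->Init();                    // Accessors block on locked->lock here.
      locked->lock.unlock();
      return locked;
    }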
+
+}  // namespace art
diff --git a/compiler/dex/quick/dex_file_to_method_inliner_map.h b/compiler/dex/quick/dex_file_to_method_inliner_map.h
new file mode 100644
index 000000000..215dc12b2
--- /dev/null
+++ b/compiler/dex/quick/dex_file_to_method_inliner_map.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_DEX_FILE_TO_METHOD_INLINER_MAP_H_
+#define ART_COMPILER_DEX_QUICK_DEX_FILE_TO_METHOD_INLINER_MAP_H_
+
+#include <map>
+#include <vector>
+#include "base/macros.h"
+#include "base/mutex.h"
+
+#include "dex/quick/dex_file_method_inliner.h"
+
+namespace art {
+
+class CompilerDriver;
+class DexFile;
+
+/**
+ * Map each DexFile to its DexFileMethodInliner.
+ *
+ * The method inliner is created and initialized the first time it's requested
+ * for a particular DexFile.
+ */
+class DexFileToMethodInlinerMap {
+  public:
+    DexFileToMethodInlinerMap();
+    ~DexFileToMethodInlinerMap();
+
+    DexFileMethodInliner* GetMethodInliner(const DexFile* dex_file) NO_THREAD_SAFETY_ANALYSIS;
+    // TODO: There is an irregular non-scoped use of locks that defeats annotalysis with -O0.
+    // Fix the NO_THREAD_SAFETY_ANALYSIS when this works and add the appropriate LOCKS_EXCLUDED.
+
+  private:
+    ReaderWriterMutex lock_;
+    std::map<const DexFile*, DexFileMethodInliner*> inliners_ GUARDED_BY(lock_);
+
+    DISALLOW_COPY_AND_ASSIGN(DexFileToMethodInlinerMap);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_DEX_FILE_TO_METHOD_INLINER_MAP_H_
diff --git a/compiler/dex/quick_compiler_callbacks.cc b/compiler/dex/quick_compiler_callbacks.cc
new file mode 100644
index 000000000..2532bda63
--- /dev/null
+++ b/compiler/dex/quick_compiler_callbacks.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "quick_compiler_callbacks.h" + +#include "quick/dex_file_to_method_inliner_map.h" +#include "verifier/method_verifier-inl.h" +#include "verification_results.h" + +namespace art { + +void QuickCompilerCallbacks::MethodVerified(verifier::MethodVerifier* verifier) { + verification_results_->ProcessVerifiedMethod(verifier); + MethodReference ref = verifier->GetMethodReference(); + method_inliner_map_->GetMethodInliner(ref.dex_file)->AnalyseMethodCode(verifier); +} + +void QuickCompilerCallbacks::ClassRejected(ClassReference ref) { + verification_results_->AddRejectedClass(ref); +} + +} // namespace art diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h new file mode 100644 index 000000000..4f5ea766d --- /dev/null +++ b/compiler/dex/quick_compiler_callbacks.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ +#define ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ + +#include "compiler_callbacks.h" + +namespace art { + +class VerificationResults; +class DexFileToMethodInlinerMap; + +class QuickCompilerCallbacks FINAL : public CompilerCallbacks { + public: + QuickCompilerCallbacks(VerificationResults* verification_results, + DexFileToMethodInlinerMap* method_inliner_map, + CompilerCallbacks::CallbackMode mode) + : CompilerCallbacks(mode), verification_results_(verification_results), + method_inliner_map_(method_inliner_map) { + CHECK(verification_results != nullptr); + CHECK(method_inliner_map != nullptr); + } + + ~QuickCompilerCallbacks() { } + + void MethodVerified(verifier::MethodVerifier* verifier) + SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE; + + void ClassRejected(ClassReference ref) OVERRIDE; + + // We are running in an environment where we can call patchoat safely so we should. + bool IsRelocationPossible() OVERRIDE { + return true; + } + + private: + VerificationResults* const verification_results_; + DexFileToMethodInlinerMap* const method_inliner_map_; +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc new file mode 100644 index 000000000..d87762d41 --- /dev/null +++ b/compiler/dex/verification_results.cc @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "verification_results.h" + +#include "base/logging.h" +#include "base/stl_util.h" +#include "base/mutex-inl.h" +#include "driver/compiler_driver.h" +#include "driver/compiler_options.h" +#include "thread.h" +#include "thread-inl.h" +#include "verified_method.h" +#include "verifier/method_verifier-inl.h" + +namespace art { + +VerificationResults::VerificationResults(const CompilerOptions* compiler_options) + : compiler_options_(compiler_options), + verified_methods_lock_("compiler verified methods lock"), + verified_methods_(), + rejected_classes_lock_("compiler rejected classes lock"), + rejected_classes_() { +} + +VerificationResults::~VerificationResults() { + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, verified_methods_lock_); + STLDeleteValues(&verified_methods_); + } +} + +void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) { + DCHECK(method_verifier != nullptr); + MethodReference ref = method_verifier->GetMethodReference(); + bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags()); + const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile); + if (verified_method == nullptr) { + // We'll punt this later. + return; + } + + WriterMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + if (it != verified_methods_.end()) { + // TODO: Investigate why are we doing the work again for this method and try to avoid it. + LOG(WARNING) << "Method processed more than once: " + << PrettyMethod(ref.dex_method_index, *ref.dex_file); + if (!Runtime::Current()->UseJitCompilation()) { + DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); + DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); + } + // Delete the new verified method since there was already an existing one registered. It + // is unsafe to replace the existing one since the JIT may be using it to generate a + // native GC map. + delete verified_method; + return; + } + verified_methods_.Put(ref, verified_method); + DCHECK(verified_methods_.find(ref) != verified_methods_.end()); +} + +const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) { + ReaderMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + return (it != verified_methods_.end()) ? it->second : nullptr; +} + +void VerificationResults::AddRejectedClass(ClassReference ref) { + { + WriterMutexLock mu(Thread::Current(), rejected_classes_lock_); + rejected_classes_.insert(ref); + } + DCHECK(IsClassRejected(ref)); +} + +bool VerificationResults::IsClassRejected(ClassReference ref) { + ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_); + return (rejected_classes_.find(ref) != rejected_classes_.end()); +} + +bool VerificationResults::IsCandidateForCompilation(MethodReference&, + const uint32_t access_flags) { + if (!compiler_options_->IsBytecodeCompilationEnabled()) { + return false; + } + // Don't compile class initializers unless kEverything. 
+  if ((compiler_options_->GetCompilerFilter() != CompilerFilter::kEverything) &&
+      ((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
+    return false;
+  }
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
new file mode 100644
index 000000000..1af11a811
--- /dev/null
+++ b/compiler/dex/verification_results.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_VERIFICATION_RESULTS_H_
+#define ART_COMPILER_DEX_VERIFICATION_RESULTS_H_
+
+#include <stdint.h>
+#include <set>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "class_reference.h"
+#include "method_reference.h"
+#include "safe_map.h"
+
+namespace art {
+
+namespace verifier {
+class MethodVerifier;
+}  // namespace verifier
+
+class CompilerOptions;
+class VerifiedMethod;
+
+// Used by CompilerCallbacks to track verification information from the Runtime.
+class VerificationResults {
+ public:
+  explicit VerificationResults(const CompilerOptions* compiler_options);
+  ~VerificationResults();
+
+  void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!verified_methods_lock_);
+
+  const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
+      REQUIRES(!verified_methods_lock_);
+
+  void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+  bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+
+  bool IsCandidateForCompilation(MethodReference& method_ref,
+                                 const uint32_t access_flags);
+
+ private:
+  const CompilerOptions* const compiler_options_;
+
+  // Verified methods.
+  typedef SafeMap<MethodReference, const VerifiedMethod*,
+                  MethodReferenceComparator> VerifiedMethodMap;
+  ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  VerifiedMethodMap verified_methods_ GUARDED_BY(verified_methods_lock_);
+
+  // Rejected classes.
+  ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_VERIFICATION_RESULTS_H_
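The class-initializer filter in IsCandidateForCompilation() above keys off two access flags; with the flag values from the dex specification, <clinit> is exactly a method that is both a constructor and static. A minimal illustration:

    #include <cstdint>

    constexpr uint32_t kAccStatic = 0x0008;        // ACC_STATIC
    constexpr uint32_t kAccConstructor = 0x10000;  // ACC_CONSTRUCTOR (dex-level flag)

    // True exactly for class initializers, i.e. <clinit>()V.
    bool IsClassInitializer(uint32_t access_flags) {
      return ((access_flags & kAccConstructor) != 0) &&
             ((access_flags & kAccStatic) != 0);
    }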
+ */ + +#include "verified_method.h" + +#include +#include +#include + +#include "art_method-inl.h" +#include "base/logging.h" +#include "base/stl_util.h" +#include "dex_file.h" +#include "dex_instruction-inl.h" +#include "dex_instruction_utils.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object-inl.h" +#include "utils.h" +#include "verifier/method_verifier-inl.h" +#include "verifier/reg_type-inl.h" +#include "verifier/register_line-inl.h" + +namespace art { + +VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw) + : encountered_error_types_(encountered_error_types), + has_runtime_throw_(has_runtime_throw) { +} + +const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier, + bool compile) { + std::unique_ptr verified_method( + new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(), + method_verifier->HasInstructionThatWillThrow())); + + if (compile) { + // TODO: move this out when DEX-to-DEX supports devirtualization. + if (method_verifier->HasVirtualOrInterfaceInvokes()) { + verified_method->GenerateDevirtMap(method_verifier); + } + + // Only need dequicken info for JIT so far. + if (Runtime::Current()->UseJitCompilation() && + !verified_method->GenerateDequickenMap(method_verifier)) { + return nullptr; + } + } + + if (method_verifier->HasCheckCasts()) { + verified_method->GenerateSafeCastSet(method_verifier); + } + + return verified_method.release(); +} + +const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const { + auto it = devirt_map_.find(dex_pc); + return (it != devirt_map_.end()) ? &it->second : nullptr; +} + +const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const { + DCHECK(Runtime::Current()->UseJitCompilation()); + auto it = dequicken_map_.find(dex_pc); + return (it != dequicken_map_.end()) ? &it->second : nullptr; +} + +bool VerifiedMethod::IsSafeCast(uint32_t pc) const { + return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc); +} + +bool VerifiedMethod::GenerateDequickenMap(verifier::MethodVerifier* method_verifier) { + if (method_verifier->HasFailures()) { + return false; + } + const DexFile::CodeItem* code_item = method_verifier->CodeItem(); + const uint16_t* insns = code_item->insns_; + const Instruction* inst = Instruction::At(insns); + const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_); + for (; inst < end; inst = inst->Next()) { + const bool is_virtual_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK; + const bool is_range_quick = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK; + if (is_virtual_quick || is_range_quick) { + uint32_t dex_pc = inst->GetDexPc(insns); + verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + ArtMethod* method = + method_verifier->GetQuickInvokedMethod(inst, line, is_range_quick, true); + if (method == nullptr) { + // It can be null if the line wasn't verified since it was unreachable. + return false; + } + // The verifier must know what the type of the object was or else we would have gotten a + // failure. Put the dex method index in the dequicken map since we need this to get number of + // arguments in the compiler. 
+ dequicken_map_.Put(dex_pc, DexFileReference(method->GetDexFile(), + method->GetDexMethodIndex())); + } else if (IsInstructionIGetQuickOrIPutQuick(inst->Opcode())) { + uint32_t dex_pc = inst->GetDexPc(insns); + verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + ArtField* field = method_verifier->GetQuickFieldAccess(inst, line); + if (field == nullptr) { + // It can be null if the line wasn't verified since it was unreachable. + return false; + } + // The verifier must know what the type of the field was or else we would have gotten a + // failure. Put the dex field index in the dequicken map since we need this for lowering + // in the compiler. + // TODO: Putting a field index in a method reference is gross. + dequicken_map_.Put(dex_pc, DexFileReference(field->GetDexFile(), field->GetDexFieldIndex())); + } + } + return true; +} + +void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) { + // It is risky to rely on reg_types for sharpening in cases of soft + // verification, we might end up sharpening to a wrong implementation. Just abort. + if (method_verifier->HasFailures()) { + return; + } + + const DexFile::CodeItem* code_item = method_verifier->CodeItem(); + const uint16_t* insns = code_item->insns_; + const Instruction* inst = Instruction::At(insns); + const Instruction* end = Instruction::At(insns + code_item->insns_size_in_code_units_); + + for (; inst < end; inst = inst->Next()) { + const bool is_virtual = inst->Opcode() == Instruction::INVOKE_VIRTUAL || + inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE; + const bool is_interface = inst->Opcode() == Instruction::INVOKE_INTERFACE || + inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE; + + if (!is_interface && !is_virtual) { + continue; + } + // Get reg type for register holding the reference to the object that will be dispatched upon. + uint32_t dex_pc = inst->GetDexPc(insns); + verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + const bool is_range = inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE || + inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE; + const verifier::RegType& + reg_type(line->GetRegisterType(method_verifier, + is_range ? inst->VRegC_3rc() : inst->VRegC_35c())); + + if (!reg_type.HasClass()) { + // We will compute devirtualization information only when we know the Class of the reg type. + continue; + } + mirror::Class* reg_class = reg_type.GetClass(); + if (reg_class->IsInterface()) { + // We can't devirtualize when the known type of the register is an interface. + continue; + } + if (reg_class->IsAbstract() && !reg_class->IsArrayClass()) { + // We can't devirtualize abstract classes except on arrays of abstract classes. + continue; + } + auto* cl = Runtime::Current()->GetClassLinker(); + size_t pointer_size = cl->GetImagePointerSize(); + ArtMethod* abstract_method = method_verifier->GetDexCache()->GetResolvedMethod( + is_range ? inst->VRegB_3rc() : inst->VRegB_35c(), pointer_size); + if (abstract_method == nullptr) { + // If the method is not found in the cache this means that it was never found + // by ResolveMethodAndCheckAccess() called when verifying invoke_*. + continue; + } + // Find the concrete method. 
+ ArtMethod* concrete_method = nullptr; + if (is_interface) { + concrete_method = reg_type.GetClass()->FindVirtualMethodForInterface( + abstract_method, pointer_size); + } + if (is_virtual) { + concrete_method = reg_type.GetClass()->FindVirtualMethodForVirtual( + abstract_method, pointer_size); + } + if (concrete_method == nullptr || !concrete_method->IsInvokable()) { + // In cases where concrete_method is not found, or is not invokable, continue to the next + // invoke. + continue; + } + if (reg_type.IsPreciseReference() || concrete_method->IsFinal() || + concrete_method->GetDeclaringClass()->IsFinal()) { + // If we knew exactly the class being dispatched upon, or if the target method cannot be + // overridden record the target to be used in the compiler driver. + devirt_map_.Put(dex_pc, concrete_method->ToMethodReference()); + } + } +} + +void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) { + /* + * Walks over the method code and adds any cast instructions in which + * the type cast is implicit to a set, which is used in the code generation + * to elide these casts. + */ + if (method_verifier->HasFailures()) { + return; + } + const DexFile::CodeItem* code_item = method_verifier->CodeItem(); + const Instruction* inst = Instruction::At(code_item->insns_); + const Instruction* end = Instruction::At(code_item->insns_ + + code_item->insns_size_in_code_units_); + + for (; inst < end; inst = inst->Next()) { + Instruction::Code code = inst->Opcode(); + if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) { + uint32_t dex_pc = inst->GetDexPc(code_item->insns_); + if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) { + // Do not attempt to quicken this instruction, it's unreachable anyway. + continue; + } + const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + bool is_safe_cast = false; + if (code == Instruction::CHECK_CAST) { + const verifier::RegType& reg_type(line->GetRegisterType(method_verifier, + inst->VRegA_21c())); + const verifier::RegType& cast_type = + method_verifier->ResolveCheckedClass(inst->VRegB_21c()); + is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type); + } else { + const verifier::RegType& array_type(line->GetRegisterType(method_verifier, + inst->VRegB_23x())); + // We only know its safe to assign to an array if the array type is precise. For example, + // an Object[] can have any type of object stored in it, but it may also be assigned a + // String[] in which case the stores need to be of Strings. + if (array_type.IsPreciseReference()) { + const verifier::RegType& value_type(line->GetRegisterType(method_verifier, + inst->VRegA_23x())); + const verifier::RegType& component_type = method_verifier->GetRegTypeCache() + ->GetComponentType(array_type, method_verifier->GetClassLoader()); + is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type); + } + } + if (is_safe_cast) { + // Verify ordering for push_back() to the sorted vector. + DCHECK(safe_cast_set_.empty() || safe_cast_set_.back() < dex_pc); + safe_cast_set_.push_back(dex_pc); + } + } + } +} + +} // namespace art diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h new file mode 100644 index 000000000..495acf07b --- /dev/null +++ b/compiler/dex/verified_method.h @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_VERIFIED_METHOD_H_
+#define ART_COMPILER_DEX_VERIFIED_METHOD_H_
+
+#include <vector>
+
+#include "base/mutex.h"
+#include "dex_file.h"
+#include "method_reference.h"
+#include "safe_map.h"
+
+namespace art {
+
+namespace verifier {
+class MethodVerifier;
+}  // namespace verifier
+
+class VerifiedMethod {
+ public:
+  // Cast elision set type.
+  // Since we're adding the dex PCs to the set in increasing order, a sorted vector
+  // is better for performance (not just memory usage), especially for large sets.
+  typedef std::vector<uint32_t> SafeCastSet;
+
+  // Devirtualization map type maps dex offset to concrete method reference.
+  typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap;
+
+  // Devirtualization map type maps dex offset to field / method idx.
+  typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
+
+  static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  ~VerifiedMethod() = default;
+
+  const DevirtualizationMap& GetDevirtMap() const {
+    return devirt_map_;
+  }
+
+  const SafeCastSet& GetSafeCastSet() const {
+    return safe_cast_set_;
+  }
+
+  // Returns the devirtualization target method, or null if none.
+  const MethodReference* GetDevirtTarget(uint32_t dex_pc) const;
+
+  // Returns the dequicken field / method for a quick invoke / field get. Returns null if there is
+  // no entry for that dex pc.
+  const DexFileReference* GetDequickenIndex(uint32_t dex_pc) const;
+
+  // Returns true if the cast can statically be verified to be redundant
+  // by using the check-cast elision peephole optimization in the verifier.
+  bool IsSafeCast(uint32_t pc) const;
+
+  // Returns true if there were any errors during verification.
+  bool HasVerificationFailures() const {
+    return encountered_error_types_ != 0;
+  }
+
+  uint32_t GetEncounteredVerificationFailures() const {
+    return encountered_error_types_;
+  }
+
+  bool HasRuntimeThrow() const {
+    return has_runtime_throw_;
+  }
+
+ private:
+  VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);
+
+  /*
+   * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
+   * verification). For type-precise determination we have all the data we need, so we just need to
+   * encode it in some clever fashion.
+   * Stores the data in dex_gc_map_, returns true on success and false on failure.
+   */
+  bool GenerateGcMap(verifier::MethodVerifier* method_verifier);
+
+  // Verify that the GC map associated with method_ is well formed.
+  static void VerifyGcMap(verifier::MethodVerifier* method_verifier,
+                          const std::vector<uint8_t>& data);
+
+  // Compute sizes for GC map data.
+  static void ComputeGcMapSizes(verifier::MethodVerifier* method_verifier,
+                                size_t* gc_points, size_t* ref_bitmap_bits,
+                                size_t* log2_max_gc_pc);
+
+  // Generate devirtualization map into devirt_map_.
+  void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Generate dequickening map into dequicken_map_. Returns false if there is an error.
+  bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Generate safe cast set into safe_cast_set_.
+  void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  DevirtualizationMap devirt_map_;
+  // Dequicken map is required for compiling quickened byte codes. The quicken maps from
+  // dex PC to dex method index or dex field index based on the instruction.
+  DequickenMap dequicken_map_;
+  SafeCastSet safe_cast_set_;
+
+  const uint32_t encountered_error_types_;
+  const bool has_runtime_throw_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_VERIFIED_METHOD_H_
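SafeCastSet above is deliberately a sorted std::vector rather than a tree-based set: dex PCs are appended in increasing order during the linear code walk, and IsSafeCast() then answers membership with std::binary_search. A self-contained sketch of that design choice (illustrative names, not the ART types):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    class SortedPcSet {
     public:
      // Caller appends in ascending order (a single forward pass over the
      // code), so insertion is O(1) and the vector stays sorted for free.
      void Append(uint32_t dex_pc) {
        pcs_.push_back(dex_pc);
      }
      bool Contains(uint32_t dex_pc) const {
        return std::binary_search(pcs_.begin(), pcs_.end(), dex_pc);
      }

     private:
      std::vector<uint32_t> pcs_;
    };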
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
new file mode 100644
index 000000000..a0a8f81c1
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.cc
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <ostream>
+
+#include "compiled_method_storage.h"
+
+#include "base/logging.h"
+#include "compiled_method.h"
+#include "thread-inl.h"
+#include "utils.h"
+#include "utils/dedupe_set-inl.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+namespace {  // anonymous namespace
+
+template <typename T>
+const LengthPrefixedArray<T>* CopyArray(SwapSpace* swap_space, const ArrayRef<const T>& array) {
+  DCHECK(!array.empty());
+  SwapAllocator<uint8_t> allocator(swap_space);
+  void* storage = allocator.allocate(LengthPrefixedArray<T>::ComputeSize(array.size()));
+  LengthPrefixedArray<T>* array_copy = new(storage) LengthPrefixedArray<T>(array.size());
+  std::copy(array.begin(), array.end(), array_copy->begin());
+  return array_copy;
+}
+
+template <typename T>
+void ReleaseArray(SwapSpace* swap_space, const LengthPrefixedArray<T>* array) {
+  SwapAllocator<uint8_t> allocator(swap_space);
+  size_t size = LengthPrefixedArray<T>::ComputeSize(array->size());
+  array->~LengthPrefixedArray();
+  allocator.deallocate(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(array)), size);
+}
+
+}  // anonymous namespace
+
+template <typename T, typename DedupeSetType>
+inline const LengthPrefixedArray<T>* CompiledMethodStorage::AllocateOrDeduplicateArray(
+    const ArrayRef<const T>& data,
+    DedupeSetType* dedupe_set) {
+  if (data.empty()) {
+    return nullptr;
+  } else if (!DedupeEnabled()) {
+    return CopyArray(swap_space_.get(), data);
+  } else {
+    return dedupe_set->Add(Thread::Current(), data);
+  }
+}
+
+template <typename T>
+inline void CompiledMethodStorage::ReleaseArrayIfNotDeduplicated(
+    const LengthPrefixedArray<T>* array) {
+  if (array != nullptr && !DedupeEnabled()) {
+    ReleaseArray(swap_space_.get(), array);
+  }
+}
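AllocateOrDeduplicateArray() above implements a three-way policy: an empty array becomes nullptr, a storage with dedupe disabled hands out a private copy that must be released, and otherwise identical arrays collapse onto one canonical copy owned by the dedupe set. A simplified byte-vector sketch of the same policy, with std::set standing in for the DedupeSet:

    #include <cstdint>
    #include <list>
    #include <set>
    #include <vector>

    class ByteStorage {
     public:
      explicit ByteStorage(bool dedupe_enabled) : dedupe_enabled_(dedupe_enabled) {}

      // empty -> nullptr; dedupe off -> private copy; dedupe on -> shared copy.
      const std::vector<uint8_t>* Intern(const std::vector<uint8_t>& data) {
        if (data.empty()) {
          return nullptr;
        }
        if (!dedupe_enabled_) {
          owned_.push_back(data);      // Caller must pair with a release step.
          return &owned_.back();       // std::list keeps addresses stable.
        }
        return &*canonical_.insert(data).first;  // Identical arrays share one copy.
      }

     private:
      const bool dedupe_enabled_;
      std::set<std::vector<uint8_t>> canonical_;  // Stand-in for the DedupeSet.
      std::list<std::vector<uint8_t>> owned_;
    };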
+template <typename ContentType>
+class CompiledMethodStorage::DedupeHashFunc {
+ private:
+  static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+  size_t operator()(const ArrayRef<ContentType>& array) const {
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+    // TODO: More reasonable assertion.
+    // static_assert(IsPowerOfTwo(sizeof(ContentType)),
+    //    "ContentType is not power of two, don't know whether array layout is as assumed");
+    uint32_t len = sizeof(ContentType) * array.size();
+    if (kUseMurmur3Hash) {
+      static constexpr uint32_t c1 = 0xcc9e2d51;
+      static constexpr uint32_t c2 = 0x1b873593;
+      static constexpr uint32_t r1 = 15;
+      static constexpr uint32_t r2 = 13;
+      static constexpr uint32_t m = 5;
+      static constexpr uint32_t n = 0xe6546b64;
+
+      uint32_t hash = 0;
+
+      const int nblocks = len / 4;
+      typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+      const unaligned_uint32_t* blocks = reinterpret_cast<const unaligned_uint32_t*>(data);
+      int i;
+      for (i = 0; i < nblocks; i++) {
+        uint32_t k = blocks[i];
+        k *= c1;
+        k = (k << r1) | (k >> (32 - r1));
+        k *= c2;
+
+        hash ^= k;
+        hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+      }
+
+      const uint8_t* tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+      uint32_t k1 = 0;
+
+      switch (len & 3) {
+        case 3:
+          k1 ^= tail[2] << 16;
+          FALLTHROUGH_INTENDED;
+        case 2:
+          k1 ^= tail[1] << 8;
+          FALLTHROUGH_INTENDED;
+        case 1:
+          k1 ^= tail[0];
+
+          k1 *= c1;
+          k1 = (k1 << r1) | (k1 >> (32 - r1));
+          k1 *= c2;
+          hash ^= k1;
+      }
+
+      hash ^= len;
+      hash ^= (hash >> 16);
+      hash *= 0x85ebca6b;
+      hash ^= (hash >> 13);
+      hash *= 0xc2b2ae35;
+      hash ^= (hash >> 16);
+
+      return hash;
+    } else {
+      size_t hash = 0x811c9dc5;
+      for (uint32_t i = 0; i < len; ++i) {
+        hash = (hash * 16777619) ^ data[i];
+      }
+      hash += hash << 13;
+      hash ^= hash >> 7;
+      hash += hash << 3;
+      hash ^= hash >> 17;
+      hash += hash << 5;
+      return hash;
+    }
+  }
+};
+
+template <typename T>
+class CompiledMethodStorage::LengthPrefixedArrayAlloc {
+ public:
+  explicit LengthPrefixedArrayAlloc(SwapSpace* swap_space)
+      : swap_space_(swap_space) {
+  }
+
+  const LengthPrefixedArray<T>* Copy(const ArrayRef<const T>& array) {
+    return CopyArray(swap_space_, array);
+  }
+
+  void Destroy(const LengthPrefixedArray<T>* array) {
+    ReleaseArray(swap_space_, array);
+  }
+
+ private:
+  SwapSpace* const swap_space_;
+};
+
+CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
+    : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
+      dedupe_enabled_(true),
+      dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_src_mapping_table_("dedupe source mapping table",
+                                LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
+      dedupe_vmap_table_("dedupe vmap table",
+                         LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+      dedupe_linker_patches_("dedupe linker patches",
+                             LengthPrefixedArrayAlloc<LinkerPatch>(swap_space_.get())) {
+}
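The Murmur3 block loop above reads 32-bit words from byte data through an alignment-1 typedef, a GCC/Clang-specific way to make unaligned loads well-defined. A portable equivalent of that one trick, for reference:

    #include <cstdint>
    #include <cstring>

    // Portable unaligned 32-bit load; compilers lower the memcpy to a single
    // move on architectures that allow unaligned access.
    inline uint32_t LoadUnaligned32(const uint8_t* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));
      return v;
    }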
+CompiledMethodStorage::~CompiledMethodStorage() {
+  // All done by member destructors.
+}
+
+void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) const {
+  if (swap_space_.get() != nullptr) {
+    const size_t swap_size = swap_space_->GetSize();
+    os << " swap=" << PrettySize(swap_size) << " (" << swap_size << "B)";
+  }
+  if (extended) {
+    Thread* self = Thread::Current();
+    os << "\nCode dedupe: " << dedupe_code_.DumpStats(self);
+    os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self);
+    os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self);
+  }
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCode(
+    const ArrayRef<const uint8_t>& code) {
+  return AllocateOrDeduplicateArray(code, &dedupe_code_);
+}
+
+void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray<uint8_t>* code) {
+  ReleaseArrayIfNotDeduplicated(code);
+}
+
+const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable(
+    const ArrayRef<const SrcMapElem>& src_map) {
+  return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_);
+}
+
+void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) {
+  ReleaseArrayIfNotDeduplicated(src_map);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
+    const ArrayRef<const uint8_t>& table) {
+  return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_);
+}
+
+void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table) {
+  ReleaseArrayIfNotDeduplicated(table);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCFIInfo(
+    const ArrayRef<const uint8_t>& cfi_info) {
+  return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_);
+}
+
+void CompiledMethodStorage::ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info) {
+  ReleaseArrayIfNotDeduplicated(cfi_info);
+}
+
+const LengthPrefixedArray<LinkerPatch>* CompiledMethodStorage::DeduplicateLinkerPatches(
+    const ArrayRef<const LinkerPatch>& linker_patches) {
+  return AllocateOrDeduplicateArray(linker_patches, &dedupe_linker_patches_);
+}
+
+void CompiledMethodStorage::ReleaseLinkerPatches(
+    const LengthPrefixedArray<LinkerPatch>* linker_patches) {
+  ReleaseArrayIfNotDeduplicated(linker_patches);
+}
+
+}  // namespace art
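Callers of this storage pair every Deduplicate*() with the matching Release*(); release is a no-op while dedupe is enabled, since the set owns the canonical copy, and frees the private copy otherwise. A hypothetical caller mirroring that discipline, assuming the CompiledMethodStorage API declared in the header below:

    #include <cstdint>

    // Hypothetical usage; 'storage' would come from the CompilerDriver.
    void InternAndRelease(art::CompiledMethodStorage* storage) {
      const uint8_t raw[] = { 0x01, 0x02, 0x03 };
      const art::LengthPrefixedArray<uint8_t>* code =
          storage->DeduplicateCode(art::ArrayRef<const uint8_t>(raw));
      // ... keep 'code' alive as long as the compiled method needs it ...
      storage->ReleaseCode(code);  // Frees only if this was a private copy.
    }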
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
new file mode 100644
index 000000000..8674abf81
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+
+#include <iosfwd>
+#include <memory>
+
+#include "base/length_prefixed_array.h"
+#include "base/macros.h"
+#include "utils/array_ref.h"
+#include "utils/dedupe_set.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+class LinkerPatch;
+class SrcMapElem;
+
+class CompiledMethodStorage {
+ public:
+  explicit CompiledMethodStorage(int swap_fd);
+  ~CompiledMethodStorage();
+
+  void DumpMemoryUsage(std::ostream& os, bool extended) const;
+
+  void SetDedupeEnabled(bool dedupe_enabled) {
+    dedupe_enabled_ = dedupe_enabled;
+  }
+  bool DedupeEnabled() const {
+    return dedupe_enabled_;
+  }
+
+  SwapAllocator<void> GetSwapSpaceAllocator() {
+    return SwapAllocator<void>(swap_space_.get());
+  }
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
+  void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
+
+  const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable(
+      const ArrayRef<const SrcMapElem>& src_map);
+  void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
+  void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
+
+  const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
+  void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info);
+
+  const LengthPrefixedArray<LinkerPatch>* DeduplicateLinkerPatches(
+      const ArrayRef<const LinkerPatch>& linker_patches);
+  void ReleaseLinkerPatches(const LengthPrefixedArray<LinkerPatch>* linker_patches);
+
+ private:
+  template <typename T, typename DedupeSetType>
+  const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data,
+                                                           DedupeSetType* dedupe_set);
+
+  template <typename T>
+  void ReleaseArrayIfNotDeduplicated(const LengthPrefixedArray<T>* array);
+
+  // DeDuplication data structures.
+  template <typename ContentType>
+  class DedupeHashFunc;
+
+  template <typename T>
+  class LengthPrefixedArrayAlloc;
+
+  template <typename T>
+  using ArrayDedupeSet = DedupeSet<ArrayRef<const T>,
+                                   LengthPrefixedArray<T>,
+                                   LengthPrefixedArrayAlloc<T>,
+                                   size_t,
+                                   DedupeHashFunc<const T>,
+                                   4>;
+
+  // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
+  // as other fields rely on this.
+  std::unique_ptr<SwapSpace> swap_space_;
+
+  bool dedupe_enabled_;
+
+  ArrayDedupeSet<uint8_t> dedupe_code_;
+  ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
+  ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
+  ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
+  ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
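The LengthPrefixedArray<T> that this header traffics in is a single allocation holding a length immediately followed by the elements, which is what CopyArray() materializes with placement new. A minimal byte-only analogue of that layout, a sketch that assumes no padding between header and payload and is not the ART class:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <new>

    struct LengthPrefixedBytes {
      explicit LengthPrefixedBytes(uint32_t size) : size_(size) {}
      static size_t ComputeSize(uint32_t n) {
        return sizeof(LengthPrefixedBytes) + n;  // Header plus payload.
      }
      uint8_t* data() { return reinterpret_cast<uint8_t*>(this + 1); }
      uint32_t size_;
    };

    // One allocation, header constructed in place, payload copied after it.
    // Caller releases with ::operator delete on the returned pointer.
    LengthPrefixedBytes* Copy(const uint8_t* src, uint32_t n) {
      void* storage = ::operator new(LengthPrefixedBytes::ComputeSize(n));
      LengthPrefixedBytes* arr = new (storage) LengthPrefixedBytes(n);
      std::memcpy(arr->data(), src, n);
      return arr;
    }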
+ */ + +#include <gtest/gtest.h> + +#include "compiled_method_storage.h" +#include "compiled_method.h" +#include "compiler_driver.h" +#include "compiler_options.h" +#include "dex/verification_results.h" +#include "dex/quick/dex_file_to_method_inliner_map.h" + +namespace art { + +TEST(CompiledMethodStorage, Deduplicate) { + CompilerOptions compiler_options; + VerificationResults verification_results(&compiler_options); + DexFileToMethodInlinerMap method_inliner_map; + CompilerDriver driver(&compiler_options, + &verification_results, + &method_inliner_map, + Compiler::kOptimizing, + /* instruction_set */ kNone, + /* instruction_set_features */ nullptr, + /* boot_image */ false, + /* app_image */ false, + /* image_classes */ nullptr, + /* compiled_classes */ nullptr, + /* compiled_methods */ nullptr, + /* thread_count */ 1u, + /* dump_stats */ false, + /* dump_passes */ false, + /* timer */ nullptr, + /* swap_fd */ -1, + /* profile_compilation_info */ nullptr); + CompiledMethodStorage* storage = driver.GetCompiledMethodStorage(); + + ASSERT_TRUE(storage->DedupeEnabled()); // The default. + + const uint8_t raw_code1[] = { 1u, 2u, 3u }; + const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u }; + ArrayRef<const uint8_t> code[] = { + ArrayRef<const uint8_t>(raw_code1), + ArrayRef<const uint8_t>(raw_code2), + }; + const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } }; + const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } }; + ArrayRef<const SrcMapElem> src_map[] = { + ArrayRef<const SrcMapElem>(raw_src_map1), + ArrayRef<const SrcMapElem>(raw_src_map2), + }; + const uint8_t raw_vmap_table1[] = { 2, 4, 6 }; + const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 }; + ArrayRef<const uint8_t> vmap_table[] = { + ArrayRef<const uint8_t>(raw_vmap_table1), + ArrayRef<const uint8_t>(raw_vmap_table2), + }; + const uint8_t raw_cfi_info1[] = { 1, 3, 5 }; + const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 }; + ArrayRef<const uint8_t> cfi_info[] = { + ArrayRef<const uint8_t>(raw_cfi_info1), + ArrayRef<const uint8_t>(raw_cfi_info2), + }; + const LinkerPatch raw_patches1[] = { + LinkerPatch::CodePatch(0u, nullptr, 1u), + LinkerPatch::MethodPatch(4u, nullptr, 1u), + }; + const LinkerPatch raw_patches2[] = { + LinkerPatch::CodePatch(0u, nullptr, 1u), + LinkerPatch::MethodPatch(4u, nullptr, 2u), + }; + ArrayRef<const LinkerPatch> patches[] = { + ArrayRef<const LinkerPatch>(raw_patches1), + ArrayRef<const LinkerPatch>(raw_patches2), + }; + + std::vector<CompiledMethod*> compiled_methods; + compiled_methods.reserve(1u << 7); + for (auto&& c : code) { + for (auto&& s : src_map) { + for (auto&& v : vmap_table) { + for (auto&& f : cfi_info) { + for (auto&& p : patches) { + compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( + &driver, kNone, c, 0u, 0u, 0u, s, v, f, p)); + } + } + } + } + } + constexpr size_t code_bit = 1u << 4; + constexpr size_t src_map_bit = 1u << 3; + constexpr size_t vmap_table_bit = 1u << 2; + constexpr size_t cfi_info_bit = 1u << 1; + constexpr size_t patches_bit = 1u << 0; + CHECK_EQ(compiled_methods.size(), 1u << 5);
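+ // The five inputs are permuted in loop order above (code outermost, patches innermost), so bit k of a method's index records which variant of input k it used. For example, indices 6 (0b00110) and 22 (0b10110) differ only in the code bit, so the loops below expect those two methods to share all deduplicated storage except the code array.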
+ for (size_t i = 0; i != compiled_methods.size(); ++i) { + for (size_t j = 0; j != compiled_methods.size(); ++j) { + CompiledMethod* lhs = compiled_methods[i]; + CompiledMethod* rhs = compiled_methods[j]; + bool same_code = ((i ^ j) & code_bit) == 0u; + bool same_src_map = ((i ^ j) & src_map_bit) == 0u; + bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u; + bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u; + bool same_patches = ((i ^ j) & patches_bit) == 0u; + ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data()) + << i << " " << j; + ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data()) + << i << " " << j; + ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data()) + << i << " " << j; + ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data()) + << i << " " << j; + ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data()) + << i << " " << j; + } + } + for (CompiledMethod* method : compiled_methods) { + CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method); + } +} + +} // namespace art diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h new file mode 100644 index 000000000..94f5acc2b --- /dev/null +++ b/compiler/driver/compiler_driver-inl.h @@ -0,0 +1,446 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_ +#define ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_ + +#include "compiler_driver.h" + +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "class_linker-inl.h" +#include "dex_compilation_unit.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache-inl.h" +#include "scoped_thread_state_change.h" +#include "handle_scope-inl.h" + +namespace art { + +inline mirror::DexCache* CompilerDriver::GetDexCache(const DexCompilationUnit* mUnit) { + return mUnit->GetClassLinker()->FindDexCache(Thread::Current(), *mUnit->GetDexFile(), false); +} + +inline mirror::ClassLoader* CompilerDriver::GetClassLoader(const ScopedObjectAccess& soa, + const DexCompilationUnit* mUnit) { + return soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader()); +} + +inline mirror::Class* CompilerDriver::ResolveClass( + const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, uint16_t cls_index, + const DexCompilationUnit* mUnit) { + DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); + DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); + mirror::Class* cls = mUnit->GetClassLinker()->ResolveType( + *mUnit->GetDexFile(), cls_index, dex_cache, class_loader); + DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending()); + if (UNLIKELY(cls == nullptr)) { + // Clean up any exception left by type resolution.
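+ // The compiler thread must not keep a pending exception (compilation CHECKs for this later); a null result simply means the class stays unresolved and callers fall back to a runtime resolution path.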
+ soa.Self()->ClearException(); + } + return cls; +} + +inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass( + const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) { + DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); + DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); + const DexFile::MethodId& referrer_method_id = + mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex()); + return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit); +} + +inline ArtField* CompilerDriver::ResolveFieldWithDexFile( + const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file, + uint32_t field_idx, bool is_static) { + DCHECK_EQ(dex_cache->GetDexFile(), dex_file); + ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField( + *dex_file, field_idx, dex_cache, class_loader, is_static); + DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending()); + if (UNLIKELY(resolved_field == nullptr)) { + // Clean up any exception left by field resolution. + soa.Self()->ClearException(); + return nullptr; + } + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + // ClassLinker can return a field of the wrong kind directly from the DexCache. + // Silently return null on such incompatible class change. + return nullptr; + } + return resolved_field; +} + +inline mirror::DexCache* CompilerDriver::FindDexCache(const DexFile* dex_file) { + return Runtime::Current()->GetClassLinker()->FindDexCache(Thread::Current(), *dex_file, false); +} + +inline ArtField* CompilerDriver::ResolveField( + const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, + uint32_t field_idx, bool is_static) { + DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); + return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx, + is_static); +} + +inline void CompilerDriver::GetResolvedFieldDexFileLocation( + ArtField* resolved_field, const DexFile** declaring_dex_file, + uint16_t* declaring_class_idx, uint16_t* declaring_field_idx) { + mirror::Class* declaring_class = resolved_field->GetDeclaringClass(); + *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile(); + *declaring_class_idx = declaring_class->GetDexTypeIndex(); + *declaring_field_idx = resolved_field->GetDexFieldIndex(); +} + +inline bool CompilerDriver::IsFieldVolatile(ArtField* field) { + return field->IsVolatile(); +} + +inline MemberOffset CompilerDriver::GetFieldOffset(ArtField* field) { + return field->GetOffset(); +} + +inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField( + mirror::DexCache* dex_cache, mirror::Class* referrer_class, + ArtField* resolved_field, uint16_t field_idx) { + DCHECK(!resolved_field->IsStatic()); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + bool fast_get = referrer_class != nullptr && + referrer_class->CanAccessResolvedField(fields_class, resolved_field, + dex_cache, field_idx); + bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class); + return std::make_pair(fast_get, fast_put); +} + +template <typename ArtMember> +inline bool CompilerDriver::CanAccessResolvedMember(mirror::Class* referrer_class ATTRIBUTE_UNUSED, + mirror::Class* access_to ATTRIBUTE_UNUSED, + ArtMember* member ATTRIBUTE_UNUSED, + mirror::DexCache* dex_cache ATTRIBUTE_UNUSED, + uint32_t field_idx ATTRIBUTE_UNUSED) { + // Not defined for ArtMember values other than ArtField or ArtMethod.
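+ // The unspecialized template must never be instantiated; the explicit specializations below dispatch to Class::CanAccessResolvedField and Class::CanAccessResolvedMethod respectively.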
+ UNREACHABLE(); +} + +template <> +inline bool CompilerDriver::CanAccessResolvedMember<ArtField>(mirror::Class* referrer_class, + mirror::Class* access_to, + ArtField* field, + mirror::DexCache* dex_cache, + uint32_t field_idx) { + return referrer_class->CanAccessResolvedField(access_to, field, dex_cache, field_idx); +} + +template <> +inline bool CompilerDriver::CanAccessResolvedMember<ArtMethod>( + mirror::Class* referrer_class, + mirror::Class* access_to, + ArtMethod* method, + mirror::DexCache* dex_cache, + uint32_t field_idx) { + return referrer_class->CanAccessResolvedMethod(access_to, method, dex_cache, field_idx); +} + +template <typename ArtMember> +inline std::pair<bool, bool> CompilerDriver::IsClassOfStaticMemberAvailableToReferrer( + mirror::DexCache* dex_cache, + mirror::Class* referrer_class, + ArtMember* resolved_member, + uint16_t member_idx, + uint32_t* storage_index) { + DCHECK(resolved_member->IsStatic()); + if (LIKELY(referrer_class != nullptr)) { + mirror::Class* members_class = resolved_member->GetDeclaringClass(); + if (members_class == referrer_class) { + *storage_index = members_class->GetDexTypeIndex(); + return std::make_pair(true, true); + } + if (CanAccessResolvedMember<ArtMember>( + referrer_class, members_class, resolved_member, dex_cache, member_idx)) { + // We have the resolved member; we must make it into an index for the referrer + // in its static storage (which may fail if it doesn't have a slot for it). + // TODO: for images we can elide the static storage base null check + // if we know there's a non-null entry in the image. + const DexFile* dex_file = dex_cache->GetDexFile(); + uint32_t storage_idx = DexFile::kDexNoIndex; + if (LIKELY(members_class->GetDexCache() == dex_cache)) { + // Common case where the dex caches of both the referrer and the member are the same; + // no need to search the dex file. + storage_idx = members_class->GetDexTypeIndex(); + } else { + // Search the dex file for a localized ssb index; this may fail if the member's class is a + // parent of the class mentioned in the dex file and there is no dex cache entry. + storage_idx = resolved_member->GetDeclaringClass()->FindTypeIndexInOtherDexFile(*dex_file); + } + if (storage_idx != DexFile::kDexNoIndex) { + *storage_index = storage_idx; + return std::make_pair(true, !resolved_member->IsFinal()); + } + } + } + // Conservative defaults. + *storage_index = DexFile::kDexNoIndex; + return std::make_pair(false, false); +} + +inline std::pair<bool, bool> CompilerDriver::IsFastStaticField( + mirror::DexCache* dex_cache, mirror::Class* referrer_class, + ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index) { + return IsClassOfStaticMemberAvailableToReferrer( + dex_cache, referrer_class, resolved_field, field_idx, storage_index); +} + +inline bool CompilerDriver::IsClassOfStaticMethodAvailableToReferrer( + mirror::DexCache* dex_cache, mirror::Class* referrer_class, + ArtMethod* resolved_method, uint16_t method_idx, uint32_t* storage_index) { + std::pair<bool, bool> result = IsClassOfStaticMemberAvailableToReferrer( + dex_cache, referrer_class, resolved_method, method_idx, storage_index); + // Only the first member of `result` is meaningful, as there is no + // "write access" to a method.
+ return result.first; +} + +inline bool CompilerDriver::IsStaticFieldInReferrerClass(mirror::Class* referrer_class, + ArtField* resolved_field) { + DCHECK(resolved_field->IsStatic()); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + return referrer_class == fields_class; +} + +inline bool CompilerDriver::CanAssumeClassIsInitialized(mirror::Class* klass) { + // Being loaded is a pre-requisite for being initialized but let's do the cheap check first. + // + // NOTE: When AOT compiling an app, we eagerly initialize app classes (and potentially their + // super classes in the boot image) but only those that have a trivial initialization, i.e. + // without <clinit>() or static values in the dex file for that class or any of its super + // classes. So while we could see the klass as initialized during AOT compilation and have + // it only loaded at runtime, the needed initialization would have to be trivial and + // unobservable from Java, so we may as well treat it as initialized. + if (!klass->IsInitialized()) { + return false; + } + return CanAssumeClassIsLoaded(klass); +} + +inline bool CompilerDriver::CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, + mirror::Class* klass) { + return (referrer_class != nullptr + && !referrer_class->IsInterface() + && referrer_class->IsSubClass(klass)) + || CanAssumeClassIsInitialized(klass); +} + +inline bool CompilerDriver::IsStaticFieldsClassInitialized(mirror::Class* referrer_class, + ArtField* resolved_field) { + DCHECK(resolved_field->IsStatic()); + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + return CanReferrerAssumeClassIsInitialized(referrer_class, fields_class); +} + +inline ArtMethod* CompilerDriver::ResolveMethod( + ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, + uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change) { + DCHECK_EQ(class_loader.Get(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); + ArtMethod* resolved_method = + check_incompatible_class_change + ? mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kForceICCECheck>( + *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type) + : mUnit->GetClassLinker()->ResolveMethod<ClassLinker::kNoICCECheckForCache>( + *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type); + if (UNLIKELY(resolved_method == nullptr)) { + DCHECK(soa.Self()->IsExceptionPending()); + // Clean up any exception left by method resolution.
+ soa.Self()->ClearException(); + } + return resolved_method; +} + +inline void CompilerDriver::GetResolvedMethodDexFileLocation( + ArtMethod* resolved_method, const DexFile** declaring_dex_file, + uint16_t* declaring_class_idx, uint16_t* declaring_method_idx) { + mirror::Class* declaring_class = resolved_method->GetDeclaringClass(); + *declaring_dex_file = declaring_class->GetDexCache()->GetDexFile(); + *declaring_class_idx = declaring_class->GetDexTypeIndex(); + *declaring_method_idx = resolved_method->GetDexMethodIndex(); +} + +inline uint16_t CompilerDriver::GetResolvedMethodVTableIndex( + ArtMethod* resolved_method, InvokeType type) { + if (type == kVirtual || type == kSuper) { + return resolved_method->GetMethodIndex(); + } else if (type == kInterface) { + return resolved_method->GetDexMethodIndex(); + } else { + return DexFile::kDexNoIndex16; + } +} + +inline int CompilerDriver::IsFastInvoke( + ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit, + mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type, + MethodReference* target_method, const MethodReference* devirt_target, + uintptr_t* direct_code, uintptr_t* direct_method) { + // Don't try to fast-path if we don't understand the caller's class. + // referrer_class is the class that this invoke is contained in. + if (UNLIKELY(referrer_class == nullptr)) { + return 0; + } + StackHandleScope<2> hs(soa.Self()); + // methods_class is the class referred to by the class_idx field of the MethodId that + // method_idx points to. + // For example in + // .class LABC; + // .super LDEF; + // .method hi()V + // ... + // invoke-super {p0}, LDEF;->hi()V + // ... + // .end method + // the referrer_class is 'ABC' and the methods_class is 'DEF'. Note that the methods class is + // 'DEF' even if 'DEF' inherits the method from its superclass. + Handle<mirror::Class> methods_class(hs.NewHandle(mUnit->GetClassLinker()->ResolveType( + *target_method->dex_file, + target_method->dex_file->GetMethodId(target_method->dex_method_index).class_idx_, + dex_cache, + class_loader))); + DCHECK(methods_class.Get() != nullptr); + mirror::Class* methods_declaring_class = resolved_method->GetDeclaringClass(); + if (UNLIKELY(!referrer_class->CanAccessResolvedMethod(methods_declaring_class, resolved_method, + dex_cache.Get(), + target_method->dex_method_index))) { + return 0; + } + // Sharpen a virtual call into a direct call when the target is known not to have been + // overridden (i.e. it is final). + const bool same_dex_file = target_method->dex_file == mUnit->GetDexFile(); + bool can_sharpen_virtual_based_on_type = same_dex_file && + (*invoke_type == kVirtual) && (resolved_method->IsFinal() || + methods_declaring_class->IsFinal()); + // For invoke-super, ensure the vtable index will be correct to dispatch in the vtable of + // the super class. + const size_t pointer_size = InstructionSetPointerSize(GetInstructionSet()); + // TODO: We should be able to sharpen if we are going into the boot image as well.
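+ // The conditions below also prove that the resolved method really is the entry in the superclass vtable slot and is invokable, so a direct dispatch is behaviorally equivalent to the runtime's super-call resolution.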
+ bool can_sharpen_super_based_on_type = same_dex_file && + (*invoke_type == kSuper) && + !methods_class->IsInterface() && + (referrer_class != methods_declaring_class) && + referrer_class->IsSubClass(methods_declaring_class) && + resolved_method->GetMethodIndex() < methods_declaring_class->GetVTableLength() && + (methods_declaring_class->GetVTableEntry( + resolved_method->GetMethodIndex(), pointer_size) == resolved_method) && + resolved_method->IsInvokable(); + // TODO We should be able to sharpen if we are going into the boot image as well. + bool can_sharpen_interface_super_based_on_type = same_dex_file && + (*invoke_type == kSuper) && + methods_class->IsInterface() && + methods_class->IsAssignableFrom(referrer_class) && + resolved_method->IsInvokable(); + + if (can_sharpen_virtual_based_on_type || + can_sharpen_super_based_on_type || + can_sharpen_interface_super_based_on_type) { + // Sharpen a virtual call into a direct call. The method_idx is into referrer's + // dex cache, check that this resolved method is where we expect it. + CHECK_EQ(target_method->dex_file, mUnit->GetDexFile()); + DCHECK_EQ(dex_cache.Get(), mUnit->GetClassLinker()->FindDexCache( + soa.Self(), *mUnit->GetDexFile(), false)); + CHECK_EQ(referrer_class->GetDexCache()->GetResolvedMethod( + target_method->dex_method_index, pointer_size), + resolved_method) << PrettyMethod(resolved_method); + int stats_flags = kFlagMethodResolved; + GetCodeAndMethodForDirectCall(/*out*/invoke_type, + kDirect, // Sharp type + false, // The dex cache is guaranteed to be available + referrer_class, resolved_method, + /*out*/&stats_flags, + target_method, + /*out*/direct_code, + /*out*/direct_method); + DCHECK_NE(*invoke_type, kSuper) << PrettyMethod(resolved_method); + if (*invoke_type == kDirect) { + stats_flags |= kFlagsMethodResolvedVirtualMadeDirect; + } + return stats_flags; + } + + if ((*invoke_type == kVirtual || *invoke_type == kInterface) && devirt_target != nullptr) { + // Post-verification callback recorded a more precise invoke target based on its type info. + ArtMethod* called_method; + ClassLinker* class_linker = mUnit->GetClassLinker(); + if (LIKELY(devirt_target->dex_file == mUnit->GetDexFile())) { + called_method = class_linker->ResolveMethod( + *devirt_target->dex_file, devirt_target->dex_method_index, dex_cache, class_loader, + nullptr, kVirtual); + } else { + auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file, + class_loader.Get()))); + called_method = class_linker->ResolveMethod( + *devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache, + class_loader, nullptr, kVirtual); + } + CHECK(called_method != nullptr); + CHECK(called_method->IsInvokable()); + int stats_flags = kFlagMethodResolved; + GetCodeAndMethodForDirectCall(/*out*/invoke_type, + kDirect, // Sharp type + true, // The dex cache may not be available + referrer_class, called_method, + /*out*/&stats_flags, + target_method, + /*out*/direct_code, + /*out*/direct_method); + DCHECK_NE(*invoke_type, kSuper); + if (*invoke_type == kDirect) { + stats_flags |= kFlagsMethodResolvedPreciseTypeDevirtualization; + } + return stats_flags; + } + + if (UNLIKELY(*invoke_type == kSuper)) { + // Unsharpened super calls are suspicious so go slow-path. + return 0; + } + + // Sharpening failed so generate a regular resolved method dispatch. 
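+ // This path deliberately keeps the original invoke type; the returned flags only record that the method was resolved at compile time.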
+ int stats_flags = kFlagMethodResolved; + GetCodeAndMethodForDirectCall(/*out*/invoke_type, + *invoke_type, // Sharp type + false, // The dex cache is guaranteed to be available + referrer_class, resolved_method, + /*out*/&stats_flags, + target_method, + /*out*/direct_code, + /*out*/direct_method); + return stats_flags; +} + +inline bool CompilerDriver::IsMethodsClassInitialized(mirror::Class* referrer_class, + ArtMethod* resolved_method) { + if (!resolved_method->IsStatic()) { + return true; + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + return CanReferrerAssumeClassIsInitialized(referrer_class, methods_class); +} + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_ diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc new file mode 100644 index 000000000..5cde93cb7 --- /dev/null +++ b/compiler/driver/compiler_driver.cc @@ -0,0 +1,2918 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "compiler_driver.h" + +#include <unistd.h> +#include <unordered_set> +#include <vector> + +#ifndef __APPLE__ +#include <malloc.h> // For mallinfo +#endif + +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "base/bit_vector.h" +#include "base/stl_util.h" +#include "base/systrace.h" +#include "base/time_utils.h" +#include "base/timing_logger.h" +#include "class_linker-inl.h" +#include "compiled_class.h" +#include "compiled_method.h" +#include "compiler.h" +#include "compiler_driver-inl.h" +#include "dex_compilation_unit.h" +#include "dex_file-inl.h" +#include "dex_instruction-inl.h" +#include "dex/dex_to_dex_compiler.h" +#include "dex/verification_results.h" +#include "dex/verified_method.h" +#include "dex/quick/dex_file_method_inliner.h" +#include "dex/quick/dex_file_to_method_inliner_map.h" +#include "driver/compiler_options.h" +#include "jni_internal.h" +#include "object_lock.h" +#include "profiler.h" +#include "runtime.h" +#include "gc/accounting/card_table-inl.h" +#include "gc/accounting/heap_bitmap.h" +#include "gc/space/image_space.h" +#include "gc/space/space.h" +#include "mirror/class_loader.h" +#include "mirror/class-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/throwable.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" +#include "handle_scope-inl.h" +#include "thread.h" +#include "thread_list.h" +#include "thread_pool.h" +#include "trampolines/trampoline_compiler.h" +#include "transaction.h" +#include "utils/array_ref.h" +#include "utils/dex_cache_arrays_layout-inl.h" +#include "utils/swap_space.h" +#include "verifier/method_verifier.h" +#include "verifier/method_verifier-inl.h" + +namespace art { + +static constexpr bool kTimeCompileMethod = !kIsDebugBuild; + +// Whether classes-to-compile and methods-to-compile are only applied to the boot image, or, when given, to all compilations.
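+// With the value below (true), those filters are honored only when compiling the boot image and are ignored for app compilations.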
+static constexpr bool kRestrictCompilationFiltersToImage = true; + +// Print additional info during profile guided compilation. +static constexpr bool kDebugProfileGuidedCompilation = false; + +static double Percentage(size_t x, size_t y) { + return 100.0 * (static_cast<double>(x)) / (static_cast<double>(x + y)); +} + +static void DumpStat(size_t x, size_t y, const char* str) { + if (x == 0 && y == 0) { + return; + } + LOG(INFO) << Percentage(x, y) << "% of " << str << " for " << (x + y) << " cases"; +} + +class CompilerDriver::AOTCompilationStats { + public: + AOTCompilationStats() + : stats_lock_("AOT compilation statistics lock"), + types_in_dex_cache_(0), types_not_in_dex_cache_(0), + strings_in_dex_cache_(0), strings_not_in_dex_cache_(0), + resolved_types_(0), unresolved_types_(0), + resolved_instance_fields_(0), unresolved_instance_fields_(0), + resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0), + type_based_devirtualization_(0), + safe_casts_(0), not_safe_casts_(0) { + for (size_t i = 0; i <= kMaxInvokeType; i++) { + resolved_methods_[i] = 0; + unresolved_methods_[i] = 0; + virtual_made_direct_[i] = 0; + direct_calls_to_boot_[i] = 0; + direct_methods_to_boot_[i] = 0; + } + } + + void Dump() { + DumpStat(types_in_dex_cache_, types_not_in_dex_cache_, "types known to be in dex cache"); + DumpStat(strings_in_dex_cache_, strings_not_in_dex_cache_, "strings known to be in dex cache"); + DumpStat(resolved_types_, unresolved_types_, "types resolved"); + DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved"); + DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_, + "static fields resolved"); + DumpStat(resolved_local_static_fields_, resolved_static_fields_ + unresolved_static_fields_, + "static fields local to a class"); + DumpStat(safe_casts_, not_safe_casts_, "check-casts removed based on type information"); + // Note, the code below subtracts the stat value so that when added to the stat value we have + // 100% of samples. TODO: clean this up. + DumpStat(type_based_devirtualization_, + resolved_methods_[kVirtual] + unresolved_methods_[kVirtual] + + resolved_methods_[kInterface] + unresolved_methods_[kInterface] - + type_based_devirtualization_, + "virtual/interface calls made direct based on type information"); + + for (size_t i = 0; i <= kMaxInvokeType; i++) { + std::ostringstream oss; + oss << static_cast<InvokeType>(i) << " methods were AOT resolved"; + DumpStat(resolved_methods_[i], unresolved_methods_[i], oss.str().c_str()); + if (virtual_made_direct_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast<InvokeType>(i) << " methods made direct"; + DumpStat(virtual_made_direct_[i], + resolved_methods_[i] + unresolved_methods_[i] - virtual_made_direct_[i], + oss2.str().c_str()); + } + if (direct_calls_to_boot_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast<InvokeType>(i) << " method calls are direct into boot"; + DumpStat(direct_calls_to_boot_[i], + resolved_methods_[i] + unresolved_methods_[i] - direct_calls_to_boot_[i], + oss2.str().c_str()); + } + if (direct_methods_to_boot_[i] > 0) { + std::ostringstream oss2; + oss2 << static_cast<InvokeType>(i) << " method calls have methods in boot"; + DumpStat(direct_methods_to_boot_[i], + resolved_methods_[i] + unresolved_methods_[i] - direct_methods_to_boot_[i], + oss2.str().c_str()); + } + } + } + +// Allow lossy statistics in non-debug builds.
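+// When the lock is compiled away (release builds), concurrent increments may race, so the dumped counts are best-effort approximations rather than exact totals.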
+#ifndef NDEBUG +#define STATS_LOCK() MutexLock mu(Thread::Current(), stats_lock_) +#else +#define STATS_LOCK() +#endif + + void TypeInDexCache() REQUIRES(!stats_lock_) { + STATS_LOCK(); + types_in_dex_cache_++; + } + + void TypeNotInDexCache() REQUIRES(!stats_lock_) { + STATS_LOCK(); + types_not_in_dex_cache_++; + } + + void StringInDexCache() REQUIRES(!stats_lock_) { + STATS_LOCK(); + strings_in_dex_cache_++; + } + + void StringNotInDexCache() REQUIRES(!stats_lock_) { + STATS_LOCK(); + strings_not_in_dex_cache_++; + } + + void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) { + STATS_LOCK(); + resolved_types_++; + } + + void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) { + STATS_LOCK(); + unresolved_types_++; + } + + void ResolvedInstanceField() REQUIRES(!stats_lock_) { + STATS_LOCK(); + resolved_instance_fields_++; + } + + void UnresolvedInstanceField() REQUIRES(!stats_lock_) { + STATS_LOCK(); + unresolved_instance_fields_++; + } + + void ResolvedLocalStaticField() REQUIRES(!stats_lock_) { + STATS_LOCK(); + resolved_local_static_fields_++; + } + + void ResolvedStaticField() REQUIRES(!stats_lock_) { + STATS_LOCK(); + resolved_static_fields_++; + } + + void UnresolvedStaticField() REQUIRES(!stats_lock_) { + STATS_LOCK(); + unresolved_static_fields_++; + } + + // Indicate that type information from the verifier led to devirtualization. + void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) { + STATS_LOCK(); + type_based_devirtualization_++; + } + + // Indicate that a method of the given type was resolved at compile time. + void ResolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + resolved_methods_[type]++; + } + + // Indicate that a method of the given type was unresolved at compile time as it was in an + // unknown dex file. + void UnresolvedMethod(InvokeType type) REQUIRES(!stats_lock_) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + unresolved_methods_[type]++; + } + + // Indicate that a type of virtual method dispatch has been converted into a direct method + // dispatch. + void VirtualMadeDirect(InvokeType type) REQUIRES(!stats_lock_) { + DCHECK(type == kVirtual || type == kInterface || type == kSuper); + STATS_LOCK(); + virtual_made_direct_[type]++; + } + + // Indicate that a method of the given type was able to call directly into boot. + void DirectCallsToBoot(InvokeType type) REQUIRES(!stats_lock_) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + direct_calls_to_boot_[type]++; + } + + // Indicate that a method of the given type was able to be resolved directly from boot. + void DirectMethodsToBoot(InvokeType type) REQUIRES(!stats_lock_) { + DCHECK_LE(type, kMaxInvokeType); + STATS_LOCK(); + direct_methods_to_boot_[type]++; + } + + void ProcessedInvoke(InvokeType type, int flags) REQUIRES(!stats_lock_) { + STATS_LOCK(); + if (flags == 0) { + unresolved_methods_[type]++; + } else { + DCHECK_NE((flags & kFlagMethodResolved), 0); + resolved_methods_[type]++; + if ((flags & kFlagVirtualMadeDirect) != 0) { + virtual_made_direct_[type]++; + if ((flags & kFlagPreciseTypeDevirtualization) != 0) { + type_based_devirtualization_++; + } + } else { + DCHECK_EQ((flags & kFlagPreciseTypeDevirtualization), 0); + } + if ((flags & kFlagDirectCallToBoot) != 0) { + direct_calls_to_boot_[type]++; + } + if ((flags & kFlagDirectMethodToBoot) != 0) { + direct_methods_to_boot_[type]++; + } + } + } + + // A check-cast could be eliminated due to verifier type analysis. 
+ void SafeCast() REQUIRES(!stats_lock_) { + STATS_LOCK(); + safe_casts_++; + } + + // A check-cast couldn't be eliminated due to verifier type analysis. + void NotASafeCast() REQUIRES(!stats_lock_) { + STATS_LOCK(); + not_safe_casts_++; + } + + private: + Mutex stats_lock_; + + size_t types_in_dex_cache_; + size_t types_not_in_dex_cache_; + + size_t strings_in_dex_cache_; + size_t strings_not_in_dex_cache_; + + size_t resolved_types_; + size_t unresolved_types_; + + size_t resolved_instance_fields_; + size_t unresolved_instance_fields_; + + size_t resolved_local_static_fields_; + size_t resolved_static_fields_; + size_t unresolved_static_fields_; + // Type based devirtualization for invoke interface and virtual. + size_t type_based_devirtualization_; + + size_t resolved_methods_[kMaxInvokeType + 1]; + size_t unresolved_methods_[kMaxInvokeType + 1]; + size_t virtual_made_direct_[kMaxInvokeType + 1]; + size_t direct_calls_to_boot_[kMaxInvokeType + 1]; + size_t direct_methods_to_boot_[kMaxInvokeType + 1]; + + size_t safe_casts_; + size_t not_safe_casts_; + + DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats); +}; + +class CompilerDriver::DexFileMethodSet { + public: + explicit DexFileMethodSet(const DexFile& dex_file) + : dex_file_(dex_file), + method_indexes_(dex_file.NumMethodIds(), false, Allocator::GetMallocAllocator()) { + } + DexFileMethodSet(DexFileMethodSet&& other) = default; + + const DexFile& GetDexFile() const { return dex_file_; } + + BitVector& GetMethodIndexes() { return method_indexes_; } + const BitVector& GetMethodIndexes() const { return method_indexes_; } + + private: + const DexFile& dex_file_; + BitVector method_indexes_; +}; + +CompilerDriver::CompilerDriver( + const CompilerOptions* compiler_options, + VerificationResults* verification_results, + DexFileToMethodInlinerMap* method_inliner_map, + Compiler::Kind compiler_kind, + InstructionSet instruction_set, + const InstructionSetFeatures* instruction_set_features, + bool boot_image, + bool app_image, + std::unordered_set<std::string>* image_classes, + std::unordered_set<std::string>* compiled_classes, + std::unordered_set<std::string>* compiled_methods, + size_t thread_count, + bool dump_stats, + bool dump_passes, + CumulativeLogger* timer, + int swap_fd, + const ProfileCompilationInfo* profile_compilation_info) + : compiler_options_(compiler_options), + verification_results_(verification_results), + method_inliner_map_(method_inliner_map), + compiler_(Compiler::Create(this, compiler_kind)), + compiler_kind_(compiler_kind), + instruction_set_(instruction_set), + instruction_set_features_(instruction_set_features), + requires_constructor_barrier_lock_("constructor barrier lock"), + compiled_classes_lock_("compiled classes lock"), + compiled_methods_lock_("compiled method lock"), + compiled_methods_(MethodTable::key_compare()), + non_relative_linker_patch_count_(0u), + boot_image_(boot_image), + app_image_(app_image), + image_classes_(image_classes), + classes_to_compile_(compiled_classes), + methods_to_compile_(compiled_methods), + had_hard_verifier_failure_(false), + parallel_thread_count_(thread_count), + stats_(new AOTCompilationStats), + dump_stats_(dump_stats), + dump_passes_(dump_passes), + timings_logger_(timer), + compiler_context_(nullptr), + support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64), + dex_files_for_oat_file_(nullptr), + compiled_method_storage_(swap_fd), + profile_compilation_info_(profile_compilation_info), + max_arena_alloc_(0), + dex_to_dex_references_lock_("dex-to-dex references lock"), +
dex_to_dex_references_(), + current_dex_to_dex_methods_(nullptr) { + DCHECK(compiler_options_ != nullptr); + DCHECK(method_inliner_map_ != nullptr); + + compiler_->Init(); + + if (compiler_options->VerifyOnlyProfile()) { + CHECK(profile_compilation_info_ != nullptr) << "Requires profile"; + } + if (boot_image_) { + CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image"; + } +} + +CompilerDriver::~CompilerDriver() { + Thread* self = Thread::Current(); + { + MutexLock mu(self, compiled_classes_lock_); + STLDeleteValues(&compiled_classes_); + } + { + MutexLock mu(self, compiled_methods_lock_); + for (auto& pair : compiled_methods_) { + CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, pair.second); + } + } + compiler_->UnInit(); +} + + +#define CREATE_TRAMPOLINE(type, abi, offset) \ + if (Is64BitInstructionSet(instruction_set_)) { \ + return CreateTrampoline64(instruction_set_, abi, \ + type ## _ENTRYPOINT_OFFSET(8, offset)); \ + } else { \ + return CreateTrampoline32(instruction_set_, abi, \ + type ## _ENTRYPOINT_OFFSET(4, offset)); \ + } + +std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const { + CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup) +} + +std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickGenericJniTrampoline() + const { + CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickGenericJniTrampoline) +} + +std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickImtConflictTrampoline() + const { + CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickImtConflictTrampoline) +} + +std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickResolutionTrampoline() + const { + CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickResolutionTrampoline) +} + +std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickToInterpreterBridge() + const { + CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickToInterpreterBridge) +} +#undef CREATE_TRAMPOLINE + +void CompilerDriver::CompileAll(jobject class_loader, + const std::vector<const DexFile*>& dex_files, + TimingLogger* timings) { + DCHECK(!Runtime::Current()->IsStarted()); + + InitializeThreadPools(); + + VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false); + // Precompile: + // 1) Load image classes + // 2) Resolve all classes + // 3) Attempt to verify all classes + // 4) Attempt to initialize image classes, and trivially initialized classes + PreCompile(class_loader, dex_files, timings); + // Compile: + // 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex + // compilation. + if (!GetCompilerOptions().VerifyAtRuntime()) { + Compile(class_loader, dex_files, timings); + } + if (dump_stats_) { + stats_->Dump(); + } + + FreeThreadPools(); +} + +static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( + Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader, + const DexFile& dex_file, const DexFile::ClassDef& class_def) + SHARED_REQUIRES(Locks::mutator_lock_) { + auto* const runtime = Runtime::Current(); + if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) { + // Verify at runtime shouldn't dex-to-dex compile since we didn't resolve or verify.
+ return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; + } + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = runtime->GetClassLinker(); + mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader); + if (klass == nullptr) { + CHECK(self->IsExceptionPending()); + self->ClearException(); + return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; + } + // DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic + // references with actual offsets. We cannot re-verify such instructions. + // + // We store the verification information in the class status in the oat file, which the linker + // can validate (checksums) and use to skip load-time verification. It is thus safe to + // optimize when a class has been fully verified before. + if (klass->IsVerified()) { + // Class is verified so we can enable DEX-to-DEX compilation for performance. + return optimizer::DexToDexCompilationLevel::kOptimize; + } else if (klass->IsCompileTimeVerified()) { + // Class verification has soft-failed. Anyway, ensure at least correctness. + DCHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime); + return optimizer::DexToDexCompilationLevel::kRequired; + } else { + // Class verification has failed: do not run DEX-to-DEX compilation. + return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; + } +} + +static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( + Thread* self, + const CompilerDriver& driver, + jobject jclass_loader, + const DexFile& dex_file, + const DexFile::ClassDef& class_def) { + ScopedObjectAccess soa(self); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::ClassLoader> class_loader( + hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); + return GetDexToDexCompilationLevel(self, driver, class_loader, dex_file, class_def); +} + +// Does the runtime for the InstructionSet provide an implementation returned by +// GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler? +static bool InstructionSetHasGenericJniStub(InstructionSet isa) { + switch (isa) { + case kArm: + case kArm64: + case kThumb2: + case kMips: + case kMips64: + case kX86: + case kX86_64: return true; + default: return false; + } +} + +static void CompileMethod(Thread* self, + CompilerDriver* driver, + const DexFile::CodeItem* code_item, + uint32_t access_flags, + InvokeType invoke_type, + uint16_t class_def_idx, + uint32_t method_idx, + jobject class_loader, + const DexFile& dex_file, + optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level, + bool compilation_enabled, + Handle<mirror::DexCache> dex_cache) + REQUIRES(!driver->compiled_methods_lock_) { + DCHECK(driver != nullptr); + CompiledMethod* compiled_method = nullptr; + uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0; + MethodReference method_ref(&dex_file, method_idx); + + if (driver->GetCurrentDexToDexMethods() != nullptr) { + // This is the second pass when we dex-to-dex compile previously marked methods. + // TODO: Refactor the compilation to avoid having to distinguish the two passes + // here. That should be done on a higher level. http://b/29089975 + if (driver->GetCurrentDexToDexMethods()->IsBitSet(method_idx)) { + const VerifiedMethod* verified_method = + driver->GetVerificationResults()->GetVerifiedMethod(method_ref); + // Do not optimize if a VerifiedMethod is missing. SafeCast elision, + // for example, relies on it.
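+ // Without it the method still gets the kRequired level, i.e. only transformations that need no verification data; quickening (kOptimize) is skipped.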
+ compiled_method = optimizer::ArtCompileDEX( + driver, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + class_loader, + dex_file, + (verified_method != nullptr) + ? dex_to_dex_compilation_level + : optimizer::DexToDexCompilationLevel::kRequired); + } + } else if ((access_flags & kAccNative) != 0) { + // Are we extracting only and have support for generic JNI down calls? + if (!driver->GetCompilerOptions().IsJniCompilationEnabled() && + InstructionSetHasGenericJniStub(driver->GetInstructionSet())) { + // Leaving this empty will trigger the generic JNI version. + } else { + compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file); + CHECK(compiled_method != nullptr); + } + } else if ((access_flags & kAccAbstract) != 0) { + // Abstract methods don't have code. + } else { + const VerifiedMethod* verified_method = + driver->GetVerificationResults()->GetVerifiedMethod(method_ref); + bool compile = compilation_enabled && + // Basic checks, e.g., not <clinit>. + driver->GetVerificationResults() + ->IsCandidateForCompilation(method_ref, access_flags) && + // Did not fail to create VerifiedMethod metadata. + verified_method != nullptr && + // Do not have failures that should punt to the interpreter. + !verified_method->HasRuntimeThrow() && + (verified_method->GetEncounteredVerificationFailures() & + (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 && + // Is eligible for compilation by the methods-to-compile filter. + driver->IsMethodToCompile(method_ref) && + driver->ShouldCompileBasedOnProfile(method_ref); + + if (compile) { + // NOTE: if the compiler declines to compile this method, it will return null. + compiled_method = driver->GetCompiler()->Compile(code_item, access_flags, invoke_type, + class_def_idx, method_idx, class_loader, + dex_file, dex_cache); + } + if (compiled_method == nullptr && + dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) { + DCHECK(!Runtime::Current()->UseJitCompilation()); + // TODO: add a command-line option to disable DEX-to-DEX compilation? + driver->MarkForDexToDexCompilation(self, method_ref); + } + } + if (kTimeCompileMethod) { + uint64_t duration_ns = NanoTime() - start_ns; + if (duration_ns > MsToNs(driver->GetCompiler()->GetMaximumCompilationTimeBeforeWarning())) { + LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file) + << " took " << PrettyDuration(duration_ns); + } + } + + if (compiled_method != nullptr) { + // Count non-relative linker patches.
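+ // PC-relative patches can be applied without knowing the final load address, so only the remaining (absolute) patches are counted; compiling PIC requires that count to be zero, which is checked below.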
+ size_t non_relative_linker_patch_count = 0u; + for (const LinkerPatch& patch : compiled_method->GetPatches()) { + if (!patch.IsPcRelative()) { + ++non_relative_linker_patch_count; + } + } + bool compile_pic = driver->GetCompilerOptions().GetCompilePic(); // Off by default. + // When compiling with PIC, there should be zero non-relative linker patches. + CHECK(!compile_pic || non_relative_linker_patch_count == 0u); + + driver->AddCompiledMethod(method_ref, compiled_method, non_relative_linker_patch_count); + } + + if (self->IsExceptionPending()) { + ScopedObjectAccess soa(self); + LOG(FATAL) << "Unexpected exception compiling: " << PrettyMethod(method_idx, dex_file) << "\n" + << self->GetException()->Dump(); + } +} + +void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) { + DCHECK(!Runtime::Current()->IsStarted()); + jobject jclass_loader; + const DexFile* dex_file; + uint16_t class_def_idx; + uint32_t method_idx = method->GetDexMethodIndex(); + uint32_t access_flags = method->GetAccessFlags(); + InvokeType invoke_type = method->GetInvokeType(); + StackHandleScope<1> hs(self); + Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache())); + { + ScopedObjectAccessUnchecked soa(self); + ScopedLocalRef<jobject> local_class_loader( + soa.Env(), soa.AddLocalReference<jobject>(method->GetDeclaringClass()->GetClassLoader())); + jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get()); + // Find the dex file. + dex_file = method->GetDexFile(); + class_def_idx = method->GetClassDefIndex(); + } + const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset()); + + // Go to native so that we don't block GC during compilation. + ScopedThreadSuspension sts(self, kNative); + + std::vector<const DexFile*> dex_files; + dex_files.push_back(dex_file); + + InitializeThreadPools(); + + PreCompile(jclass_loader, dex_files, timings); + + // Can we run the DEX-to-DEX compiler on this class? + optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = + GetDexToDexCompilationLevel(self, + *this, + jclass_loader, + *dex_file, + dex_file->GetClassDef(class_def_idx)); + + DCHECK(current_dex_to_dex_methods_ == nullptr); + CompileMethod(self, + this, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + jclass_loader, + *dex_file, + dex_to_dex_compilation_level, + true, + dex_cache); + + ArrayRef<DexFileMethodSet> dex_to_dex_references; + { + // From this point on, we shall not modify dex_to_dex_references_, so + // just grab a reference to it that we use without holding the mutex. + MutexLock lock(Thread::Current(), dex_to_dex_references_lock_); + dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_); + } + if (!dex_to_dex_references.empty()) { + DCHECK_EQ(dex_to_dex_references.size(), 1u); + DCHECK(&dex_to_dex_references[0].GetDexFile() == dex_file); + current_dex_to_dex_methods_ = &dex_to_dex_references.front().GetMethodIndexes(); + DCHECK(current_dex_to_dex_methods_->IsBitSet(method_idx)); + DCHECK_EQ(current_dex_to_dex_methods_->NumSetBits(), 1u); + CompileMethod(self, + this, + code_item, + access_flags, + invoke_type, + class_def_idx, + method_idx, + jclass_loader, + *dex_file, + dex_to_dex_compilation_level, + true, + dex_cache); + current_dex_to_dex_methods_ = nullptr; + } + + FreeThreadPools(); + + self->GetJniEnv()->DeleteGlobalRef(jclass_loader); +} + +void CompilerDriver::Resolve(jobject class_loader, + const std::vector<const DexFile*>& dex_files, + TimingLogger* timings) { + // Resolution allocates classes and needs to run single-threaded to be deterministic.
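+ // With multiple threads, classes would be resolved and allocated in a timing-dependent order, so two runs could produce different (though equally valid) images.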
+ bool force_determinism = GetCompilerOptions().IsForceDeterminism(); + ThreadPool* resolve_thread_pool = force_determinism + ? single_thread_pool_.get() + : parallel_thread_pool_.get(); + size_t resolve_thread_count = force_determinism ? 1U : parallel_thread_count_; + + for (size_t i = 0; i != dex_files.size(); ++i) { + const DexFile* dex_file = dex_files[i]; + CHECK(dex_file != nullptr); + ResolveDexFile(class_loader, + *dex_file, + dex_files, + resolve_thread_pool, + resolve_thread_count, + timings); + } +} + +// Resolve const-strings in the code. Done to have deterministic allocation behavior. Right now +// this is single-threaded for simplicity. +// TODO: Collect the relevant string indices in parallel, then allocate them sequentially in a +// stable order. + +static void ResolveConstStrings(CompilerDriver* driver, + const DexFile& dex_file, + const DexFile::CodeItem* code_item) { + if (code_item == nullptr) { + // Abstract or native method. + return; + } + + const uint16_t* code_ptr = code_item->insns_; + const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_; + + while (code_ptr < code_end) { + const Instruction* inst = Instruction::At(code_ptr); + switch (inst->Opcode()) { + case Instruction::CONST_STRING: { + uint32_t string_index = inst->VRegB_21c(); + driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index); + break; + } + case Instruction::CONST_STRING_JUMBO: { + uint32_t string_index = inst->VRegB_31c(); + driver->CanAssumeStringIsPresentInDexCache(dex_file, string_index); + break; + } + + default: + break; + } + + code_ptr += inst->SizeInCodeUnits(); + } +} + +static void ResolveConstStrings(CompilerDriver* driver, + const std::vector& dex_files, + TimingLogger* timings) { + for (const DexFile* dex_file : dex_files) { + TimingLogger::ScopedTiming t("Resolve const-string Strings", timings); + + size_t class_def_count = dex_file->NumClassDefs(); + for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) { + const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index); + + const uint8_t* class_data = dex_file->GetClassData(class_def); + if (class_data == nullptr) { + // empty class, probably a marker interface + continue; + } + + ClassDataItemIterator it(*dex_file, class_data); + // Skip fields + while (it.HasNextStaticField()) { + it.Next(); + } + while (it.HasNextInstanceField()) { + it.Next(); + } + + bool compilation_enabled = driver->IsClassToCompile( + dex_file->StringByTypeIdx(class_def.class_idx_)); + if (!compilation_enabled) { + // Compilation is skipped, do not resolve const-string in code of this class. + // TODO: Make sure that inlining honors this. + continue; + } + + // Direct methods. + int64_t previous_direct_method_idx = -1; + while (it.HasNextDirectMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_direct_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_direct_method_idx = method_idx; + ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem()); + it.Next(); + } + // Virtual methods. 
+ int64_t previous_virtual_method_idx = -1; + while (it.HasNextVirtualMethod()) { + uint32_t method_idx = it.GetMemberIndex(); + if (method_idx == previous_virtual_method_idx) { + // smali can create dex files with two encoded_methods sharing the same method_idx + // http://code.google.com/p/smali/issues/detail?id=119 + it.Next(); + continue; + } + previous_virtual_method_idx = method_idx; + ResolveConstStrings(driver, *dex_file, it.GetMethodCodeItem()); + it.Next(); + } + DCHECK(!it.HasNext()); + } + } +} + +inline void CompilerDriver::CheckThreadPools() { + DCHECK(parallel_thread_pool_ != nullptr); + DCHECK(single_thread_pool_ != nullptr); +} + +void CompilerDriver::PreCompile(jobject class_loader, + const std::vector& dex_files, + TimingLogger* timings) { + CheckThreadPools(); + + LoadImageClasses(timings); + VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false); + + const bool verification_enabled = compiler_options_->IsVerificationEnabled(); + const bool never_verify = compiler_options_->NeverVerify(); + const bool verify_only_profile = compiler_options_->VerifyOnlyProfile(); + + // We need to resolve for never_verify since it needs to run dex to dex to add the + // RETURN_VOID_NO_BARRIER. + // Let the verifier resolve as needed for the verify_only_profile case. + if ((never_verify || verification_enabled) && !verify_only_profile) { + Resolve(class_loader, dex_files, timings); + VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false); + } + + if (never_verify) { + VLOG(compiler) << "Verify none mode specified, skipping verification."; + SetVerified(class_loader, dex_files, timings); + } + + if (!verification_enabled) { + return; + } + + if (GetCompilerOptions().IsForceDeterminism() && IsBootImage()) { + // Resolve strings from const-string. Do this now to have a deterministic image. + ResolveConstStrings(this, dex_files, timings); + VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false); + } + + Verify(class_loader, dex_files, timings); + VLOG(compiler) << "Verify: " << GetMemoryUsageString(false); + + if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) { + LOG(FATAL) << "Had a hard failure verifying all classes, and was asked to abort in such " + << "situations. Please check the log."; + } + + InitializeClasses(class_loader, dex_files, timings); + VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false); + + UpdateImageClasses(timings); + VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false); +} + +bool CompilerDriver::IsImageClass(const char* descriptor) const { + if (image_classes_ != nullptr) { + // If we have a set of image classes, use those. + return image_classes_->find(descriptor) != image_classes_->end(); + } + // No set of image classes, assume we include all the classes. + // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case. 
+ return !IsBootImage(); +} + +bool CompilerDriver::IsClassToCompile(const char* descriptor) const { + if (kRestrictCompilationFiltersToImage && !IsBootImage()) { + return true; + } + + if (classes_to_compile_ == nullptr) { + return true; + } + return classes_to_compile_->find(descriptor) != classes_to_compile_->end(); +} + +bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const { + if (kRestrictCompilationFiltersToImage && !IsBootImage()) { + return true; + } + + if (methods_to_compile_ == nullptr) { + return true; + } + + std::string tmp = PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file, true); + return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end(); +} + +bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const { + if (profile_compilation_info_ == nullptr) { + // If we miss profile information it means that we don't do a profile guided compilation. + // Return true, and let the other filters decide if the method should be compiled. + return true; + } + bool result = profile_compilation_info_->ContainsMethod(method_ref); + + if (kDebugProfileGuidedCompilation) { + LOG(INFO) << "[ProfileGuidedCompilation] " + << (result ? "Compiled" : "Skipped") << " method:" + << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file, true); + } + return result; +} + +bool CompilerDriver::ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, + uint16_t class_idx) const { + if (!compiler_options_->VerifyOnlyProfile()) { + // No profile, verify everything. + return true; + } + DCHECK(profile_compilation_info_ != nullptr); + bool result = profile_compilation_info_->ContainsClass(dex_file, class_idx); + if (kDebugProfileGuidedCompilation) { + LOG(INFO) << "[ProfileGuidedCompilation] " + << (result ? 
"Verified" : "Skipped") << " method:" + << dex_file.GetClassDescriptor(dex_file.GetClassDef(class_idx)); + } + return result; +} + +class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { + public: + ResolveCatchBlockExceptionsClassVisitor( + std::set>& exceptions_to_resolve) + : exceptions_to_resolve_(exceptions_to_resolve) {} + + virtual bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + for (auto& m : c->GetMethods(pointer_size)) { + ResolveExceptionsForMethod(&m, pointer_size); + } + return true; + } + + private: + void ResolveExceptionsForMethod(ArtMethod* method_handle, size_t pointer_size) + SHARED_REQUIRES(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = method_handle->GetCodeItem(); + if (code_item == nullptr) { + return; // native or abstract method + } + if (code_item->tries_size_ == 0) { + return; // nothing to process + } + const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); + size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); + for (size_t i = 0; i < num_encoded_catch_handlers; i++) { + int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); + bool has_catch_all = false; + if (encoded_catch_handler_size <= 0) { + encoded_catch_handler_size = -encoded_catch_handler_size; + has_catch_all = true; + } + for (int32_t j = 0; j < encoded_catch_handler_size; j++) { + uint16_t encoded_catch_handler_handlers_type_idx = + DecodeUnsignedLeb128(&encoded_catch_handler_list); + // Add to set of types to resolve if not already in the dex cache resolved types + if (!method_handle->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx, + pointer_size)) { + exceptions_to_resolve_.emplace(encoded_catch_handler_handlers_type_idx, + method_handle->GetDexFile()); + } + // ignore address associated with catch handler + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + if (has_catch_all) { + // ignore catch all address + DecodeUnsignedLeb128(&encoded_catch_handler_list); + } + } + } + + std::set>& exceptions_to_resolve_; +}; + +class RecordImageClassesVisitor : public ClassVisitor { + public: + explicit RecordImageClassesVisitor(std::unordered_set* image_classes) + : image_classes_(image_classes) {} + + bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + image_classes_->insert(klass->GetDescriptor(&temp)); + return true; + } + + private: + std::unordered_set* const image_classes_; +}; + +// Make a list of descriptors for classes to include in the image +void CompilerDriver::LoadImageClasses(TimingLogger* timings) { + CHECK(timings != nullptr); + if (!IsBootImage()) { + return; + } + + TimingLogger::ScopedTiming t("LoadImageClasses", timings); + // Make a first class to load all classes explicitly listed in the file + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + CHECK(image_classes_.get() != nullptr); + for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) { + const std::string& descriptor(*it); + StackHandleScope<1> hs(self); + Handle klass( + hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str()))); + if (klass.Get() == nullptr) { + VLOG(compiler) << "Failed to find class " << descriptor; + image_classes_->erase(it++); + self->ClearException(); + } else 
+      ++it;
+    }
+  }
+
+  // Resolve exception classes referenced by the loaded classes. The catch logic assumes
+  // exceptions are resolved by the verifier when there is a catch block in a method of interest.
+  // Do this here so that exception classes appear to have been specified as image classes.
+  std::set<std::pair<uint16_t, const DexFile*>> unresolved_exception_types;
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> java_lang_Throwable(
+      hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;")));
+  do {
+    unresolved_exception_types.clear();
+    ResolveCatchBlockExceptionsClassVisitor visitor(unresolved_exception_types);
+    class_linker->VisitClasses(&visitor);
+    for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) {
+      uint16_t exception_type_idx = exception_type.first;
+      const DexFile* dex_file = exception_type.second;
+      StackHandleScope<2> hs2(self);
+      Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file,
+                                                                                     nullptr)));
+      Handle<mirror::Class> klass(hs2.NewHandle(
+          class_linker->ResolveType(*dex_file,
+                                    exception_type_idx,
+                                    dex_cache,
+                                    ScopedNullHandle<mirror::ClassLoader>())));
+      if (klass.Get() == nullptr) {
+        const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
+        const char* descriptor = dex_file->GetTypeDescriptor(type_id);
+        LOG(FATAL) << "Failed to resolve class " << descriptor;
+      }
+      DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get()));
+    }
+    // Resolving exceptions may load classes that reference more exceptions; iterate until no
+    // more are found.
+  } while (!unresolved_exception_types.empty());
+
+  // We walk the roots looking for classes so that we'll pick up the
+  // above classes plus any classes they depend on, such as super
+  // classes, interfaces, and the required ClassLinker roots.
+  RecordImageClassesVisitor visitor(image_classes_.get());
+  class_linker->VisitClasses(&visitor);
+
+  CHECK_NE(image_classes_->size(), 0U);
+}
+
+static void MaybeAddToImageClasses(Handle<mirror::Class> c,
+                                   std::unordered_set<std::string>* image_classes)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  Thread* self = Thread::Current();
+  StackHandleScope<1> hs(self);
+  // Make a copy of the handle so that we don't clobber it doing Assign.
+  MutableHandle<mirror::Class> klass(hs.NewHandle(c.Get()));
+  std::string temp;
+  const size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+  while (!klass->IsObjectClass()) {
+    const char* descriptor = klass->GetDescriptor(&temp);
+    std::pair<std::unordered_set<std::string>::iterator, bool> result =
+        image_classes->insert(descriptor);
+    if (!result.second) {  // Previously inserted.
+      break;
+    }
+    VLOG(compiler) << "Adding " << descriptor << " to image classes";
+    for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) {
+      StackHandleScope<1> hs2(self);
+      MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)),
+                             image_classes);
+    }
+    // Walk the current class in the hierarchy, not the original argument `c`.
+    for (auto& m : klass->GetVirtualMethods(pointer_size)) {
+      StackHandleScope<1> hs2(self);
+      MaybeAddToImageClasses(hs2.NewHandle(m.GetDeclaringClass()), image_classes);
+    }
+    if (klass->IsArrayClass()) {
+      StackHandleScope<1> hs2(self);
+      MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes);
+    }
+    klass.Assign(klass->GetSuperClass());
+  }
+}
+
+// Keeps all the data for the update together. Also doubles as the reference visitor.
+// Note: we can use object pointers because we suspend all threads.
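Aside: the note above is the load-bearing invariant for the class below. ClinitImageUpdate caches raw mirror::Class* and mirror::Object* pointers, which is only sound while thread suspension is forbidden, since a moving collector could otherwise relocate those objects. A minimal standalone sketch of the assert/restore pairing used by its constructor and destructor, with hypothetical FakeThread and NoSuspendScope types standing in for ART's real Thread API:

    #include <cassert>

    // Toy stand-in for a runtime thread that can veto suspension (hypothetical).
    class FakeThread {
     public:
      const char* StartAssertNoThreadSuspension(const char* cause) {
        const char* old_cause = cause_;
        cause_ = cause;
        return old_cause;
      }
      void EndAssertNoThreadSuspension(const char* old_cause) { cause_ = old_cause; }
      bool SuspensionAllowed() const { return cause_ == nullptr; }
     private:
      const char* cause_ = nullptr;
    };

    // RAII guard mirroring the constructor/destructor pairing in ClinitImageUpdate:
    // raw object pointers are only safe to cache while the guard is alive.
    class NoSuspendScope {
     public:
      NoSuspendScope(FakeThread* self, const char* cause)
          : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {}
      ~NoSuspendScope() { self_->EndAssertNoThreadSuspension(old_cause_); }
     private:
      FakeThread* const self_;
      const char* const old_cause_;
    };

    int main() {
      FakeThread self;
      {
        NoSuspendScope scope(&self, "Boot image closure");
        assert(!self.SuspensionAllowed());  // GC cannot move objects here.
      }
      assert(self.SuspensionAllowed());     // Suspension re-allowed on destruction.
      return 0;
    }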
+class ClinitImageUpdate { + public: + static ClinitImageUpdate* Create(std::unordered_set* image_class_descriptors, + Thread* self, ClassLinker* linker, std::string* error_msg) { + std::unique_ptr res(new ClinitImageUpdate(image_class_descriptors, self, + linker)); + if (res->dex_cache_class_ == nullptr) { + *error_msg = "Could not find DexCache class."; + return nullptr; + } + + return res.release(); + } + + ~ClinitImageUpdate() { + // Allow others to suspend again. + self_->EndAssertNoThreadSuspension(old_cause_); + } + + // Visitor for VisitReferences. + void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const + SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::Object* ref = object->GetFieldObject(field_offset); + if (ref != nullptr) { + VisitClinitClassesObject(ref); + } + } + + // java.lang.Reference visitor for VisitReferences. + void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref ATTRIBUTE_UNUSED) + const {} + + // Ignore class native roots. + void VisitRootIfNonNull(mirror::CompressedReference* root ATTRIBUTE_UNUSED) + const {} + void VisitRoot(mirror::CompressedReference* root ATTRIBUTE_UNUSED) const {} + + void Walk() SHARED_REQUIRES(Locks::mutator_lock_) { + // Use the initial classes as roots for a search. + for (mirror::Class* klass_root : image_classes_) { + VisitClinitClassesObject(klass_root); + } + } + + private: + class FindImageClassesVisitor : public ClassVisitor { + public: + explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {} + + bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + std::string temp; + const char* name = klass->GetDescriptor(&temp); + if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) { + data_->image_classes_.push_back(klass); + } else { + // Check whether it is initialized and has a clinit. They must be kept, too. + if (klass->IsInitialized() && klass->FindClassInitializer( + Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) { + data_->image_classes_.push_back(klass); + } + } + return true; + } + + private: + ClinitImageUpdate* const data_; + }; + + ClinitImageUpdate(std::unordered_set* image_class_descriptors, Thread* self, + ClassLinker* linker) + SHARED_REQUIRES(Locks::mutator_lock_) : + image_class_descriptors_(image_class_descriptors), self_(self) { + CHECK(linker != nullptr); + CHECK(image_class_descriptors != nullptr); + + // Make sure nobody interferes with us. + old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure"); + + // Find the interesting classes. + dex_cache_class_ = linker->LookupClass(self, "Ljava/lang/DexCache;", + ComputeModifiedUtf8Hash("Ljava/lang/DexCache;"), nullptr); + + // Find all the already-marked classes. + WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); + FindImageClassesVisitor visitor(this); + linker->VisitClasses(&visitor); + } + + void VisitClinitClassesObject(mirror::Object* object) const + SHARED_REQUIRES(Locks::mutator_lock_) { + DCHECK(object != nullptr); + if (marked_objects_.find(object) != marked_objects_.end()) { + // Already processed. + return; + } + + // Mark it. + marked_objects_.insert(object); + + if (object->IsClass()) { + // If it is a class, add it. + StackHandleScope<1> hs(self_); + MaybeAddToImageClasses(hs.NewHandle(object->AsClass()), image_class_descriptors_); + } else { + // Else visit the object's class. 
+ VisitClinitClassesObject(object->GetClass()); + } + + // If it is not a DexCache, visit all references. + mirror::Class* klass = object->GetClass(); + if (klass != dex_cache_class_) { + object->VisitReferences(*this, *this); + } + } + + mutable std::unordered_set marked_objects_; + std::unordered_set* const image_class_descriptors_; + std::vector image_classes_; + const mirror::Class* dex_cache_class_; + Thread* const self_; + const char* old_cause_; + + DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate); +}; + +void CompilerDriver::UpdateImageClasses(TimingLogger* timings) { + if (IsBootImage()) { + TimingLogger::ScopedTiming t("UpdateImageClasses", timings); + + Runtime* runtime = Runtime::Current(); + + // Suspend all threads. + ScopedSuspendAll ssa(__FUNCTION__); + + std::string error_msg; + std::unique_ptr update(ClinitImageUpdate::Create(image_classes_.get(), + Thread::Current(), + runtime->GetClassLinker(), + &error_msg)); + CHECK(update.get() != nullptr) << error_msg; // TODO: Soft failure? + + // Do the marking. + update->Walk(); + } +} + +bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) { + Runtime* runtime = Runtime::Current(); + if (!runtime->IsAotCompiler()) { + DCHECK(runtime->UseJitCompilation()); + // Having the klass reference here implies that the klass is already loaded. + return true; + } + if (!IsBootImage()) { + // Assume loaded only if klass is in the boot image. App classes cannot be assumed + // loaded because we don't even know what class loader will be used to load them. + bool class_in_image = runtime->GetHeap()->FindSpaceFromObject(klass, false)->IsImageSpace(); + return class_in_image; + } + std::string temp; + const char* descriptor = klass->GetDescriptor(&temp); + return IsImageClass(descriptor); +} + +void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref) { + MutexLock lock(self, dex_to_dex_references_lock_); + // Since we're compiling one dex file at a time, we need to look for the + // current dex file entry only at the end of dex_to_dex_references_. + if (dex_to_dex_references_.empty() || + &dex_to_dex_references_.back().GetDexFile() != method_ref.dex_file) { + dex_to_dex_references_.emplace_back(*method_ref.dex_file); + } + dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index); +} + +bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle dex_cache, + uint32_t type_idx) { + bool result = false; + if ((IsBootImage() && + IsImageClass(dex_cache->GetDexFile()->StringDataByIdx( + dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) || + Runtime::Current()->UseJitCompilation()) { + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + result = (resolved_class != nullptr); + } + + if (result) { + stats_->TypeInDexCache(); + } else { + stats_->TypeNotInDexCache(); + } + return result; +} + +bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, + uint32_t string_idx) { + // See also Compiler::ResolveDexFile + + bool result = false; + if (IsBootImage() || Runtime::Current()->UseJitCompilation()) { + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); + Handle dex_cache(hs.NewHandle(class_linker->FindDexCache( + soa.Self(), dex_file, false))); + if (IsBootImage()) { + // We resolve all const-string strings when building for the image. 
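Aside: the call just below encodes a simple contract: when building the boot image, the compiler resolves every const-string eagerly, so a later dex-cache lookup can never miss, whereas the JIT path may only trust entries that happen to be resolved already. A toy model of that contract, with a hypothetical ToyDexCache that is far simpler than ART's mirror::DexCache:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>
    #include <vector>

    class ToyDexCache {
     public:
      explicit ToyDexCache(std::vector<std::string> string_table)
          : string_table_(std::move(string_table)) {}

      // Boot-image path: force-resolve, so a later lookup cannot miss.
      const std::string* Resolve(uint32_t idx) {
        resolved_[idx] = &string_table_[idx];
        return resolved_[idx];
      }
      // JIT path: only report what is already resolved.
      const std::string* GetResolved(uint32_t idx) const {
        auto it = resolved_.find(idx);
        return it == resolved_.end() ? nullptr : it->second;
      }

     private:
      std::vector<std::string> string_table_;
      std::unordered_map<uint32_t, const std::string*> resolved_;
    };

    int main() {
      ToyDexCache cache({"Ljava/lang/Object;", "hello"});
      assert(cache.GetResolved(1) == nullptr);  // JIT view: not yet resolved.
      cache.Resolve(1);                         // Boot-image view resolves eagerly...
      assert(cache.GetResolved(1) != nullptr);  // ...so the assumption always holds.
      return 0;
    }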
+ class_linker->ResolveString(dex_file, string_idx, dex_cache); + result = true; + } else { + // Just check whether the dex cache already has the string. + DCHECK(Runtime::Current()->UseJitCompilation()); + result = (dex_cache->GetResolvedString(string_idx) != nullptr); + } + } + if (result) { + stats_->StringInDexCache(); + } else { + stats_->StringNotInDexCache(); + } + return result; +} + +bool CompilerDriver::CanAccessTypeWithoutChecks(uint32_t referrer_idx, + Handle dex_cache, + uint32_t type_idx) { + // Get type from dex cache assuming it was populated by the verifier + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + if (resolved_class == nullptr) { + stats_->TypeNeedsAccessCheck(); + return false; // Unknown class needs access checks. + } + const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx); + bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. + if (!is_accessible) { + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + if (referrer_class == nullptr) { + stats_->TypeNeedsAccessCheck(); + return false; // Incomplete referrer knowledge needs access check. + } + // Perform access check, will return true if access is ok or false if we're going to have to + // check this at runtime (for example for class loaders). + is_accessible = referrer_class->CanAccess(resolved_class); + } + if (is_accessible) { + stats_->TypeDoesntNeedAccessCheck(); + } else { + stats_->TypeNeedsAccessCheck(); + } + return is_accessible; +} + +bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx, + Handle dex_cache, + uint32_t type_idx, + bool* finalizable) { + // Get type from dex cache assuming it was populated by the verifier. + mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); + if (resolved_class == nullptr) { + stats_->TypeNeedsAccessCheck(); + // Be conservative. + *finalizable = true; + return false; // Unknown class needs access checks. + } + *finalizable = resolved_class->IsFinalizable(); + const DexFile::MethodId& method_id = dex_cache->GetDexFile()->GetMethodId(referrer_idx); + bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. + if (!is_accessible) { + mirror::Class* referrer_class = dex_cache->GetResolvedType(method_id.class_idx_); + if (referrer_class == nullptr) { + stats_->TypeNeedsAccessCheck(); + return false; // Incomplete referrer knowledge needs access check. + } + // Perform access and instantiable checks, will return true if access is ok or false if we're + // going to have to check this at runtime (for example for class loaders). 
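Aside: at its core, the CanAccess test relied on below is "public, or same package"; anything else forces a runtime re-check. A stripped-down, hypothetical model of that rule (real ART additionally handles nested-class access flags and unusual class loaders):

    #include <cassert>
    #include <cstddef>
    #include <string>

    struct ToyClass {
      std::string descriptor;  // e.g. "Lcom/example/Foo;"
      bool is_public;

      // Everything up to the last '/' approximates the (binary) package.
      std::string Package() const {
        size_t slash = descriptor.rfind('/');
        return slash == std::string::npos ? "" : descriptor.substr(0, slash);
      }
      bool CanAccess(const ToyClass& other) const {
        return other.is_public || Package() == other.Package();
      }
    };

    int main() {
      ToyClass referrer{"Lcom/example/Foo;", true};
      ToyClass hidden{"Lcom/example/Bar;", false};
      ToyClass elsewhere{"Lcom/other/Baz;", false};
      assert(referrer.CanAccess(hidden));      // Same package: decidable at compile time.
      assert(!referrer.CanAccess(elsewhere));  // Must fall back to a runtime check.
      return 0;
    }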
+    is_accessible = referrer_class->CanAccess(resolved_class);
+  }
+  bool result = is_accessible && resolved_class->IsInstantiable();
+  if (result) {
+    stats_->TypeDoesntNeedAccessCheck();
+  } else {
+    stats_->TypeNeedsAccessCheck();
+  }
+  return result;
+}
+
+bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
+                                        bool* is_type_initialized, bool* use_direct_type_ptr,
+                                        uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
+  ScopedObjectAccess soa(Thread::Current());
+  Runtime* runtime = Runtime::Current();
+  mirror::DexCache* dex_cache = runtime->GetClassLinker()->FindDexCache(
+      soa.Self(), dex_file, false);
+  mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
+  if (resolved_class == nullptr) {
+    return false;
+  }
+  if (GetCompilerOptions().GetCompilePic()) {
+    // Do not allow a direct class pointer to be used when compiling for position-independent code.
+    return false;
+  }
+  *out_is_finalizable = resolved_class->IsFinalizable();
+  gc::Heap* heap = runtime->GetHeap();
+  const bool compiling_boot = heap->IsCompilingBoot();
+  const bool support_boot_image_fixup = GetSupportBootImageFixup();
+  if (compiling_boot) {
+    // boot -> boot class pointers.
+    // True if the class is in the image at boot compiling time.
+    const bool is_image_class = IsBootImage() && IsImageClass(
+        dex_file.StringDataByIdx(dex_file.GetTypeId(type_idx).descriptor_idx_));
+    // True if a pc-relative load works.
+    if (is_image_class && support_boot_image_fixup) {
+      *is_type_initialized = resolved_class->IsInitialized();
+      *use_direct_type_ptr = false;
+      *direct_type_ptr = 0;
+      return true;
+    } else {
+      return false;
+    }
+  } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) {
+    *is_type_initialized = resolved_class->IsInitialized();
+    // If the class may move around, then don't embed it as a direct pointer.
+    *use_direct_type_ptr = true;
+    *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
+    return true;
+  } else {
+    // True if the class is in the image at app compiling time.
+    const bool class_in_image = heap->FindSpaceFromObject(resolved_class, false)->IsImageSpace();
+    if (class_in_image && support_boot_image_fixup) {
+      // boot -> app class pointers.
+      *is_type_initialized = resolved_class->IsInitialized();
+      // TODO: This is somewhat hacky. We should refactor all of this invoke codepath.
+      *use_direct_type_ptr = !GetCompilerOptions().GetIncludePatchInformation();
+      *direct_type_ptr = reinterpret_cast<uintptr_t>(resolved_class);
+      return true;
+    } else {
+      // app -> app class pointers.
+      // Give up because the app does not have an image and the class
+      // isn't created at compile time. TODO: implement this
+      // if/when each app gets an image.
+      return false;
+    }
+  }
+}
+
+bool CompilerDriver::CanEmbedReferenceTypeInCode(ClassReference* ref,
+                                                 bool* use_direct_ptr,
+                                                 uintptr_t* direct_type_ptr) {
+  CHECK(ref != nullptr);
+  CHECK(use_direct_ptr != nullptr);
+  CHECK(direct_type_ptr != nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
+  bool is_initialized = false;
+  bool unused_finalizable;
+  // Make sure we have a finished Reference class object before attempting to use it.
+ if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(), + reference_class->GetDexTypeIndex(), &is_initialized, + use_direct_ptr, direct_type_ptr, &unused_finalizable) || + !is_initialized) { + return false; + } + ref->first = &reference_class->GetDexFile(); + ref->second = reference_class->GetDexClassDefIndex(); + return true; +} + +uint32_t CompilerDriver::GetReferenceSlowFlagOffset() const { + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* klass = mirror::Reference::GetJavaLangRefReference(); + DCHECK(klass->IsInitialized()); + return klass->GetSlowPathFlagOffset().Uint32Value(); +} + +uint32_t CompilerDriver::GetReferenceDisableFlagOffset() const { + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* klass = mirror::Reference::GetJavaLangRefReference(); + DCHECK(klass->IsInitialized()); + return klass->GetDisableIntrinsicFlagOffset().Uint32Value(); +} + +DexCacheArraysLayout CompilerDriver::GetDexCacheArraysLayout(const DexFile* dex_file) { + return ContainsElement(GetDexFilesForOatFile(), dex_file) + ? DexCacheArraysLayout(GetInstructionSetPointerSize(instruction_set_), dex_file) + : DexCacheArraysLayout(); +} + +void CompilerDriver::ProcessedInstanceField(bool resolved) { + if (!resolved) { + stats_->UnresolvedInstanceField(); + } else { + stats_->ResolvedInstanceField(); + } +} + +void CompilerDriver::ProcessedStaticField(bool resolved, bool local) { + if (!resolved) { + stats_->UnresolvedStaticField(); + } else if (local) { + stats_->ResolvedLocalStaticField(); + } else { + stats_->ResolvedStaticField(); + } +} + +void CompilerDriver::ProcessedInvoke(InvokeType invoke_type, int flags) { + stats_->ProcessedInvoke(invoke_type, flags); +} + +ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, + const DexCompilationUnit* mUnit, bool is_put, + const ScopedObjectAccess& soa) { + // Try to resolve the field and compiling method's class. + ArtField* resolved_field; + mirror::Class* referrer_class; + Handle dex_cache(mUnit->GetDexCache()); + { + StackHandleScope<1> hs(soa.Self()); + Handle class_loader_handle( + hs.NewHandle(soa.Decode(mUnit->GetClassLoader()))); + resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false); + referrer_class = resolved_field != nullptr + ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr; + } + bool can_link = false; + if (resolved_field != nullptr && referrer_class != nullptr) { + std::pair fast_path = IsFastInstanceField( + dex_cache.Get(), referrer_class, resolved_field, field_idx); + can_link = is_put ? fast_path.second : fast_path.first; + } + ProcessedInstanceField(can_link); + return can_link ? resolved_field : nullptr; +} + +bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, + bool is_put, MemberOffset* field_offset, + bool* is_volatile) { + ScopedObjectAccess soa(Thread::Current()); + ArtField* resolved_field = ComputeInstanceFieldInfo(field_idx, mUnit, is_put, soa); + + if (resolved_field == nullptr) { + // Conservative defaults. 
+    *is_volatile = true;
+    *field_offset = MemberOffset(static_cast<uint32_t>(-1));
+    return false;
+  } else {
+    *is_volatile = resolved_field->IsVolatile();
+    *field_offset = resolved_field->GetOffset();
+    return true;
+  }
+}
+
+void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType sharp_type,
+                                                   bool no_guarantee_of_dex_cache_entry,
+                                                   const mirror::Class* referrer_class,
+                                                   ArtMethod* method,
+                                                   int* stats_flags,
+                                                   MethodReference* target_method,
+                                                   uintptr_t* direct_code,
+                                                   uintptr_t* direct_method) {
+  // For direct and static methods, compute possible direct_code and direct_method values, i.e.
+  // an address for the Method* being invoked and an address of the code for that Method*.
+  // For interface calls, compute a value for direct_method that is the interface method being
+  // invoked, so this can be passed to the out-of-line runtime support code.
+  *direct_code = 0;
+  *direct_method = 0;
+  Runtime* const runtime = Runtime::Current();
+  gc::Heap* const heap = runtime->GetHeap();
+  auto* cl = runtime->GetClassLinker();
+  const auto pointer_size = cl->GetImagePointerSize();
+  bool use_dex_cache = GetCompilerOptions().GetCompilePic();  // Off by default.
+  const bool compiling_boot = heap->IsCompilingBoot();
+  // TODO: This is somewhat hacky. We should refactor all of this invoke codepath.
+  const bool force_relocations = (compiling_boot ||
+                                  GetCompilerOptions().GetIncludePatchInformation());
+  if (sharp_type != kStatic && sharp_type != kDirect) {
+    return;
+  }
+  // TODO: support patching on all architectures.
+  use_dex_cache = use_dex_cache || (force_relocations && !support_boot_image_fixup_);
+  mirror::Class* declaring_class = method->GetDeclaringClass();
+  bool method_code_in_boot = declaring_class->GetClassLoader() == nullptr;
+  if (!use_dex_cache) {
+    if (!method_code_in_boot) {
+      use_dex_cache = true;
+    } else {
+      bool has_clinit_trampoline =
+          method->IsStatic() && !declaring_class->IsInitialized();
+      if (has_clinit_trampoline && declaring_class != referrer_class) {
+        // Ensure we run the clinit trampoline unless we are invoking a static method in the same
+        // class.
+        use_dex_cache = true;
+      }
+    }
+  }
+  if (runtime->UseJitCompilation()) {
+    // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will
+    // never be updated even after we compile the method.
+    if (cl->IsQuickToInterpreterBridge(
+        reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)))) {
+      use_dex_cache = true;
+    }
+  }
+  if (method_code_in_boot) {
+    *stats_flags |= kFlagDirectCallToBoot | kFlagDirectMethodToBoot;
+  }
+  if (!use_dex_cache && force_relocations) {
+    bool is_in_image;
+    if (IsBootImage()) {
+      is_in_image = IsImageClass(method->GetDeclaringClassDescriptor());
+    } else {
+      is_in_image = instruction_set_ != kX86 && instruction_set_ != kX86_64 &&
+                    heap->FindSpaceFromObject(method->GetDeclaringClass(), false)->IsImageSpace() &&
+                    !cl->IsQuickToInterpreterBridge(
+                        reinterpret_cast<const void*>(compiler_->GetEntryPointOf(method)));
+    }
+    if (!is_in_image) {
+      // We can only branch directly to Methods that are resolved in the DexCache.
+      // Otherwise we won't invoke the resolution trampoline.
+      use_dex_cache = true;
+    }
+  }
+  // The method is not defined within this dex file. We need a dex cache slot within the current
+  // dex file or direct pointers.
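Aside: the fallback used just below, FindDexMethodIndexInOtherDexFile, re-locates a method in a different dex file by its full signature, yielding either that file's method index or "no index" (in which case only direct pointers can work). A hypothetical sketch of the idea with a ToyDexFile:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    static constexpr uint32_t kNoIndex = 0xFFFFFFFFu;

    struct ToyDexFile {
      std::vector<std::string> method_signatures;  // index -> signature

      uint32_t FindBySignature(const std::string& sig) const {
        for (uint32_t i = 0; i < method_signatures.size(); ++i) {
          if (method_signatures[i] == sig) return i;
        }
        return kNoIndex;
      }
    };

    int main() {
      ToyDexFile defining{{"Lcom/a/A;->foo()V", "Lcom/a/A;->bar()V"}};
      ToyDexFile referring{{"Lcom/a/A;->bar()V"}};
      // bar() is index 1 in `defining` but index 0 in `referring`.
      assert(referring.FindBySignature(defining.method_signatures[1]) == 0);
      // foo() has no slot in `referring`: direct pointers would be required.
      assert(referring.FindBySignature(defining.method_signatures[0]) == kNoIndex);
      return 0;
    }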
+ bool must_use_direct_pointers = false; + mirror::DexCache* dex_cache = declaring_class->GetDexCache(); + if (target_method->dex_file == dex_cache->GetDexFile() && + !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod( + method->GetDexMethodIndex(), pointer_size) == nullptr)) { + target_method->dex_method_index = method->GetDexMethodIndex(); + } else { + if (no_guarantee_of_dex_cache_entry) { + // See if the method is also declared in this dex cache. + uint32_t dex_method_idx = method->FindDexMethodIndexInOtherDexFile( + *target_method->dex_file, target_method->dex_method_index); + if (dex_method_idx != DexFile::kDexNoIndex) { + target_method->dex_method_index = dex_method_idx; + } else { + if (force_relocations && !use_dex_cache) { + target_method->dex_method_index = method->GetDexMethodIndex(); + target_method->dex_file = dex_cache->GetDexFile(); + } + must_use_direct_pointers = true; + } + } + } + if (use_dex_cache) { + if (must_use_direct_pointers) { + // Fail. Test above showed the only safe dispatch was via the dex cache, however, the direct + // pointers are required as the dex cache lacks an appropriate entry. + VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method); + } else { + *type = sharp_type; + } + } else { + bool method_in_image = false; + const std::vector image_spaces = heap->GetBootImageSpaces(); + for (gc::space::ImageSpace* image_space : image_spaces) { + const auto& method_section = image_space->GetImageHeader().GetMethodsSection(); + if (method_section.Contains(reinterpret_cast(method) - image_space->Begin())) { + method_in_image = true; + break; + } + } + if (method_in_image || compiling_boot || runtime->UseJitCompilation()) { + // We know we must be able to get to the method in the image, so use that pointer. + // In the case where we are the JIT, we can always use direct pointers since we know where + // the method and its code are / will be. We don't sharpen to interpreter bridge since we + // check IsQuickToInterpreterBridge above. + CHECK(!method->IsAbstract()); + *type = sharp_type; + *direct_method = force_relocations ? -1 : reinterpret_cast(method); + *direct_code = force_relocations ? -1 : compiler_->GetEntryPointOf(method); + target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile(); + target_method->dex_method_index = method->GetDexMethodIndex(); + } else if (!must_use_direct_pointers) { + // Set the code and rely on the dex cache for the method. + *type = sharp_type; + if (force_relocations) { + *direct_code = -1; + target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile(); + target_method->dex_method_index = method->GetDexMethodIndex(); + } else { + *direct_code = compiler_->GetEntryPointOf(method); + } + } else { + // Direct pointers were required but none were available. + VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method); + } + } +} + +bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc, + bool update_stats, bool enable_devirtualization, + InvokeType* invoke_type, MethodReference* target_method, + int* vtable_idx, uintptr_t* direct_code, + uintptr_t* direct_method) { + InvokeType orig_invoke_type = *invoke_type; + int stats_flags = 0; + ScopedObjectAccess soa(Thread::Current()); + // Try to resolve the method and compiling method's class. 
+ StackHandleScope<2> hs(soa.Self()); + Handle dex_cache(mUnit->GetDexCache()); + Handle class_loader(hs.NewHandle( + soa.Decode(mUnit->GetClassLoader()))); + uint32_t method_idx = target_method->dex_method_index; + ArtMethod* resolved_method = ResolveMethod( + soa, dex_cache, class_loader, mUnit, method_idx, orig_invoke_type); + auto h_referrer_class = hs.NewHandle(resolved_method != nullptr ? + ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr); + bool result = false; + if (resolved_method != nullptr) { + *vtable_idx = GetResolvedMethodVTableIndex(resolved_method, orig_invoke_type); + + if (enable_devirtualization && mUnit->GetVerifiedMethod() != nullptr) { + const MethodReference* devirt_target = mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc); + + stats_flags = IsFastInvoke( + soa, dex_cache, class_loader, mUnit, h_referrer_class.Get(), resolved_method, + invoke_type, target_method, devirt_target, direct_code, direct_method); + result = stats_flags != 0; + } else { + // Devirtualization not enabled. Inline IsFastInvoke(), dropping the devirtualization parts. + if (UNLIKELY(h_referrer_class.Get() == nullptr) || + UNLIKELY(!h_referrer_class->CanAccessResolvedMethod(resolved_method->GetDeclaringClass(), + resolved_method, dex_cache.Get(), + target_method->dex_method_index)) || + *invoke_type == kSuper) { + // Slow path. (Without devirtualization, all super calls go slow path as well.) + } else { + // Sharpening failed so generate a regular resolved method dispatch. + stats_flags = kFlagMethodResolved; + GetCodeAndMethodForDirectCall( + invoke_type, *invoke_type, false, h_referrer_class.Get(), resolved_method, &stats_flags, + target_method, direct_code, direct_method); + result = true; + } + } + } + if (!result) { + // Conservative defaults. + *vtable_idx = -1; + *direct_code = 0u; + *direct_method = 0u; + } + if (update_stats) { + ProcessedInvoke(orig_invoke_type, stats_flags); + } + return result; +} + +const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file, + uint32_t method_idx) const { + MethodReference ref(dex_file, method_idx); + return verification_results_->GetVerifiedMethod(ref); +} + +bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) { + if (!compiler_options_->IsVerificationEnabled()) { + // If we didn't verify, every cast has to be treated as non-safe. 
+    return false;
+  }
+  DCHECK(mUnit->GetVerifiedMethod() != nullptr);
+  bool result = mUnit->GetVerifiedMethod()->IsSafeCast(dex_pc);
+  if (result) {
+    stats_->SafeCast();
+  } else {
+    stats_->NotASafeCast();
+  }
+  return result;
+}
+
+class CompilationVisitor {
+ public:
+  virtual ~CompilationVisitor() {}
+  virtual void Visit(size_t index) = 0;
+};
+
+class ParallelCompilationManager {
+ public:
+  ParallelCompilationManager(ClassLinker* class_linker,
+                             jobject class_loader,
+                             CompilerDriver* compiler,
+                             const DexFile* dex_file,
+                             const std::vector<const DexFile*>& dex_files,
+                             ThreadPool* thread_pool)
+      : index_(0),
+        class_linker_(class_linker),
+        class_loader_(class_loader),
+        compiler_(compiler),
+        dex_file_(dex_file),
+        dex_files_(dex_files),
+        thread_pool_(thread_pool) {}
+
+  ClassLinker* GetClassLinker() const {
+    CHECK(class_linker_ != nullptr);
+    return class_linker_;
+  }
+
+  jobject GetClassLoader() const {
+    return class_loader_;
+  }
+
+  CompilerDriver* GetCompiler() const {
+    CHECK(compiler_ != nullptr);
+    return compiler_;
+  }
+
+  const DexFile* GetDexFile() const {
+    CHECK(dex_file_ != nullptr);
+    return dex_file_;
+  }
+
+  const std::vector<const DexFile*>& GetDexFiles() const {
+    return dex_files_;
+  }
+
+  void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
+      REQUIRES(!*Locks::mutator_lock_) {
+    Thread* self = Thread::Current();
+    self->AssertNoPendingException();
+    CHECK_GT(work_units, 0U);
+
+    index_.StoreRelaxed(begin);
+    for (size_t i = 0; i < work_units; ++i) {
+      thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
+    }
+    thread_pool_->StartWorkers(self);
+
+    // Ensure we're suspended while we're blocked waiting for the other threads to finish (the
+    // worker-thread destructors invoked below perform the join).
+    CHECK_NE(self->GetState(), kRunnable);
+
+    // Wait for all the worker threads to finish.
+    thread_pool_->Wait(self, true, false);
+
+    // And stop the workers accepting jobs.
+    thread_pool_->StopWorkers(self);
+  }
+
+  size_t NextIndex() {
+    return index_.FetchAndAddSequentiallyConsistent(1);
+  }
+
+ private:
+  class ForAllClosure : public Task {
+   public:
+    ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
+        : manager_(manager),
+          end_(end),
+          visitor_(visitor) {}
+
+    virtual void Run(Thread* self) {
+      while (true) {
+        const size_t index = manager_->NextIndex();
+        if (UNLIKELY(index >= end_)) {
+          break;
+        }
+        visitor_->Visit(index);
+        self->AssertNoPendingException();
+      }
+    }
+
+    virtual void Finalize() {
+      delete this;
+    }
+
+   private:
+    ParallelCompilationManager* const manager_;
+    const size_t end_;
+    CompilationVisitor* const visitor_;
+  };
+
+  AtomicInteger index_;
+  ClassLinker* const class_linker_;
+  const jobject class_loader_;
+  CompilerDriver* const compiler_;
+  const DexFile* const dex_file_;
+  const std::vector<const DexFile*>& dex_files_;
+  ThreadPool* const thread_pool_;
+
+  DISALLOW_COPY_AND_ASSIGN(ParallelCompilationManager);
+};
+
+// A fast version of SkipClass above, for the case where the class pointer is already available;
+// it avoids the expensive FindInClassPath search.
+static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  DCHECK(klass != nullptr);
+  const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
+  if (&dex_file != &original_dex_file) {
+    if (class_loader == nullptr) {
+      LOG(WARNING) << "Skipping class " << PrettyDescriptor(klass) << " from "
+                   << dex_file.GetLocation() << " previously found in "
+                   << original_dex_file.GetLocation();
+    }
+    return true;
+  }
+  return false;
+}
+
+static void CheckAndClearResolveException(Thread* self)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  CHECK(self->IsExceptionPending());
+  mirror::Throwable* exception = self->GetException();
+  std::string temp;
+  const char* descriptor = exception->GetClass()->GetDescriptor(&temp);
+  const char* expected_exceptions[] = {
+      "Ljava/lang/IllegalAccessError;",
+      "Ljava/lang/IncompatibleClassChangeError;",
+      "Ljava/lang/InstantiationError;",
+      "Ljava/lang/LinkageError;",
+      "Ljava/lang/NoClassDefFoundError;",
+      "Ljava/lang/NoSuchFieldError;",
+      "Ljava/lang/NoSuchMethodError;"
+  };
+  bool found = false;
+  for (size_t i = 0; !found && i < arraysize(expected_exceptions); ++i) {
+    if (strcmp(descriptor, expected_exceptions[i]) == 0) {
+      found = true;
+    }
+  }
+  if (!found) {
+    LOG(FATAL) << "Unexpected exception " << exception->Dump();
+  }
+  self->ClearException();
+}
+
+bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
+                                                uint16_t class_def_idx) const {
+  const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx);
+  const uint8_t* class_data = dex_file.GetClassData(class_def);
+  if (class_data == nullptr) {
+    // Empty class, such as a marker interface.
+    return false;
+  }
+  ClassDataItemIterator it(dex_file, class_data);
+  while (it.HasNextStaticField()) {
+    it.Next();
+  }
+  // We require a constructor barrier if there are final instance fields.
+  while (it.HasNextInstanceField()) {
+    if (it.MemberIsFinal()) {
+      return true;
+    }
+    it.Next();
+  }
+  return false;
+}
+
+class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
+ public:
+  explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
+      : manager_(manager) {}
+
+  void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) {
+    ATRACE_CALL();
+    Thread* const self = Thread::Current();
+    jobject jclass_loader = manager_->GetClassLoader();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    ClassLinker* class_linker = manager_->GetClassLinker();
+
+    // If an instance field is final then we need to have a barrier on the return; static final
+    // fields are assigned within the lock held for class initialization. Conservatively assume
+    // constructor barriers are always required.
+    bool requires_constructor_barrier = true;
+
+    // Methods and fields are the hardest to resolve. We cannot resolve them without either
+    // context from the code use (to disambiguate virtual vs. direct method and instance vs.
+    // static field) or from class definitions. While the compiler will resolve what it can as
+    // it needs it, here we try to resolve fields and methods used in class definitions, since
+    // many of them may never be referenced by generated code (see the iterator sketch below).
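Aside (the iterator sketch referenced above): ClassDataItemIterator walks a class_data item in a fixed order: static fields, then instance fields, then direct methods, then virtual methods. That ordering is why every consumer in this file drains the sections in the same sequence. A toy iterator modeling only the ordering contract, not the real LEB128-encoded format:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum class Kind { kStaticField, kInstanceField, kDirectMethod, kVirtualMethod };

    class ToyClassDataIterator {
     public:
      explicit ToyClassDataIterator(std::vector<Kind> items) : items_(std::move(items)) {}
      bool HasNext(Kind k) const { return pos_ < items_.size() && items_[pos_] == k; }
      void Next() { ++pos_; }
      bool Done() const { return pos_ == items_.size(); }
     private:
      std::vector<Kind> items_;
      size_t pos_ = 0;
    };

    int main() {
      ToyClassDataIterator it({Kind::kStaticField, Kind::kInstanceField,
                               Kind::kDirectMethod, Kind::kVirtualMethod});
      // Consumers must drain each section in order, exactly as the resolver does.
      while (it.HasNext(Kind::kStaticField)) it.Next();
      while (it.HasNext(Kind::kInstanceField)) it.Next();
      while (it.HasNext(Kind::kDirectMethod)) it.Next();
      while (it.HasNext(Kind::kVirtualMethod)) it.Next();
      assert(it.Done());
      return 0;
    }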
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + ScopedObjectAccess soa(self); + StackHandleScope<2> hs(soa.Self()); + Handle class_loader( + hs.NewHandle(soa.Decode(jclass_loader))); + Handle dex_cache(hs.NewHandle(class_linker->FindDexCache( + soa.Self(), dex_file, false))); + // Resolve the class. + mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, + class_loader); + bool resolve_fields_and_methods; + if (klass == nullptr) { + // Class couldn't be resolved, for example, super-class is in a different dex file. Don't + // attempt to resolve methods and fields when there is no declaring class. + CheckAndClearResolveException(soa.Self()); + resolve_fields_and_methods = false; + } else { + // We successfully resolved a class, should we skip it? + if (SkipClass(jclass_loader, dex_file, klass)) { + return; + } + // We want to resolve the methods and fields eagerly. + resolve_fields_and_methods = true; + } + // Note the class_data pointer advances through the headers, + // static fields, instance fields, direct methods, and virtual + // methods. + const uint8_t* class_data = dex_file.GetClassData(class_def); + if (class_data == nullptr) { + // Empty class such as a marker interface. + requires_constructor_barrier = false; + } else { + ClassDataItemIterator it(dex_file, class_data); + while (it.HasNextStaticField()) { + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, true); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + } + it.Next(); + } + // We require a constructor barrier if there are final instance fields. + requires_constructor_barrier = false; + while (it.HasNextInstanceField()) { + if (it.MemberIsFinal()) { + requires_constructor_barrier = true; + } + if (resolve_fields_and_methods) { + ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), + dex_cache, class_loader, false); + if (field == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + } + it.Next(); + } + if (resolve_fields_and_methods) { + while (it.HasNextDirectMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); + } + while (it.HasNextVirtualMethod()) { + ArtMethod* method = class_linker->ResolveMethod( + dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMethodInvokeType(class_def)); + if (method == nullptr) { + CheckAndClearResolveException(soa.Self()); + } + it.Next(); + } + DCHECK(!it.HasNext()); + } + } + manager_->GetCompiler()->SetRequiresConstructorBarrier(self, + &dex_file, + class_def_index, + requires_constructor_barrier); + } + + private: + const ParallelCompilationManager* const manager_; +}; + +class ResolveTypeVisitor : public CompilationVisitor { + public: + explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) { + } + virtual void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) { + // Class derived values are more complicated, they require the linker and loader. 
+ ScopedObjectAccess soa(Thread::Current()); + ClassLinker* class_linker = manager_->GetClassLinker(); + const DexFile& dex_file = *manager_->GetDexFile(); + StackHandleScope<2> hs(soa.Self()); + Handle class_loader( + hs.NewHandle(soa.Decode(manager_->GetClassLoader()))); + Handle dex_cache(hs.NewHandle(class_linker->RegisterDexFile( + dex_file, + class_loader.Get()))); + mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader); + + if (klass == nullptr) { + soa.Self()->AssertPendingException(); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); + if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { + // There's little point continuing compilation if the heap is exhausted. + LOG(FATAL) << "Out of memory during type resolution for compilation"; + } + soa.Self()->ClearException(); + } + } + + private: + const ParallelCompilationManager* const manager_; +}; + +void CompilerDriver::ResolveDexFile(jobject class_loader, + const DexFile& dex_file, + const std::vector& dex_files, + ThreadPool* thread_pool, + size_t thread_count, + TimingLogger* timings) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + + // TODO: we could resolve strings here, although the string table is largely filled with class + // and method names. + + ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, + thread_pool); + if (IsBootImage()) { + // For images we resolve all types, such as array, whereas for applications just those with + // classdefs are resolved by ResolveClassFieldsAndMethods. + TimingLogger::ScopedTiming t("Resolve Types", timings); + ResolveTypeVisitor visitor(&context); + context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count); + } + + TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings); + ResolveClassFieldsAndMethodsVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); +} + +void CompilerDriver::SetVerified(jobject class_loader, + const std::vector& dex_files, + TimingLogger* timings) { + // This can be run in parallel. + for (const DexFile* dex_file : dex_files) { + CHECK(dex_file != nullptr); + SetVerifiedDexFile(class_loader, + *dex_file, + dex_files, + parallel_thread_pool_.get(), + parallel_thread_count_, + timings); + } +} + +void CompilerDriver::Verify(jobject class_loader, + const std::vector& dex_files, + TimingLogger* timings) { + // Note: verification should not be pulling in classes anymore when compiling the boot image, + // as all should have been resolved before. As such, doing this in parallel should still + // be deterministic. + for (const DexFile* dex_file : dex_files) { + CHECK(dex_file != nullptr); + VerifyDexFile(class_loader, + *dex_file, + dex_files, + parallel_thread_pool_.get(), + parallel_thread_count_, + timings); + } +} + +class VerifyClassVisitor : public CompilationVisitor { + public: + VerifyClassVisitor(const ParallelCompilationManager* manager, LogSeverity log_level) + : manager_(manager), log_level_(log_level) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + ScopedObjectAccess soa(Thread::Current()); + const DexFile& dex_file = *manager_->GetDexFile(); + if (!manager_->GetCompiler()->ShouldVerifyClassBasedOnProfile(dex_file, class_def_index)) { + // Skip verification since the class is not in the profile. 
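Aside: the early return just below means profile-guided verification reduces to a set-membership test on (dex file, class index) pairs. A hypothetical model of that gating; ART's real ProfileCompilationInfo stores much richer data:

    #include <cassert>
    #include <cstdint>
    #include <set>
    #include <string>
    #include <utility>

    class ToyProfile {
     public:
      void AddClass(const std::string& dex_location, uint16_t class_idx) {
        classes_.emplace(dex_location, class_idx);
      }
      bool ContainsClass(const std::string& dex_location, uint16_t class_idx) const {
        return classes_.count({dex_location, class_idx}) != 0;
      }
     private:
      std::set<std::pair<std::string, uint16_t>> classes_;
    };

    int main() {
      ToyProfile profile;
      profile.AddClass("base.apk", 7);
      assert(profile.ContainsClass("base.apk", 7));   // Verify this class.
      assert(!profile.ContainsClass("base.apk", 8));  // Skip this one.
      return 0;
    }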
+ return; + } + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ClassLinker* class_linker = manager_->GetClassLinker(); + jobject jclass_loader = manager_->GetClassLoader(); + StackHandleScope<3> hs(soa.Self()); + Handle class_loader( + hs.NewHandle(soa.Decode(jclass_loader))); + Handle klass( + hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); + if (klass.Get() == nullptr) { + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + + /* + * At compile time, we can still structurally verify the class even if FindClass fails. + * This is to ensure the class is structurally sound for compilation. An unsound class + * will be rejected by the verifier and later skipped during compilation in the compiler. + */ + Handle dex_cache(hs.NewHandle(class_linker->FindDexCache( + soa.Self(), dex_file, false))); + std::string error_msg; + if (verifier::MethodVerifier::VerifyClass(soa.Self(), + &dex_file, + dex_cache, + class_loader, + &class_def, + Runtime::Current()->GetCompilerCallbacks(), + true /* allow soft failures */, + log_level_, + &error_msg) == + verifier::MethodVerifier::kHardFailure) { + LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) + << " because: " << error_msg; + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } + } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { + CHECK(klass->IsResolved()) << PrettyClass(klass.Get()); + class_linker->VerifyClass(soa.Self(), klass, log_level_); + + if (klass->IsErroneous()) { + // ClassLinker::VerifyClass throws, which isn't useful in the compiler. + CHECK(soa.Self()->IsExceptionPending()); + soa.Self()->ClearException(); + manager_->GetCompiler()->SetHadHardVerifierFailure(); + } + + CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous()) + << PrettyDescriptor(klass.Get()) << ": state=" << klass->GetStatus(); + + // It is *very* problematic if there are verification errors in the boot classpath. For example, + // we rely on things working OK without verification when the decryption dialog is brought up. + // So abort in a debug build if we find this violated. + DCHECK(!manager_->GetCompiler()->IsBootImage() || klass->IsVerified()) + << "Boot classpath class " << PrettyClass(klass.Get()) << " failed to fully verify."; + } + soa.Self()->AssertNoPendingException(); + } + + private: + const ParallelCompilationManager* const manager_; + const LogSeverity log_level_; +}; + +void CompilerDriver::VerifyDexFile(jobject class_loader, + const DexFile& dex_file, + const std::vector& dex_files, + ThreadPool* thread_pool, + size_t thread_count, + TimingLogger* timings) { + TimingLogger::ScopedTiming t("Verify Dex File", timings); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, + thread_pool); + LogSeverity log_level = GetCompilerOptions().AbortOnHardVerifierFailure() + ? 
LogSeverity::INTERNAL_FATAL
+      : LogSeverity::WARNING;
+  VerifyClassVisitor visitor(&context, log_level);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
+}
+
+class SetVerifiedClassVisitor : public CompilationVisitor {
+ public:
+  explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager)
+      : manager_(manager) {}
+
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    ScopedObjectAccess soa(Thread::Current());
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    // The class might have failed resolution; in that case, don't set it to verified.
+    if (klass.Get() != nullptr) {
+      // Only do this if the class is resolved. If even resolution fails, quickening will go very,
+      // very wrong.
+      if (klass->IsResolved()) {
+        if (klass->GetStatus() < mirror::Class::kStatusVerified) {
+          ObjectLock<mirror::Class> lock(soa.Self(), klass);
+          // Set class status to verified.
+          mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self());
+          // Mark methods as pre-verified. If we don't do this, the interpreter will run with
+          // access checks.
+          klass->SetSkipAccessChecksFlagOnAllMethods(
+              GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
+          klass->SetVerificationAttempted();
+        }
+        // Record the final class status if necessary.
+ ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + } + } else { + Thread* self = soa.Self(); + DCHECK(self->IsExceptionPending()); + self->ClearException(); + } + } + + private: + const ParallelCompilationManager* const manager_; +}; + +void CompilerDriver::SetVerifiedDexFile(jobject class_loader, + const DexFile& dex_file, + const std::vector& dex_files, + ThreadPool* thread_pool, + size_t thread_count, + TimingLogger* timings) { + TimingLogger::ScopedTiming t("Verify Dex File", timings); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, + thread_pool); + SetVerifiedClassVisitor visitor(&context); + context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); +} + +class InitializeClassVisitor : public CompilationVisitor { + public: + explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} + + virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { + ATRACE_CALL(); + jobject jclass_loader = manager_->GetClassLoader(); + const DexFile& dex_file = *manager_->GetDexFile(); + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); + const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); + const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); + + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<3> hs(soa.Self()); + Handle class_loader( + hs.NewHandle(soa.Decode(jclass_loader))); + Handle klass( + hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); + + if (klass.Get() != nullptr && !SkipClass(jclass_loader, dex_file, klass.Get())) { + // Only try to initialize classes that were successfully verified. + if (klass->IsVerified()) { + // Attempt to initialize the class but bail if we either need to initialize the super-class + // or static fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); + if (!klass->IsInitialized()) { + // We don't want non-trivial class initialization occurring on multiple threads due to + // deadlock problems. For example, a parent class is initialized (holding its lock) that + // refers to a sub-class in its static/class initializer causing it to try to acquire the + // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock) + // after first initializing its parents, whose locks are acquired. This leads to a + // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. + // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather + // than use a special Object for the purpose we use the Class of java.lang.Class. + Handle h_klass(hs.NewHandle(klass->GetClass())); + ObjectLock lock(soa.Self(), h_klass); + // Attempt to initialize allowing initialization of parent classes but still not static + // fields. + manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); + if (!klass->IsInitialized()) { + // We need to initialize static fields, we only do this for image classes that aren't + // marked with the $NoPreloadHolder (which implies this should not be initialized early). 
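Aside: the $NoPreloadHolder opt-out tested just below is a plain descriptor-suffix check. A minimal equivalent using std::string in place of ART's StringPiece:

    #include <cassert>
    #include <string>

    static bool EndsWith(const std::string& s, const std::string& suffix) {
      return s.size() >= suffix.size() &&
             s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
    }

    int main() {
      assert(EndsWith("Landroid/os/Build$NoPreloadHolder;", "$NoPreloadHolder;"));
      assert(!EndsWith("Landroid/os/Build;", "$NoPreloadHolder;"));
      return 0;
    }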
+ bool can_init_static_fields = manager_->GetCompiler()->IsBootImage() && + manager_->GetCompiler()->IsImageClass(descriptor) && + !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); + if (can_init_static_fields) { + VLOG(compiler) << "Initializing: " << descriptor; + // TODO multithreading support. We should ensure the current compilation thread has + // exclusive access to the runtime and the transaction. To achieve this, we could use + // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity + // checks in Thread::AssertThreadSuspensionIsAllowable. + Runtime* const runtime = Runtime::Current(); + Transaction transaction; + + // Run the class initializer in transaction mode. + runtime->EnterTransactionMode(&transaction); + const mirror::Class::Status old_status = klass->GetStatus(); + bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, + true); + // TODO we detach transaction from runtime to indicate we quit the transactional + // mode which prevents the GC from visiting objects modified during the transaction. + // Ensure GC is not run so don't access freed objects when aborting transaction. + + ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end"); + runtime->ExitTransactionMode(); + + if (!success) { + CHECK(soa.Self()->IsExceptionPending()); + mirror::Throwable* exception = soa.Self()->GetException(); + VLOG(compiler) << "Initialization of " << descriptor << " aborted because of " + << exception->Dump(); + std::ostream* file_log = manager_->GetCompiler()-> + GetCompilerOptions().GetInitFailureOutput(); + if (file_log != nullptr) { + *file_log << descriptor << "\n"; + *file_log << exception->Dump() << "\n"; + } + soa.Self()->ClearException(); + transaction.Rollback(); + CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored"; + } + } + } + soa.Self()->AssertNoPendingException(); + } + } + // Record the final class status if necessary. + ClassReference ref(manager_->GetDexFile(), class_def_index); + manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); + } + // Clear any class not found or verification exceptions. + soa.Self()->ClearException(); + } + + private: + const ParallelCompilationManager* const manager_; +}; + +void CompilerDriver::InitializeClasses(jobject jni_class_loader, + const DexFile& dex_file, + const std::vector& dex_files, + TimingLogger* timings) { + TimingLogger::ScopedTiming t("InitializeNoClinit", timings); + + // Initialization allocates objects and needs to run single-threaded to be deterministic. + bool force_determinism = GetCompilerOptions().IsForceDeterminism(); + ThreadPool* init_thread_pool = force_determinism + ? single_thread_pool_.get() + : parallel_thread_pool_.get(); + size_t init_thread_count = force_determinism ? 1U : parallel_thread_count_; + + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files, + init_thread_pool); + if (IsBootImage()) { + // TODO: remove this when transactional mode supports multithreading. 
+    init_thread_count = 1U;
+  }
+  InitializeClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
+}
+
+class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
+ public:
+  virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+      return true;
+    }
+    if (klass->IsArrayClass()) {
+      StackHandleScope<1> hs(Thread::Current());
+      Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(),
+                                                              hs.NewHandle(klass),
+                                                              true,
+                                                              true);
+    }
+    // Create the conflict tables.
+    FillIMTAndConflictTables(klass);
+    return true;
+  }
+
+ private:
+  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!klass->ShouldHaveImt()) {
+      return;
+    }
+    if (visited_classes_.find(klass) != visited_classes_.end()) {
+      return;
+    }
+    if (klass->HasSuperClass()) {
+      FillIMTAndConflictTables(klass->GetSuperClass());
+    }
+    if (!klass->IsTemp()) {
+      Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
+    }
+    visited_classes_.insert(klass);
+  }
+
+  std::set<mirror::Class*> visited_classes_;
+};
+
+void CompilerDriver::InitializeClasses(jobject class_loader,
+                                       const std::vector<const DexFile*>& dex_files,
+                                       TimingLogger* timings) {
+  for (size_t i = 0; i != dex_files.size(); ++i) {
+    const DexFile* dex_file = dex_files[i];
+    CHECK(dex_file != nullptr);
+    InitializeClasses(class_loader, *dex_file, dex_files, timings);
+  }
+  if (boot_image_ || app_image_) {
+    // Make sure that we call EnsureInitialized on all the array classes to call
+    // SetVerificationAttempted so that the access flags are set. If we do not do this, they get
+    // changed at runtime, resulting in more dirty image pages.
+    // Also create the conflict tables.
+    // Only useful if we are compiling an image (image_classes_ is not null).
+    ScopedObjectAccess soa(Thread::Current());
+    InitializeArrayClassesAndCreateConflictTablesVisitor visitor;
+    Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
+  }
+  if (IsBootImage()) {
+    // Prune garbage objects created during aborted transactions.
+    Runtime::Current()->GetHeap()->CollectGarbage(true);
+  }
+}
+
+void CompilerDriver::Compile(jobject class_loader,
+                             const std::vector<const DexFile*>& dex_files,
+                             TimingLogger* timings) {
+  if (kDebugProfileGuidedCompilation) {
+    LOG(INFO) << "[ProfileGuidedCompilation] " <<
+        ((profile_compilation_info_ == nullptr)
+            ? "null"
+            : profile_compilation_info_->DumpInfo(&dex_files));
+  }
+
+  DCHECK(current_dex_to_dex_methods_ == nullptr);
+  for (const DexFile* dex_file : dex_files) {
+    CHECK(dex_file != nullptr);
+    CompileDexFile(class_loader,
+                   *dex_file,
+                   dex_files,
+                   parallel_thread_pool_.get(),
+                   parallel_thread_count_,
+                   timings);
+    const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool();
+    const size_t arena_alloc = arena_pool->GetBytesAllocated();
+    max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_);
+    Runtime::Current()->ReclaimArenaPoolMemory();
+  }
+
+  ArrayRef<DexFileMethodSet> dex_to_dex_references;
+  {
+    // From this point on, we shall not modify dex_to_dex_references_, so
+    // just grab a reference to it that we use without holding the mutex.
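Aside: the comment above describes a common lock discipline: take the mutex only long enough to capture a view of a container that is guaranteed not to change any more, then iterate without holding the lock. An illustrative standalone sketch with a hypothetical WorkQueue:

    #include <cassert>
    #include <mutex>
    #include <vector>

    class WorkQueue {
     public:
      void Add(int value) {
        std::lock_guard<std::mutex> lock(mutex_);
        items_.push_back(value);
      }
      // Caller promises no further Add() calls; the returned view is then
      // safe to read without holding the mutex.
      const std::vector<int>& FrozenView() {
        std::lock_guard<std::mutex> lock(mutex_);
        return items_;
      }
     private:
      std::mutex mutex_;
      std::vector<int> items_;
    };

    int main() {
      WorkQueue queue;
      queue.Add(1);
      queue.Add(2);
      const std::vector<int>& view = queue.FrozenView();
      assert(view.size() == 2);  // Iterate lock-free once mutation has ceased.
      return 0;
    }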
+    MutexLock lock(Thread::Current(), dex_to_dex_references_lock_);
+    dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_);
+  }
+  for (const auto& method_set : dex_to_dex_references) {
+    current_dex_to_dex_methods_ = &method_set.GetMethodIndexes();
+    CompileDexFile(class_loader,
+                   method_set.GetDexFile(),
+                   dex_files,
+                   parallel_thread_pool_.get(),
+                   parallel_thread_count_,
+                   timings);
+  }
+  current_dex_to_dex_methods_ = nullptr;
+
+  VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
+}
+
+class CompileClassVisitor : public CompilationVisitor {
+ public:
+  explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    ClassReference ref(&dex_file, class_def_index);
+    // Skip compiling classes with generic verifier failures since they will still fail at runtime.
+    if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
+      return;
+    }
+    // Use a scoped object access to perform the quick SkipClass check.
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    Handle<mirror::DexCache> dex_cache;
+    if (klass.Get() == nullptr) {
+      soa.Self()->AssertPendingException();
+      soa.Self()->ClearException();
+      dex_cache = hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+    } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+      return;
+    } else {
+      dex_cache = hs.NewHandle(klass->GetDexCache());
+    }
+
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
+    if (class_data == nullptr) {
+      // Empty class, probably a marker interface.
+      return;
+    }
+
+    // Go to native so that we don't block GC during compilation.
+    ScopedThreadSuspension sts(soa.Self(), kNative);
+
+    CompilerDriver* const driver = manager_->GetCompiler();
+
+    // Can we run the DEX-to-DEX compiler on this class?
+
+class CompileClassVisitor : public CompilationVisitor {
+ public:
+  explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+
+  virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+    ATRACE_CALL();
+    const DexFile& dex_file = *manager_->GetDexFile();
+    const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+    ClassLinker* class_linker = manager_->GetClassLinker();
+    jobject jclass_loader = manager_->GetClassLoader();
+    ClassReference ref(&dex_file, class_def_index);
+    // Skip compiling classes with generic verifier failures since they will still fail at runtime
+    if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
+      return;
+    }
+    // Use a scoped object access to perform the quick SkipClass check.
+    const char* descriptor = dex_file.GetClassDescriptor(class_def);
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<3> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+    Handle<mirror::Class> klass(
+        hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader)));
+    Handle<mirror::DexCache> dex_cache;
+    if (klass.Get() == nullptr) {
+      soa.Self()->AssertPendingException();
+      soa.Self()->ClearException();
+      dex_cache = hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
+    } else if (SkipClass(jclass_loader, dex_file, klass.Get())) {
+      return;
+    } else {
+      dex_cache = hs.NewHandle(klass->GetDexCache());
+    }
+
+    const uint8_t* class_data = dex_file.GetClassData(class_def);
+    if (class_data == nullptr) {
+      // empty class, probably a marker interface
+      return;
+    }
+
+    // Go to native so that we don't block GC during compilation.
+    ScopedThreadSuspension sts(soa.Self(), kNative);
+
+    CompilerDriver* const driver = manager_->GetCompiler();
+
+    // Can we run the DEX-to-DEX compiler on this class?
+    optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
+        GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
+
+    ClassDataItemIterator it(dex_file, class_data);
+    // Skip fields
+    while (it.HasNextStaticField()) {
+      it.Next();
+    }
+    while (it.HasNextInstanceField()) {
+      it.Next();
+    }
+
+    bool compilation_enabled = driver->IsClassToCompile(
+        dex_file.StringByTypeIdx(class_def.class_idx_));
+
+    // Compile direct methods
+    int64_t previous_direct_method_idx = -1;
+    while (it.HasNextDirectMethod()) {
+      uint32_t method_idx = it.GetMemberIndex();
+      if (method_idx == previous_direct_method_idx) {
+        // smali can create dex files with two encoded_methods sharing the same method_idx
+        // http://code.google.com/p/smali/issues/detail?id=119
+        it.Next();
+        continue;
+      }
+      previous_direct_method_idx = method_idx;
+      CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+                    it.GetMethodInvokeType(class_def), class_def_index,
+                    method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+                    compilation_enabled, dex_cache);
+      it.Next();
+    }
+    // Compile virtual methods
+    int64_t previous_virtual_method_idx = -1;
+    while (it.HasNextVirtualMethod()) {
+      uint32_t method_idx = it.GetMemberIndex();
+      if (method_idx == previous_virtual_method_idx) {
+        // smali can create dex files with two encoded_methods sharing the same method_idx
+        // http://code.google.com/p/smali/issues/detail?id=119
+        it.Next();
+        continue;
+      }
+      previous_virtual_method_idx = method_idx;
+      CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(),
+                    it.GetMethodInvokeType(class_def), class_def_index,
+                    method_idx, jclass_loader, dex_file, dex_to_dex_compilation_level,
+                    compilation_enabled, dex_cache);
+      it.Next();
+    }
+    DCHECK(!it.HasNext());
+  }
+
+ private:
+  const ParallelCompilationManager* const manager_;
+};
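
The direct/virtual loops above rely on dex method lists being sorted by method index, so duplicate encoded_methods (as produced by old smali versions) are always adjacent, and a single previous-index variable is enough to skip them in one pass. A small stand-alone illustration of the same idiom:

#include <cstdint>
#include <vector>

void CompileSkippingDuplicates(const std::vector<uint32_t>& method_indexes) {
  int64_t previous = -1;  // -1 cannot collide with any valid uint32_t index.
  for (uint32_t idx : method_indexes) {
    if (idx == previous) {
      continue;  // Duplicate encoded_method; skip it.
    }
    previous = idx;
    // ... compile method `idx` here ...
  }
}
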
+
+void CompilerDriver::CompileDexFile(jobject class_loader,
+                                    const DexFile& dex_file,
+                                    const std::vector<const DexFile*>& dex_files,
+                                    ThreadPool* thread_pool,
+                                    size_t thread_count,
+                                    TimingLogger* timings) {
+  TimingLogger::ScopedTiming t("Compile Dex File", timings);
+  ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
+                                     &dex_file, dex_files, thread_pool);
+  CompileClassVisitor visitor(&context);
+  context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
+}
+
+void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
+                                       CompiledMethod* const compiled_method,
+                                       size_t non_relative_linker_patch_count) {
+  DCHECK(GetCompiledMethod(method_ref) == nullptr)
+      << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file);
+  {
+    MutexLock mu(Thread::Current(), compiled_methods_lock_);
+    compiled_methods_.Put(method_ref, compiled_method);
+    non_relative_linker_patch_count_ += non_relative_linker_patch_count;
+  }
+  DCHECK(GetCompiledMethod(method_ref) != nullptr)
+      << PrettyMethod(method_ref.dex_method_index, *method_ref.dex_file);
+}
+
+void CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
+  CompiledMethod* compiled_method = nullptr;
+  {
+    MutexLock mu(Thread::Current(), compiled_methods_lock_);
+    auto it = compiled_methods_.find(method_ref);
+    if (it != compiled_methods_.end()) {
+      compiled_method = it->second;
+      compiled_methods_.erase(it);
+    }
+  }
+  if (compiled_method != nullptr) {
+    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, compiled_method);
+  }
+}
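
RemoveCompiledMethod() erases the table entry while holding compiled_methods_lock_ but releases the memory only after dropping the lock, which keeps the critical section short. The same shape in plain C++ (all names here are illustrative):

#include <map>
#include <mutex>
#include <string>

std::mutex g_table_lock;
std::map<int, std::string*> g_table;

// Remove an entry under the lock; defer the (potentially slow) deallocation
// until after the lock is released.
void RemoveEntry(int key) {
  std::string* victim = nullptr;
  {
    std::lock_guard<std::mutex> lock(g_table_lock);
    auto it = g_table.find(key);
    if (it != g_table.end()) {
      victim = it->second;
      g_table.erase(it);
    }
  }
  delete victim;  // Deleting nullptr is a no-op.
}
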
+
+CompiledClass* CompilerDriver::GetCompiledClass(ClassReference ref) const {
+  MutexLock mu(Thread::Current(), compiled_classes_lock_);
+  ClassTable::const_iterator it = compiled_classes_.find(ref);
+  if (it == compiled_classes_.end()) {
+    return nullptr;
+  }
+  CHECK(it->second != nullptr);
+  return it->second;
+}
+
+void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status status) {
+  MutexLock mu(Thread::Current(), compiled_classes_lock_);
+  auto it = compiled_classes_.find(ref);
+  if (it == compiled_classes_.end() || it->second->GetStatus() != status) {
+    // An entry doesn't exist or the status is lower than the new status.
+    if (it != compiled_classes_.end()) {
+      CHECK_GT(status, it->second->GetStatus());
+      delete it->second;
+    }
+    switch (status) {
+      case mirror::Class::kStatusNotReady:
+      case mirror::Class::kStatusError:
+      case mirror::Class::kStatusRetryVerificationAtRuntime:
+      case mirror::Class::kStatusVerified:
+      case mirror::Class::kStatusInitialized:
+      case mirror::Class::kStatusResolved:
+        break;  // Expected states.
+      default:
+        LOG(FATAL) << "Unexpected class status for class "
+            << PrettyDescriptor(
+                ref.first->GetClassDescriptor(ref.first->GetClassDef(ref.second)))
+            << " of " << status;
+    }
+    CompiledClass* compiled_class = new CompiledClass(status);
+    compiled_classes_.Overwrite(ref, compiled_class);
+  }
+}
+
+CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
+  MutexLock mu(Thread::Current(), compiled_methods_lock_);
+  MethodTable::const_iterator it = compiled_methods_.find(ref);
+  if (it == compiled_methods_.end()) {
+    return nullptr;
+  }
+  CHECK(it->second != nullptr);
+  return it->second;
+}
+
+bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                                     uint16_t class_def_idx,
+                                                     const DexFile& dex_file) const {
+  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
+  if (verified_method != nullptr) {
+    return !verified_method->HasVerificationFailures();
+  }
+
+  // If we can't find verification metadata, check if this is a system class (we trust that system
+  // classes have their methods verified). If it's not, be conservative and assume the method
+  // has not been verified successfully.
+
+  // TODO: When compiling the boot image it should be safe to assume that everything is verified,
+  // even if methods are not found in the verification cache.
+  const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
+  if (!is_system_class) {
+    self->ClearException();
+  }
+  return is_system_class;
+}
+
+size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const {
+  MutexLock mu(Thread::Current(), compiled_methods_lock_);
+  return non_relative_linker_patch_count_;
+}
+
+void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
+                                                   const DexFile* dex_file,
+                                                   uint16_t class_def_index,
+                                                   bool requires) {
+  WriterMutexLock mu(self, requires_constructor_barrier_lock_);
+  requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires);
+}
+
+bool CompilerDriver::RequiresConstructorBarrier(Thread* self,
+                                                const DexFile* dex_file,
+                                                uint16_t class_def_index) {
+  ClassReference class_ref(dex_file, class_def_index);
+  {
+    ReaderMutexLock mu(self, requires_constructor_barrier_lock_);
+    auto it = requires_constructor_barrier_.find(class_ref);
+    if (it != requires_constructor_barrier_.end()) {
+      return it->second;
+    }
+  }
+  WriterMutexLock mu(self, requires_constructor_barrier_lock_);
+  const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index);
+  requires_constructor_barrier_.emplace(class_ref, requires);
+  return requires;
+}
+
+std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
+  std::ostringstream oss;
+  const gc::Heap* const heap = Runtime::Current()->GetHeap();
+  const size_t java_alloc = heap->GetBytesAllocated();
+  oss << "arena alloc=" << PrettySize(max_arena_alloc_) << " (" << max_arena_alloc_ << "B)";
+  oss << " java alloc=" << PrettySize(java_alloc) << " (" << java_alloc << "B)";
+#if defined(__BIONIC__) || defined(__GLIBC__)
+  const struct mallinfo info = mallinfo();
+  const size_t allocated_space = static_cast<size_t>(info.uordblks);
+  const size_t free_space = static_cast<size_t>(info.fordblks);
+  oss << " native alloc=" << PrettySize(allocated_space) << " (" << allocated_space << "B)"
+      << " free=" << PrettySize(free_space) << " (" << free_space << "B)";
+#endif
+  compiled_method_storage_.DumpMemoryUsage(oss, extended);
+  return oss.str();
+}
+
+bool CompilerDriver::IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file) {
+  const char* type = dex_file->GetTypeDescriptor(dex_file->GetTypeId(type_index));
+  return strcmp(type, "Ljava/lang/String;") == 0;
+}
+
+bool CompilerDriver::IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset) {
+  DexFileMethodInliner* inliner = GetMethodInlinerMap()->GetMethodInliner(dex_file);
+  size_t pointer_size = InstructionSetPointerSize(GetInstructionSet());
+  *offset = inliner->GetOffsetForStringInit(method_index, pointer_size);
+  return inliner->IsStringInitMethodIndex(method_index);
+}
+
+bool CompilerDriver::MayInlineInternal(const DexFile* inlined_from,
+                                       const DexFile* inlined_into) const {
+  // We're not allowed to inline across dex files if we're the no-inline-from dex file.
+  if (inlined_from != inlined_into &&
+      compiler_options_->GetNoInlineFromDexFile() != nullptr &&
+      ContainsElement(*compiler_options_->GetNoInlineFromDexFile(), inlined_from)) {
+    return false;
+  }
+
+  return true;
+}
+
+void CompilerDriver::InitializeThreadPools() {
+  size_t parallel_count = parallel_thread_count_ > 0 ? parallel_thread_count_ - 1 : 0;
+  parallel_thread_pool_.reset(
+      new ThreadPool("Compiler driver thread pool", parallel_count));
+  single_thread_pool_.reset(new ThreadPool("Single-threaded Compiler driver thread pool", 0));
+}
+
+void CompilerDriver::FreeThreadPools() {
+  parallel_thread_pool_.reset();
+  single_thread_pool_.reset();
+}
+
+}  // namespace art
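
InitializeThreadPools() sizes the parallel pool at thread_count - 1, presumably because the thread that kicks off the work participates in it as well, keeping total parallelism at thread_count. A hedged sketch of that sizing rule with std::thread; RunInParallel is not an ART API:

#include <cstddef>
#include <thread>
#include <vector>

void RunInParallel(size_t thread_count, void (*task)(size_t worker_id)) {
  // One fewer worker thread than the requested parallelism...
  size_t worker_count = thread_count > 0 ? thread_count - 1 : 0;
  std::vector<std::thread> workers;
  for (size_t i = 0; i < worker_count; ++i) {
    workers.emplace_back(task, i + 1);
  }
  task(0);  // ...because the calling thread works too, as worker 0.
  for (std::thread& t : workers) {
    t.join();
  }
}
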
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
new file mode 100644
index 000000000..2dd46514e
--- /dev/null
+++ b/compiler/driver/compiler_driver.h
@@ -0,0 +1,729 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
+#define ART_COMPILER_DRIVER_COMPILER_DRIVER_H_
+
+#include <set>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
+#include "base/bit_utils.h"
+#include "base/mutex.h"
+#include "base/timing_logger.h"
+#include "class_reference.h"
+#include "compiler.h"
+#include "dex_file.h"
+#include "driver/compiled_method_storage.h"
+#include "jit/offline_profiling_info.h"
+#include "invoke_type.h"
+#include "method_reference.h"
+#include "mirror/class.h"  // For mirror::Class::Status.
+#include "os.h"
+#include "runtime.h"
+#include "safe_map.h"
+#include "thread_pool.h"
+#include "utils/array_ref.h"
+#include "utils/dex_cache_arrays_layout.h"
+
+namespace art {
+
+namespace mirror {
+class DexCache;
+}  // namespace mirror
+
+namespace verifier {
+class MethodVerifier;
+}  // namespace verifier
+
+class BitVector;
+class CompiledClass;
+class CompiledMethod;
+class CompilerOptions;
+class DexCompilationUnit;
+class DexFileToMethodInlinerMap;
+struct InlineIGetIPutData;
+class InstructionSetFeatures;
+class ParallelCompilationManager;
+class ScopedObjectAccess;
+template <class Allocator> class SrcMap;
+class SrcMapElem;
+using SwapSrcMap = SrcMap<SwapAllocator<SrcMapElem>>;
+template <class T> class Handle;
+class TimingLogger;
+class VerificationResults;
+class VerifiedMethod;
+
+enum EntryPointCallingConvention {
+  // ABI of invocations to a method's interpreter entry point.
+  kInterpreterAbi,
+  // ABI of calls to a method's native code, only used for native methods.
+  kJniAbi,
+  // ABI of calls to a method's quick code entry point.
+  kQuickAbi
+};
+
+class CompilerDriver {
+ public:
+  // Create a compiler targeting the requested "instruction_set".
+  // "image" should be true if image specific optimizations should be
+  // enabled. "image_classes" lets the compiler know what classes it
+  // can assume will be in the image, with null implying all available
+  // classes.
+  CompilerDriver(const CompilerOptions* compiler_options,
+                 VerificationResults* verification_results,
+                 DexFileToMethodInlinerMap* method_inliner_map,
+                 Compiler::Kind compiler_kind,
+                 InstructionSet instruction_set,
+                 const InstructionSetFeatures* instruction_set_features,
+                 bool boot_image,
+                 bool app_image,
+                 std::unordered_set<std::string>* image_classes,
+                 std::unordered_set<std::string>* compiled_classes,
+                 std::unordered_set<std::string>* compiled_methods,
+                 size_t thread_count,
+                 bool dump_stats,
+                 bool dump_passes,
+                 CumulativeLogger* timer,
+                 int swap_fd,
+                 const ProfileCompilationInfo* profile_compilation_info);
+
+  ~CompilerDriver();
+
+  // Set the dex files that will be stored in the oat file after being compiled.
+  void SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
+    dex_files_for_oat_file_ = &dex_files;
+  }
+
+  // Get the dex files that will be stored in the oat file after being compiled.
+  ArrayRef<const DexFile* const> GetDexFilesForOatFile() const {
+    return (dex_files_for_oat_file_ != nullptr)
+        ? ArrayRef<const DexFile* const>(*dex_files_for_oat_file_)
+        : ArrayRef<const DexFile* const>();
+  }
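
GetDexFilesForOatFile() hands out a non-owning view over a vector that the caller of SetDexFilesForOatFile() keeps alive. A minimal sketch of such a view type, in the spirit of ART's ArrayRef but not its actual implementation:

#include <cstddef>
#include <vector>

template <typename T>
class View {
 public:
  View() : data_(nullptr), size_(0) {}
  explicit View(const std::vector<T>& v) : data_(v.data()), size_(v.size()) {}
  const T* begin() const { return data_; }
  const T* end() const { return data_ + size_; }
  size_t size() const { return size_; }

 private:
  const T* data_;  // Borrowed; the vector must outlive the view.
  size_t size_;
};
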
+
+  void CompileAll(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
+
+  // Compile a single Method.
+  void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
+
+  VerificationResults* GetVerificationResults() const {
+    DCHECK(Runtime::Current()->IsAotCompiler());
+    return verification_results_;
+  }
+
+  DexFileToMethodInlinerMap* GetMethodInlinerMap() const {
+    return method_inliner_map_;
+  }
+
+  InstructionSet GetInstructionSet() const {
+    return instruction_set_;
+  }
+
+  const InstructionSetFeatures* GetInstructionSetFeatures() const {
+    return instruction_set_features_;
+  }
+
+  const CompilerOptions& GetCompilerOptions() const {
+    return *compiler_options_;
+  }
+
+  Compiler* GetCompiler() const {
+    return compiler_.get();
+  }
+
+  // Are we compiling and creating an image file?
+  bool IsBootImage() const {
+    return boot_image_;
+  }
+
+  const std::unordered_set<std::string>* GetImageClasses() const {
+    return image_classes_.get();
+  }
+
+  // Generate the trampolines that are invoked by unresolved direct methods.
+  std::unique_ptr<std::vector<uint8_t>> CreateJniDlsymLookup() const;
+  std::unique_ptr<std::vector<uint8_t>> CreateQuickGenericJniTrampoline() const;
+  std::unique_ptr<std::vector<uint8_t>> CreateQuickImtConflictTrampoline() const;
+  std::unique_ptr<std::vector<uint8_t>> CreateQuickResolutionTrampoline() const;
+  std::unique_ptr<std::vector<uint8_t>> CreateQuickToInterpreterBridge() const;
+
+  CompiledClass* GetCompiledClass(ClassReference ref) const
+      REQUIRES(!compiled_classes_lock_);
+
+  CompiledMethod* GetCompiledMethod(MethodReference ref) const
+      REQUIRES(!compiled_methods_lock_);
+  size_t GetNonRelativeLinkerPatchCount() const
+      REQUIRES(!compiled_methods_lock_);
+
+  // Add a compiled method.
+  void AddCompiledMethod(const MethodReference& method_ref,
+                         CompiledMethod* const compiled_method,
+                         size_t non_relative_linker_patch_count)
+      REQUIRES(!compiled_methods_lock_);
+  // Remove and delete a compiled method.
+  void RemoveCompiledMethod(const MethodReference& method_ref) REQUIRES(!compiled_methods_lock_);
+
+  void SetRequiresConstructorBarrier(Thread* self,
+                                     const DexFile* dex_file,
+                                     uint16_t class_def_index,
+                                     bool requires)
+      REQUIRES(!requires_constructor_barrier_lock_);
+  bool RequiresConstructorBarrier(Thread* self,
+                                  const DexFile* dex_file,
+                                  uint16_t class_def_index)
+      REQUIRES(!requires_constructor_barrier_lock_);
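
Judging by its implementation earlier in this patch, RequiresConstructorBarrier() is a read-mostly memoized lookup: probe the cache under a reader lock, and only take the writer lock to compute and insert a missing entry. A compact sketch of that pattern with std::shared_mutex (names are illustrative):

#include <map>
#include <shared_mutex>

class BarrierCache {
 public:
  bool Get(int key) {
    {
      std::shared_lock<std::shared_mutex> reader(mutex_);
      auto it = cache_.find(key);
      if (it != cache_.end()) {
        return it->second;  // Fast path: cache hit under the shared lock.
      }
    }
    std::unique_lock<std::shared_mutex> writer(mutex_);
    bool value = Compute(key);  // May recompute if two writers race; harmless.
    cache_.emplace(key, value);  // emplace keeps the first stored value.
    return value;
  }

 private:
  static bool Compute(int key) { return key % 2 == 0; }  // Stand-in predicate.
  std::shared_mutex mutex_;
  std::map<int, bool> cache_;
};
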
+
+  // Callbacks from compiler to see what runtime checks must be generated.
+
+  bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
+                                        uint32_t type_idx)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
+      REQUIRES(!Locks::mutator_lock_);
+
+  // Are runtime access checks necessary in the compiled code?
+  bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
+                                  Handle<mirror::DexCache> dex_cache,
+                                  uint32_t type_idx)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Are runtime access and instantiable checks necessary in the code?
+  // out_is_finalizable is set to whether the type is finalizable.
+  bool CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_idx,
+                                              Handle<mirror::DexCache> dex_cache,
+                                              uint32_t type_idx,
+                                              bool* out_is_finalizable)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
+                          bool* is_type_initialized, bool* use_direct_type_ptr,
+                          uintptr_t* direct_type_ptr, bool* out_is_finalizable);
+
+  // Query methods for the java.lang.ref.Reference class.
+  bool CanEmbedReferenceTypeInCode(ClassReference* ref,
+                                   bool* use_direct_type_ptr, uintptr_t* direct_type_ptr);
+  uint32_t GetReferenceSlowFlagOffset() const;
+  uint32_t GetReferenceDisableFlagOffset() const;
+
+  // Get the DexCache for the compilation unit.
+  mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
+                                      const DexCompilationUnit* mUnit)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Resolve compiling method's class. Returns null on failure.
+  mirror::Class* ResolveCompilingMethodsClass(
+      const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  mirror::Class* ResolveClass(
+      const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
+      const DexCompilationUnit* mUnit)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Resolve a field. Returns null on failure, including incompatible class change.
+  // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
+  ArtField* ResolveField(
+      const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+      uint32_t field_idx, bool is_static)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Resolve a field with a given dex file.
+  ArtField* ResolveFieldWithDexFile(
+      const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
+      uint32_t field_idx, bool is_static)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Get declaration location of a resolved field.
+  void GetResolvedFieldDexFileLocation(
+      ArtField* resolved_field, const DexFile** declaring_dex_file,
+      uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+  MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Find a dex cache for a dex file.
+  inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
+  std::pair<bool, bool> IsFastInstanceField(
+      mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+      ArtField* resolved_field, uint16_t field_idx)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
+  // of the declaring class in the referrer's dex file.
+  std::pair<bool, bool> IsFastStaticField(
+      mirror::DexCache* dex_cache, mirror::Class* referrer_class,
+      ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Return whether the declaring class of `resolved_method` is
+  // available to `referrer_class`. If this is true, compute the type
+  // index of the declaring class in the referrer's dex file and
+  // return it through the out argument `storage_index`; otherwise
+  // return DexFile::kDexNoIndex through `storage_index`.
+  bool IsClassOfStaticMethodAvailableToReferrer(mirror::DexCache* dex_cache,
+                                                mirror::Class* referrer_class,
+                                                ArtMethod* resolved_method,
+                                                uint16_t method_idx,
+                                                uint32_t* storage_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Is the static field in the referrer's class?
+  bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Is the static field's class initialized?
+  bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
+                                      ArtField* resolved_field)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Resolve a method. Returns null on failure, including incompatible class change.
+  ArtMethod* ResolveMethod(
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+      uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Get declaration location of a resolved method.
+  void GetResolvedMethodDexFileLocation(
+      ArtMethod* resolved_method, const DexFile** declaring_dex_file,
+      uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Get the index in the vtable of the method.
+  uint16_t GetResolvedMethodVTableIndex(
+      ArtMethod* resolved_method, InvokeType type)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
+  // for ProcessedInvoke() and computes the necessary lowering info.
+  int IsFastInvoke(
+      ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
+      mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
+      MethodReference* target_method, const MethodReference* devirt_target,
+      uintptr_t* direct_code, uintptr_t* direct_method)
+    SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Is the method's class initialized for an invoke?
+  // For static invokes, used to determine whether we need to consider a potential
+  // call to <clinit>().
+ // For non-static invokes, assuming a non-null reference, the class is always initialized. + bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method) + SHARED_REQUIRES(Locks::mutator_lock_); + + // Get the layout of dex cache arrays for a dex file. Returns invalid layout if the + // dex cache arrays don't have a fixed layout. + DexCacheArraysLayout GetDexCacheArraysLayout(const DexFile* dex_file); + + void ProcessedInstanceField(bool resolved); + void ProcessedStaticField(bool resolved, bool local); + void ProcessedInvoke(InvokeType invoke_type, int flags); + + void ComputeFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, + const ScopedObjectAccess& soa, bool is_static, + ArtField** resolved_field, + mirror::Class** referrer_class, + mirror::DexCache** dex_cache) + SHARED_REQUIRES(Locks::mutator_lock_); + + // Can we fast path instance field access? Computes field's offset and volatility. + bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, + MemberOffset* field_offset, bool* is_volatile) + REQUIRES(!Locks::mutator_lock_); + + ArtField* ComputeInstanceFieldInfo(uint32_t field_idx, + const DexCompilationUnit* mUnit, + bool is_put, + const ScopedObjectAccess& soa) + SHARED_REQUIRES(Locks::mutator_lock_); + + + // Can we fastpath a interface, super class or virtual method call? Computes method's vtable + // index. + bool ComputeInvokeInfo(const DexCompilationUnit* mUnit, const uint32_t dex_pc, + bool update_stats, bool enable_devirtualization, + InvokeType* type, MethodReference* target_method, int* vtable_idx, + uintptr_t* direct_code, uintptr_t* direct_method) + REQUIRES(!Locks::mutator_lock_); + + const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; + bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc); + + bool GetSupportBootImageFixup() const { + return support_boot_image_fixup_; + } + + void SetSupportBootImageFixup(bool support_boot_image_fixup) { + support_boot_image_fixup_ = support_boot_image_fixup; + } + + void SetCompilerContext(void* compiler_context) { + compiler_context_ = compiler_context; + } + + void* GetCompilerContext() const { + return compiler_context_; + } + + size_t GetThreadCount() const { + return parallel_thread_count_; + } + + bool GetDumpStats() const { + return dump_stats_; + } + + bool GetDumpPasses() const { + return dump_passes_; + } + + CumulativeLogger* GetTimingsLogger() const { + return timings_logger_; + } + + void SetDedupeEnabled(bool dedupe_enabled) { + compiled_method_storage_.SetDedupeEnabled(dedupe_enabled); + } + bool DedupeEnabled() const { + return compiled_method_storage_.DedupeEnabled(); + } + + // Checks if class specified by type_idx is one of the image_classes_ + bool IsImageClass(const char* descriptor) const; + + // Checks whether the provided class should be compiled, i.e., is in classes_to_compile_. + bool IsClassToCompile(const char* descriptor) const; + + // Checks whether the provided method should be compiled, i.e., is in method_to_compile_. + bool IsMethodToCompile(const MethodReference& method_ref) const; + + // Checks whether profile guided compilation is enabled and if the method should be compiled + // according to the profile file. + bool ShouldCompileBasedOnProfile(const MethodReference& method_ref) const; + + // Checks whether profile guided verification is enabled and if the method should be verified + // according to the profile file. 
+  bool ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, uint16_t class_idx) const;
+
+  void RecordClassStatus(ClassReference ref, mirror::Class::Status status)
+      REQUIRES(!compiled_classes_lock_);
+
+  // Checks if the specified method has been verified without failures. Returns
+  // false if the method is not in the verification results (GetVerificationResults).
+  bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+                                       uint16_t class_def_idx,
+                                       const DexFile& dex_file) const;
+
+  // Get memory usage during compilation.
+  std::string GetMemoryUsageString(bool extended) const;
+
+  bool IsStringTypeIndex(uint16_t type_index, const DexFile* dex_file);
+  bool IsStringInit(uint32_t method_index, const DexFile* dex_file, int32_t* offset);
+
+  void SetHadHardVerifierFailure() {
+    had_hard_verifier_failure_ = true;
+  }
+
+  Compiler::Kind GetCompilerKind() {
+    return compiler_kind_;
+  }
+
+  CompiledMethodStorage* GetCompiledMethodStorage() {
+    return &compiled_method_storage_;
+  }
+
+  // Can we assume that the klass is loaded?
+  bool CanAssumeClassIsLoaded(mirror::Class* klass)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
+    if (!kIsTargetBuild) {
+      return MayInlineInternal(inlined_from, inlined_into);
+    }
+    return true;
+  }
+
+  void MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref)
+      REQUIRES(!dex_to_dex_references_lock_);
+
+  const BitVector* GetCurrentDexToDexMethods() const {
+    return current_dex_to_dex_methods_;
+  }
+
+ private:
+  // Return whether the declaring class of `resolved_member` is
+  // available to `referrer_class` for read or write access, using two
+  // Boolean values returned as a pair. If read access is allowed at
+  // least, compute the type index of the declaring class in the
+  // referrer's dex file and return it through the out argument
+  // `storage_index`; otherwise return DexFile::kDexNoIndex through
+  // `storage_index`.
+  template <typename ArtMember>
+  std::pair<bool, bool> IsClassOfStaticMemberAvailableToReferrer(mirror::DexCache* dex_cache,
+                                                                 mirror::Class* referrer_class,
+                                                                 ArtMember* resolved_member,
+                                                                 uint16_t member_idx,
+                                                                 uint32_t* storage_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Can `referrer_class` access the resolved `member`?
+  // Dispatch call to mirror::Class::CanAccessResolvedField or
+  // mirror::Class::CanAccessResolvedMember depending on the value of
+  // ArtMember.
+  template <typename ArtMember>
+  static bool CanAccessResolvedMember(mirror::Class* referrer_class,
+                                      mirror::Class* access_to,
+                                      ArtMember* member,
+                                      mirror::DexCache* dex_cache,
+                                      uint32_t field_idx)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Can we assume that the klass is initialized?
+  bool CanAssumeClassIsInitialized(mirror::Class* klass)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
+  // The only external contract is that an unresolved method has flags 0 and a resolved one non-0.
+  enum {
+    kBitMethodResolved = 0,
+    kBitVirtualMadeDirect,
+    kBitPreciseTypeDevirtualization,
+    kBitDirectCallToBoot,
+    kBitDirectMethodToBoot
+  };
+  static constexpr int kFlagMethodResolved = 1 << kBitMethodResolved;
+  static constexpr int kFlagVirtualMadeDirect = 1 << kBitVirtualMadeDirect;
+  static constexpr int kFlagPreciseTypeDevirtualization = 1 << kBitPreciseTypeDevirtualization;
+  static constexpr int kFlagDirectCallToBoot = 1 << kBitDirectCallToBoot;
+  static constexpr int kFlagDirectMethodToBoot = 1 << kBitDirectMethodToBoot;
+  static constexpr int kFlagsMethodResolvedVirtualMadeDirect =
+      kFlagMethodResolved | kFlagVirtualMadeDirect;
+  static constexpr int kFlagsMethodResolvedPreciseTypeDevirtualization =
+      kFlagsMethodResolvedVirtualMadeDirect | kFlagPreciseTypeDevirtualization;
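
Each enumerator above names a bit position and each kFlag* constant is the corresponding mask, so several INVOKE outcomes can be OR-ed into the single int returned by IsFastInvoke(). A tiny compile-time illustration of the same arithmetic with made-up bit names:

#include <cstdint>

constexpr int kBitA = 0;
constexpr int kBitB = 1;
constexpr int kFlagA = 1 << kBitA;   // 0b01
constexpr int kFlagB = 1 << kBitB;   // 0b10
constexpr int kFlagsAB = kFlagA | kFlagB;
static_assert((kFlagsAB & kFlagA) != 0, "A is set in the combined flags");
static_assert(kFlagsAB == 3, "the two low bits combine to 0b11");
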
+
+ public:  // TODO make private or eliminate.
+  // Compute constant code and method pointers when possible.
+  void GetCodeAndMethodForDirectCall(/*out*/InvokeType* type,
+                                     InvokeType sharp_type,
+                                     bool no_guarantee_of_dex_cache_entry,
+                                     const mirror::Class* referrer_class,
+                                     ArtMethod* method,
+                                     /*out*/int* stats_flags,
+                                     MethodReference* target_method,
+                                     uintptr_t* direct_code, uintptr_t* direct_method)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+ private:
+  void PreCompile(jobject class_loader,
+                  const std::vector<const DexFile*>& dex_files,
+                  TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+
+  void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+
+  // Attempt to resolve all type, methods, fields, and strings
+  // referenced from code in the dex file following PathClassLoader
+  // ordering semantics.
+  void Resolve(jobject class_loader,
+               const std::vector<const DexFile*>& dex_files,
+               TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_);
+  void ResolveDexFile(jobject class_loader,
+                      const DexFile& dex_file,
+                      const std::vector<const DexFile*>& dex_files,
+                      ThreadPool* thread_pool,
+                      size_t thread_count,
+                      TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_);
+
+  void Verify(jobject class_loader,
+              const std::vector<const DexFile*>& dex_files,
+              TimingLogger* timings);
+  void VerifyDexFile(jobject class_loader,
+                     const DexFile& dex_file,
+                     const std::vector<const DexFile*>& dex_files,
+                     ThreadPool* thread_pool,
+                     size_t thread_count,
+                     TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_);
+
+  void SetVerified(jobject class_loader,
+                   const std::vector<const DexFile*>& dex_files,
+                   TimingLogger* timings);
+  void SetVerifiedDexFile(jobject class_loader,
+                          const DexFile& dex_file,
+                          const std::vector<const DexFile*>& dex_files,
+                          ThreadPool* thread_pool,
+                          size_t thread_count,
+                          TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_);
+
+  void InitializeClasses(jobject class_loader,
+                         const std::vector<const DexFile*>& dex_files,
+                         TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+  void InitializeClasses(jobject class_loader,
+                         const DexFile& dex_file,
+                         const std::vector<const DexFile*>& dex_files,
+                         TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_, !compiled_classes_lock_);
+
+  void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+  static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void Compile(jobject class_loader,
+               const std::vector<const DexFile*>& dex_files,
+               TimingLogger* timings) REQUIRES(!dex_to_dex_references_lock_);
+  void CompileDexFile(jobject class_loader,
+                      const DexFile& dex_file,
+                      const std::vector<const DexFile*>& dex_files,
+                      ThreadPool* thread_pool,
+                      size_t thread_count,
+                      TimingLogger* timings)
+      REQUIRES(!Locks::mutator_lock_);
+
+  bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const;
+
+  void InitializeThreadPools();
+  void FreeThreadPools();
+  void CheckThreadPools();
+
+  bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const;
+
+  const CompilerOptions* const compiler_options_;
+  VerificationResults* const verification_results_;
+  DexFileToMethodInlinerMap* const method_inliner_map_;
+
+  std::unique_ptr<Compiler> compiler_;
+  Compiler::Kind compiler_kind_;
+
+  const InstructionSet instruction_set_;
+  const InstructionSetFeatures* const instruction_set_features_;
+
+  // All class references that require constructor barriers. If the class reference is not in the
+  // set then the result has not yet been computed.
+  mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  std::map<ClassReference, bool> requires_constructor_barrier_
+      GUARDED_BY(requires_constructor_barrier_lock_);
+
+  typedef SafeMap<const ClassReference, CompiledClass*> ClassTable;
+  // All class references that this compiler has compiled.
+  mutable Mutex compiled_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  ClassTable compiled_classes_ GUARDED_BY(compiled_classes_lock_);
+
+  typedef SafeMap<const MethodReference, CompiledMethod*, MethodReferenceComparator> MethodTable;
+
+ public:
+  // Lock is public so that non-members can have lock annotations.
+  mutable Mutex compiled_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ private:
+  // All method references that this compiler has compiled.
+  MethodTable compiled_methods_ GUARDED_BY(compiled_methods_lock_);
+  // Number of non-relative patches in all compiled methods. These patches need space
+  // in the .oat_patches ELF section if requested in the compiler options.
+  size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_);
+
+  const bool boot_image_;
+  const bool app_image_;
+
+  // If image_ is true, specifies the classes that will be included in the image.
+  // Note if image_classes_ is null, all classes are included in the image.
+  std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+
+  // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
+  // all classes are eligible for compilation (duplication filters etc. will still apply).
+  // This option may be restricted to the boot image, depending on a flag in the implementation.
+  std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+
+  // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null,
+  // all methods are eligible for compilation (compilation filters etc. will still apply).
+  // This option may be restricted to the boot image, depending on a flag in the implementation.
+  std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_;
+
+  bool had_hard_verifier_failure_;
+
+  // A thread pool that can (potentially) run tasks in parallel.
+  std::unique_ptr<ThreadPool> parallel_thread_pool_;
+  size_t parallel_thread_count_;
+
+  // A thread pool that guarantees running single-threaded on the main thread.
+  std::unique_ptr<ThreadPool> single_thread_pool_;
+
+  class AOTCompilationStats;
+  std::unique_ptr<AOTCompilationStats> stats_;
+
+  bool dump_stats_;
+  const bool dump_passes_;
+
+  CumulativeLogger* const timings_logger_;
+
+  typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
+  typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
+
+  void* compiler_context_;
+
+  bool support_boot_image_fixup_;
+
+  // List of dex files that will be stored in the oat file.
+  const std::vector<const DexFile*>* dex_files_for_oat_file_;
+
+  CompiledMethodStorage compiled_method_storage_;
+
+  // Info for profile guided compilation.
+ const ProfileCompilationInfo* const profile_compilation_info_; + + size_t max_arena_alloc_; + + // Data for delaying dex-to-dex compilation. + Mutex dex_to_dex_references_lock_; + // In the first phase, dex_to_dex_references_ collects methods for dex-to-dex compilation. + class DexFileMethodSet; + std::vector dex_to_dex_references_ GUARDED_BY(dex_to_dex_references_lock_); + // In the second phase, current_dex_to_dex_methods_ points to the BitVector with method + // indexes for dex-to-dex compilation in the current dex file. + const BitVector* current_dex_to_dex_methods_; + + friend class CompileClassVisitor; + DISALLOW_COPY_AND_ASSIGN(CompilerDriver); +}; + +} // namespace art + +#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_H_ diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc new file mode 100644 index 000000000..b9a5a781d --- /dev/null +++ b/compiler/driver/compiler_driver_test.cc @@ -0,0 +1,320 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "driver/compiler_driver.h" + +#include +#include +#include + +#include "art_method-inl.h" +#include "class_linker-inl.h" +#include "common_compiler_test.h" +#include "dex_file.h" +#include "gc/heap.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/object-inl.h" +#include "handle_scope-inl.h" +#include "jit/offline_profiling_info.h" +#include "scoped_thread_state_change.h" + +namespace art { + +class CompilerDriverTest : public CommonCompilerTest { + protected: + void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) { + TimingLogger timings("CompilerDriverTest::CompileAll", false, false); + TimingLogger::ScopedTiming t(__FUNCTION__, &timings); + compiler_driver_->CompileAll(class_loader, + GetDexFiles(class_loader), + &timings); + t.NewTiming("MakeAllExecutable"); + MakeAllExecutable(class_loader); + } + + void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, + const char* signature, bool is_virtual) + REQUIRES(!Locks::mutator_lock_) { + CompileAll(class_loader); + Thread::Current()->TransitionFromSuspendedToRunnable(); + bool started = runtime_->Start(); + CHECK(started); + env_ = Thread::Current()->GetJniEnv(); + class_ = env_->FindClass(class_name); + CHECK(class_ != nullptr) << "Class not found: " << class_name; + if (is_virtual) { + mid_ = env_->GetMethodID(class_, method, signature); + } else { + mid_ = env_->GetStaticMethodID(class_, method, signature); + } + CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." 
<< method << signature; + } + + void MakeAllExecutable(jobject class_loader) { + const std::vector class_path = GetDexFiles(class_loader); + for (size_t i = 0; i != class_path.size(); ++i) { + const DexFile* dex_file = class_path[i]; + CHECK(dex_file != nullptr); + MakeDexFileExecutable(class_loader, *dex_file); + } + } + + void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + for (size_t i = 0; i < dex_file.NumClassDefs(); i++) { + const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); + const char* descriptor = dex_file.GetClassDescriptor(class_def); + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<1> hs(soa.Self()); + Handle loader( + hs.NewHandle(soa.Decode(class_loader))); + mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader); + CHECK(c != nullptr); + const auto pointer_size = class_linker->GetImagePointerSize(); + for (auto& m : c->GetMethods(pointer_size)) { + MakeExecutable(&m); + } + } + } + + JNIEnv* env_; + jclass class_; + jmethodID mid_; +}; + +// Disabled due to 10 second runtime on host +TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { + CompileAll(nullptr); + + // All libcore references should resolve + ScopedObjectAccess soa(Thread::Current()); + ASSERT_TRUE(java_lang_dex_file_ != nullptr); + const DexFile& dex = *java_lang_dex_file_; + mirror::DexCache* dex_cache = class_linker_->FindDexCache(soa.Self(), dex); + EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings()); + for (size_t i = 0; i < dex_cache->NumStrings(); i++) { + const mirror::String* string = dex_cache->GetResolvedString(i); + EXPECT_TRUE(string != nullptr) << "string_idx=" << i; + } + EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes()); + for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { + mirror::Class* type = dex_cache->GetResolvedType(i); + EXPECT_TRUE(type != nullptr) << "type_idx=" << i + << " " << dex.GetTypeDescriptor(dex.GetTypeId(i)); + } + EXPECT_EQ(dex.NumMethodIds(), dex_cache->NumResolvedMethods()); + auto* cl = Runtime::Current()->GetClassLinker(); + auto pointer_size = cl->GetImagePointerSize(); + for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { + ArtMethod* method = dex_cache->GetResolvedMethod(i, pointer_size); + EXPECT_TRUE(method != nullptr) << "method_idx=" << i + << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) + << " " << dex.GetMethodName(dex.GetMethodId(i)); + EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i + << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " " + << dex.GetMethodName(dex.GetMethodId(i)); + } + EXPECT_EQ(dex.NumFieldIds(), dex_cache->NumResolvedFields()); + for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { + ArtField* field = cl->GetResolvedField(i, dex_cache); + EXPECT_TRUE(field != nullptr) << "field_idx=" << i + << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i)) + << " " << dex.GetFieldName(dex.GetFieldId(i)); + } + + // TODO check Class::IsVerified for all classes + + // TODO: check that all Method::GetCode() values are non-null +} + +TEST_F(CompilerDriverTest, AbstractMethodErrorStub) { + TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS(); + jobject class_loader; + { + ScopedObjectAccess soa(Thread::Current()); + class_loader = LoadDex("AbstractMethod"); + } + ASSERT_TRUE(class_loader != nullptr); + EnsureCompiled(class_loader, "AbstractClass", 
"foo", "()V", true); + + // Create a jobj_ of ConcreteClass, NOT AbstractClass. + jclass c_class = env_->FindClass("ConcreteClass"); + + jmethodID constructor = env_->GetMethodID(c_class, "", "()V"); + + jobject jobj_ = env_->NewObject(c_class, constructor); + ASSERT_TRUE(jobj_ != nullptr); + + // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception. + env_->CallNonvirtualVoidMethod(jobj_, class_, mid_); + + EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE); + jthrowable exception = env_->ExceptionOccurred(); + env_->ExceptionClear(); + jclass jlame = env_->FindClass("java/lang/AbstractMethodError"); + EXPECT_TRUE(env_->IsInstanceOf(exception, jlame)); + { + ScopedObjectAccess soa(Thread::Current()); + Thread::Current()->ClearException(); + } +} + +class CompilerDriverMethodsTest : public CompilerDriverTest { + protected: + std::unordered_set* GetCompiledMethods() OVERRIDE { + return new std::unordered_set({ + "byte StaticLeafMethods.identity(byte)", + "int StaticLeafMethods.sum(int, int, int)", + "double StaticLeafMethods.sum(double, double, double, double)" + }); + } +}; + +TEST_F(CompilerDriverMethodsTest, Selection) { + TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS(); + Thread* self = Thread::Current(); + jobject class_loader; + { + ScopedObjectAccess soa(self); + class_loader = LoadDex("StaticLeafMethods"); + } + ASSERT_NE(class_loader, nullptr); + + // Need to enable dex-file writability. Methods rejected to be compiled will run through the + // dex-to-dex compiler. + for (const DexFile* dex_file : GetDexFiles(class_loader)) { + ASSERT_TRUE(dex_file->EnableWrite()); + } + + CompileAll(class_loader); + + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ScopedObjectAccess soa(self); + StackHandleScope<1> hs(self); + Handle h_loader(hs.NewHandle( + reinterpret_cast(self->DecodeJObject(class_loader)))); + mirror::Class* klass = class_linker->FindClass(self, "LStaticLeafMethods;", h_loader); + ASSERT_NE(klass, nullptr); + + std::unique_ptr> expected(GetCompiledMethods()); + + const auto pointer_size = class_linker->GetImagePointerSize(); + for (auto& m : klass->GetDirectMethods(pointer_size)) { + std::string name = PrettyMethod(&m, true); + const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); + ASSERT_NE(code, nullptr); + if (expected->find(name) != expected->end()) { + expected->erase(name); + EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code)); + } else { + EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code)); + } + } + EXPECT_TRUE(expected->empty()); +} + +class CompilerDriverProfileTest : public CompilerDriverTest { + protected: + ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE { + ScopedObjectAccess soa(Thread::Current()); + std::vector> dex_files = OpenTestDexFiles("ProfileTestMultiDex"); + + ProfileCompilationInfo info; + for (const std::unique_ptr& dex_file : dex_files) { + std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()); + profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 1); + profile_info_.AddMethodIndex(key, dex_file->GetLocationChecksum(), 2); + } + return &profile_info_; + } + + std::unordered_set GetExpectedMethodsForClass(const std::string& clazz) { + if (clazz == "Main") { + return std::unordered_set({ + "java.lang.String Main.getA()", + "java.lang.String Main.getB()"}); + } else if (clazz == "Second") { + return std::unordered_set({ + "java.lang.String Second.getX()", + 
"java.lang.String Second.getY()"}); + } else { + return std::unordered_set(); + } + } + + void CheckCompiledMethods(jobject class_loader, + const std::string& clazz, + const std::unordered_set& expected_methods) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Thread* self = Thread::Current(); + ScopedObjectAccess soa(self); + StackHandleScope<1> hs(self); + Handle h_loader(hs.NewHandle( + reinterpret_cast(self->DecodeJObject(class_loader)))); + mirror::Class* klass = class_linker->FindClass(self, clazz.c_str(), h_loader); + ASSERT_NE(klass, nullptr); + + const auto pointer_size = class_linker->GetImagePointerSize(); + size_t number_of_compiled_methods = 0; + for (auto& m : klass->GetVirtualMethods(pointer_size)) { + std::string name = PrettyMethod(&m, true); + const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); + ASSERT_NE(code, nullptr); + if (expected_methods.find(name) != expected_methods.end()) { + number_of_compiled_methods++; + EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code)); + } else { + EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code)); + } + } + EXPECT_EQ(expected_methods.size(), number_of_compiled_methods); + } + + private: + ProfileCompilationInfo profile_info_; +}; + +TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) { + TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS(); + Thread* self = Thread::Current(); + jobject class_loader; + { + ScopedObjectAccess soa(self); + class_loader = LoadDex("ProfileTestMultiDex"); + } + ASSERT_NE(class_loader, nullptr); + + // Need to enable dex-file writability. Methods rejected to be compiled will run through the + // dex-to-dex compiler. + ProfileCompilationInfo info; + for (const DexFile* dex_file : GetDexFiles(class_loader)) { + ASSERT_TRUE(dex_file->EnableWrite()); + } + + CompileAll(class_loader); + + std::unordered_set m = GetExpectedMethodsForClass("Main"); + std::unordered_set s = GetExpectedMethodsForClass("Second"); + CheckCompiledMethods(class_loader, "LMain;", m); + CheckCompiledMethods(class_loader, "LSecond;", s); +} + +// TODO: need check-cast test (when stub complete & we can throw/catch + +} // namespace art diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc new file mode 100644 index 000000000..1bd4c3ad8 --- /dev/null +++ b/compiler/driver/compiler_options.cc @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "compiler_options.h"
+
+#include <fstream>
+
+namespace art {
+
+CompilerOptions::CompilerOptions()
+    : compiler_filter_(kDefaultCompilerFilter),
+      huge_method_threshold_(kDefaultHugeMethodThreshold),
+      large_method_threshold_(kDefaultLargeMethodThreshold),
+      small_method_threshold_(kDefaultSmallMethodThreshold),
+      tiny_method_threshold_(kDefaultTinyMethodThreshold),
+      num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
+      inline_depth_limit_(kUnsetInlineDepthLimit),
+      inline_max_code_units_(kUnsetInlineMaxCodeUnits),
+      no_inline_from_(nullptr),
+      include_patch_information_(kDefaultIncludePatchInformation),
+      top_k_profile_threshold_(kDefaultTopKProfileThreshold),
+      debuggable_(false),
+      generate_debug_info_(kDefaultGenerateDebugInfo),
+      generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
+      implicit_null_checks_(true),
+      implicit_so_checks_(true),
+      implicit_suspend_checks_(false),
+      compile_pic_(false),
+      verbose_methods_(nullptr),
+      abort_on_hard_verifier_failure_(false),
+      init_failure_output_(nullptr),
+      dump_cfg_file_name_(""),
+      dump_cfg_append_(false),
+      force_determinism_(false) {
+}
+
+CompilerOptions::~CompilerOptions() {
+  // The destructor looks empty but it destroys a PassManagerOptions object. We keep it here
+  // because we don't want to include the PassManagerOptions definition from the header file.
+}
+
+CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
+                                 size_t huge_method_threshold,
+                                 size_t large_method_threshold,
+                                 size_t small_method_threshold,
+                                 size_t tiny_method_threshold,
+                                 size_t num_dex_methods_threshold,
+                                 size_t inline_depth_limit,
+                                 size_t inline_max_code_units,
+                                 const std::vector<const DexFile*>* no_inline_from,
+                                 bool include_patch_information,
+                                 double top_k_profile_threshold,
+                                 bool debuggable,
+                                 bool generate_debug_info,
+                                 bool implicit_null_checks,
+                                 bool implicit_so_checks,
+                                 bool implicit_suspend_checks,
+                                 bool compile_pic,
+                                 const std::vector<std::string>* verbose_methods,
+                                 std::ostream* init_failure_output,
+                                 bool abort_on_hard_verifier_failure,
+                                 const std::string& dump_cfg_file_name,
+                                 bool dump_cfg_append,
+                                 bool force_determinism
+                                 ) :  // NOLINT(whitespace/parens)
+    compiler_filter_(compiler_filter),
+    huge_method_threshold_(huge_method_threshold),
+    large_method_threshold_(large_method_threshold),
+    small_method_threshold_(small_method_threshold),
+    tiny_method_threshold_(tiny_method_threshold),
+    num_dex_methods_threshold_(num_dex_methods_threshold),
+    inline_depth_limit_(inline_depth_limit),
+    inline_max_code_units_(inline_max_code_units),
+    no_inline_from_(no_inline_from),
+    include_patch_information_(include_patch_information),
+    top_k_profile_threshold_(top_k_profile_threshold),
+    debuggable_(debuggable),
+    generate_debug_info_(generate_debug_info),
+    generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
+    implicit_null_checks_(implicit_null_checks),
+    implicit_so_checks_(implicit_so_checks),
+    implicit_suspend_checks_(implicit_suspend_checks),
+    compile_pic_(compile_pic),
+    verbose_methods_(verbose_methods),
+    abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
+    init_failure_output_(init_failure_output),
+    dump_cfg_file_name_(dump_cfg_file_name),
+    dump_cfg_append_(dump_cfg_append),
+    force_determinism_(force_determinism) {
+}
+
+void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
+  ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage);
+}
+
+void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) {
ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage); +} + +void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) { + ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage); +} + +void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) { + ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage); +} + +void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) { + ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage); +} + +void CompilerOptions::ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage) { + ParseUintOption(option, "--inline-depth-limit", &inline_depth_limit_, Usage); +} + +void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) { + ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage); +} + +void CompilerOptions::ParseDumpInitFailures(const StringPiece& option, + UsageFn Usage ATTRIBUTE_UNUSED) { + DCHECK(option.starts_with("--dump-init-failures=")); + std::string file_name = option.substr(strlen("--dump-init-failures=")).data(); + init_failure_output_.reset(new std::ofstream(file_name)); + if (init_failure_output_.get() == nullptr) { + LOG(ERROR) << "Failed to allocate ofstream"; + } else if (init_failure_output_->fail()) { + LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization " + << "failures."; + init_failure_output_.reset(); + } +} + +bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) { + if (option.starts_with("--compiler-filter=")) { + const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); + if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, &compiler_filter_)) { + Usage("Unknown --compiler-filter value %s", compiler_filter_string); + } + } else if (option == "--compile-pic") { + compile_pic_ = true; + } else if (option.starts_with("--huge-method-max=")) { + ParseHugeMethodMax(option, Usage); + } else if (option.starts_with("--large-method-max=")) { + ParseLargeMethodMax(option, Usage); + } else if (option.starts_with("--small-method-max=")) { + ParseSmallMethodMax(option, Usage); + } else if (option.starts_with("--tiny-method-max=")) { + ParseTinyMethodMax(option, Usage); + } else if (option.starts_with("--num-dex-methods=")) { + ParseNumDexMethods(option, Usage); + } else if (option.starts_with("--inline-depth-limit=")) { + ParseInlineDepthLimit(option, Usage); + } else if (option.starts_with("--inline-max-code-units=")) { + ParseInlineMaxCodeUnits(option, Usage); + } else if (option == "--generate-debug-info" || option == "-g") { + generate_debug_info_ = true; + } else if (option == "--no-generate-debug-info") { + generate_debug_info_ = false; + } else if (option == "--generate-mini-debug-info") { + generate_mini_debug_info_ = true; + } else if (option == "--no-generate-mini-debug-info") { + generate_mini_debug_info_ = false; + } else if (option == "--debuggable") { + debuggable_ = true; + } else if (option.starts_with("--top-k-profile-threshold=")) { + ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage); + } else if (option == "--include-patch-information") { + include_patch_information_ = true; + } else if (option == "--no-include-patch-information") { + include_patch_information_ = false; + } else if (option == "--abort-on-hard-verifier-error") { + abort_on_hard_verifier_failure_ = true; 
+ } else if (option.starts_with("--dump-init-failures=")) { + ParseDumpInitFailures(option, Usage); + } else if (option.starts_with("--dump-cfg=")) { + dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data(); + } else if (option.starts_with("--dump-cfg-append")) { + dump_cfg_append_ = true; + } else { + // Option not recognized. + return false; + } + return true; +} + +} // namespace art diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h new file mode 100644 index 000000000..6d4455e65 --- /dev/null +++ b/compiler/driver/compiler_options.h @@ -0,0 +1,308 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ +#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ + +#include +#include +#include + +#include "base/macros.h" +#include "compiler_filter.h" +#include "globals.h" +#include "utils.h" + +namespace art { + +class CompilerOptions FINAL { + public: + // Guide heuristics to determine whether to compile method if profile data not available. + static const CompilerFilter::Filter kDefaultCompilerFilter = CompilerFilter::kSpeed; + static const size_t kDefaultHugeMethodThreshold = 10000; + static const size_t kDefaultLargeMethodThreshold = 600; + static const size_t kDefaultSmallMethodThreshold = 60; + static const size_t kDefaultTinyMethodThreshold = 20; + static const size_t kDefaultNumDexMethodsThreshold = 900; + static constexpr double kDefaultTopKProfileThreshold = 90.0; + static const bool kDefaultGenerateDebugInfo = false; + static const bool kDefaultGenerateMiniDebugInfo = false; + static const bool kDefaultIncludePatchInformation = false; + static const size_t kDefaultInlineDepthLimit = 3; + static const size_t kDefaultInlineMaxCodeUnits = 32; + static constexpr size_t kUnsetInlineDepthLimit = -1; + static constexpr size_t kUnsetInlineMaxCodeUnits = -1; + + // Default inlining settings when the space filter is used. 
+  static constexpr size_t kSpaceFilterInlineDepthLimit = 3;
+  static constexpr size_t kSpaceFilterInlineMaxCodeUnits = 10;
+
+  CompilerOptions();
+  ~CompilerOptions();
+
+  CompilerOptions(CompilerFilter::Filter compiler_filter,
+                  size_t huge_method_threshold,
+                  size_t large_method_threshold,
+                  size_t small_method_threshold,
+                  size_t tiny_method_threshold,
+                  size_t num_dex_methods_threshold,
+                  size_t inline_depth_limit,
+                  size_t inline_max_code_units,
+                  const std::vector<const DexFile*>* no_inline_from,
+                  bool include_patch_information,
+                  double top_k_profile_threshold,
+                  bool debuggable,
+                  bool generate_debug_info,
+                  bool implicit_null_checks,
+                  bool implicit_so_checks,
+                  bool implicit_suspend_checks,
+                  bool compile_pic,
+                  const std::vector<std::string>* verbose_methods,
+                  std::ostream* init_failure_output,
+                  bool abort_on_hard_verifier_failure,
+                  const std::string& dump_cfg_file_name,
+                  bool dump_cfg_append,
+                  bool force_determinism);
+
+  CompilerFilter::Filter GetCompilerFilter() const {
+    return compiler_filter_;
+  }
+
+  void SetCompilerFilter(CompilerFilter::Filter compiler_filter) {
+    compiler_filter_ = compiler_filter;
+  }
+
+  bool VerifyAtRuntime() const {
+    return compiler_filter_ == CompilerFilter::kVerifyAtRuntime;
+  }
+
+  bool IsBytecodeCompilationEnabled() const {
+    return CompilerFilter::IsBytecodeCompilationEnabled(compiler_filter_);
+  }
+
+  bool IsJniCompilationEnabled() const {
+    return CompilerFilter::IsJniCompilationEnabled(compiler_filter_);
+  }
+
+  bool IsVerificationEnabled() const {
+    return CompilerFilter::IsVerificationEnabled(compiler_filter_);
+  }
+
+  bool NeverVerify() const {
+    return compiler_filter_ == CompilerFilter::kVerifyNone;
+  }
+
+  bool VerifyOnlyProfile() const {
+    return compiler_filter_ == CompilerFilter::kVerifyProfile;
+  }
+
+  size_t GetHugeMethodThreshold() const {
+    return huge_method_threshold_;
+  }
+
+  size_t GetLargeMethodThreshold() const {
+    return large_method_threshold_;
+  }
+
+  size_t GetSmallMethodThreshold() const {
+    return small_method_threshold_;
+  }
+
+  size_t GetTinyMethodThreshold() const {
+    return tiny_method_threshold_;
+  }
+
+  bool IsHugeMethod(size_t num_dalvik_instructions) const {
+    return num_dalvik_instructions > huge_method_threshold_;
+  }
+
+  bool IsLargeMethod(size_t num_dalvik_instructions) const {
+    return num_dalvik_instructions > large_method_threshold_;
+  }
+
+  bool IsSmallMethod(size_t num_dalvik_instructions) const {
+    return num_dalvik_instructions > small_method_threshold_;
+  }
+
+  bool IsTinyMethod(size_t num_dalvik_instructions) const {
+    return num_dalvik_instructions > tiny_method_threshold_;
+  }
+
+  size_t GetNumDexMethodsThreshold() const {
+    return num_dex_methods_threshold_;
+  }
+
+  size_t GetInlineDepthLimit() const {
+    return inline_depth_limit_;
+  }
+  void SetInlineDepthLimit(size_t limit) {
+    inline_depth_limit_ = limit;
+  }
+
+  size_t GetInlineMaxCodeUnits() const {
+    return inline_max_code_units_;
+  }
+  void SetInlineMaxCodeUnits(size_t units) {
+    inline_max_code_units_ = units;
+  }
+
+  double GetTopKProfileThreshold() const {
+    return top_k_profile_threshold_;
+  }
+
+  bool GetDebuggable() const {
+    return debuggable_;
+  }
+
+  bool GetNativeDebuggable() const {
+    return GetDebuggable() && GetGenerateDebugInfo();
+  }
+
+  // This flag controls whether the compiler collects debugging information.
+  // The other flags control how the information is written to disk.
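+  // Illustrative sketch (editor's example, not part of the original patch),
+  // assuming the flags are wired 1:1 to the dex2oat options parsed above:
+  //   --generate-debug-info      -> generate_debug_info_      (full debug sections)
+  //   --generate-mini-debug-info -> generate_mini_debug_info_ (.gnu_debugdata only)
+  //   --debuggable               -> debuggable_               (affects codegen)
+  // GetNativeDebuggable() above is then true only when a --debuggable build
+  // also emits full debug info.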
+  bool GenerateAnyDebugInfo() const {
+    return GetGenerateDebugInfo() || GetGenerateMiniDebugInfo();
+  }
+
+  bool GetGenerateDebugInfo() const {
+    return generate_debug_info_;
+  }
+
+  bool GetGenerateMiniDebugInfo() const {
+    return generate_mini_debug_info_;
+  }
+
+  bool GetImplicitNullChecks() const {
+    return implicit_null_checks_;
+  }
+
+  bool GetImplicitStackOverflowChecks() const {
+    return implicit_so_checks_;
+  }
+
+  bool GetImplicitSuspendChecks() const {
+    return implicit_suspend_checks_;
+  }
+
+  bool GetIncludePatchInformation() const {
+    return include_patch_information_;
+  }
+
+  // Should the code be compiled as position independent?
+  bool GetCompilePic() const {
+    return compile_pic_;
+  }
+
+  bool HasVerboseMethods() const {
+    return verbose_methods_ != nullptr && !verbose_methods_->empty();
+  }
+
+  bool IsVerboseMethod(const std::string& pretty_method) const {
+    for (const std::string& cur_method : *verbose_methods_) {
+      if (pretty_method.find(cur_method) != std::string::npos) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  std::ostream* GetInitFailureOutput() const {
+    return init_failure_output_.get();
+  }
+
+  bool AbortOnHardVerifierFailure() const {
+    return abort_on_hard_verifier_failure_;
+  }
+
+  const std::vector<const DexFile*>* GetNoInlineFromDexFile() const {
+    return no_inline_from_;
+  }
+
+  bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+
+  const std::string& GetDumpCfgFileName() const {
+    return dump_cfg_file_name_;
+  }
+
+  bool GetDumpCfgAppend() const {
+    return dump_cfg_append_;
+  }
+
+  bool IsForceDeterminism() const {
+    return force_determinism_;
+  }
+
+ private:
+  void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
+  void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
+  void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage);
+  void ParseInlineDepthLimit(const StringPiece& option, UsageFn Usage);
+  void ParseNumDexMethods(const StringPiece& option, UsageFn Usage);
+  void ParseTinyMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
+  void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
+
+  CompilerFilter::Filter compiler_filter_;
+  size_t huge_method_threshold_;
+  size_t large_method_threshold_;
+  size_t small_method_threshold_;
+  size_t tiny_method_threshold_;
+  size_t num_dex_methods_threshold_;
+  size_t inline_depth_limit_;
+  size_t inline_max_code_units_;
+
+  // Dex files from which we should not inline code.
+  // This is usually a very short list (i.e. a single dex file), so we
+  // prefer vector<> over a lookup-oriented container, such as set<>.
+  const std::vector<const DexFile*>* no_inline_from_;
+
+  bool include_patch_information_;
+  // When using a profile file only the top K% of the profiled samples will be compiled.
+  double top_k_profile_threshold_;
+  bool debuggable_;
+  bool generate_debug_info_;
+  bool generate_mini_debug_info_;
+  bool implicit_null_checks_;
+  bool implicit_so_checks_;
+  bool implicit_suspend_checks_;
+  bool compile_pic_;
+
+  // Vector of methods to have verbose output enabled for.
+  const std::vector<std::string>* verbose_methods_;
+
+  // Abort compilation with an error if we find a class that fails verification with a hard
+  // failure.
+  bool abort_on_hard_verifier_failure_;
+
+  // Log initialization failures to this stream if not null.
+  std::unique_ptr<std::ostream> init_failure_output_;
+
+  std::string dump_cfg_file_name_;
+  bool dump_cfg_append_;
+
+  // Whether the compiler should trade performance for determinism to guarantee exactly
+  // reproducible outcomes.
+  bool force_determinism_;
+
+  friend class Dex2Oat;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
new file mode 100644
index 000000000..b0ee448ea
--- /dev/null
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_compilation_unit.h"
+
+#include "base/stringprintf.h"
+#include "mirror/dex_cache.h"
+#include "utils.h"
+
+namespace art {
+
+DexCompilationUnit::DexCompilationUnit(jobject class_loader,
+                                       ClassLinker* class_linker,
+                                       const DexFile& dex_file,
+                                       const DexFile::CodeItem* code_item,
+                                       uint16_t class_def_idx,
+                                       uint32_t method_idx,
+                                       uint32_t access_flags,
+                                       const VerifiedMethod* verified_method,
+                                       Handle<mirror::DexCache> dex_cache)
+    : class_loader_(class_loader),
+      class_linker_(class_linker),
+      dex_file_(&dex_file),
+      code_item_(code_item),
+      class_def_idx_(class_def_idx),
+      dex_method_idx_(method_idx),
+      access_flags_(access_flags),
+      verified_method_(verified_method),
+      dex_cache_(dex_cache) {
+}
+
+const std::string& DexCompilationUnit::GetSymbol() {
+  if (symbol_.empty()) {
+    symbol_ = "dex_";
+    symbol_ += MangleForJni(PrettyMethod(dex_method_idx_, *dex_file_));
+  }
+  return symbol_;
+}
+
+} // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
new file mode 100644
index 000000000..854927d74
--- /dev/null
+++ b/compiler/driver/dex_compilation_unit.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_
+#define ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_
+
+#include <stdint.h>
+
+#include "base/arena_object.h"
+#include "dex_file.h"
+#include "handle.h"
+#include "jni.h"
+
+namespace art {
+namespace mirror {
+class ClassLoader;
+class DexCache;
+} // namespace mirror
+class ClassLinker;
+class VerifiedMethod;
+
+class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+  DexCompilationUnit(jobject class_loader,
+                     ClassLinker* class_linker,
+                     const DexFile& dex_file,
+                     const DexFile::CodeItem* code_item,
+                     uint16_t class_def_idx,
+                     uint32_t method_idx,
+                     uint32_t access_flags,
+                     const VerifiedMethod* verified_method,
+                     Handle<mirror::DexCache> dex_cache);
+
+  jobject GetClassLoader() const {
+    return class_loader_;
+  }
+
+  ClassLinker* GetClassLinker() const {
+    return class_linker_;
+  }
+
+  const DexFile* GetDexFile() const {
+    return dex_file_;
+  }
+
+  uint16_t GetClassDefIndex() const {
+    return class_def_idx_;
+  }
+
+  uint32_t GetDexMethodIndex() const {
+    return dex_method_idx_;
+  }
+
+  const DexFile::CodeItem* GetCodeItem() const {
+    return code_item_;
+  }
+
+  const char* GetShorty() const {
+    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    return dex_file_->GetMethodShorty(method_id);
+  }
+
+  const char* GetShorty(uint32_t* shorty_len) const {
+    const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+    return dex_file_->GetMethodShorty(method_id, shorty_len);
+  }
+
+  uint32_t GetAccessFlags() const {
+    return access_flags_;
+  }
+
+  bool IsConstructor() const {
+    return ((access_flags_ & kAccConstructor) != 0);
+  }
+
+  bool IsNative() const {
+    return ((access_flags_ & kAccNative) != 0);
+  }
+
+  bool IsStatic() const {
+    return ((access_flags_ & kAccStatic) != 0);
+  }
+
+  bool IsSynchronized() const {
+    return ((access_flags_ & kAccSynchronized) != 0);
+  }
+
+  const VerifiedMethod* GetVerifiedMethod() const {
+    return verified_method_;
+  }
+
+  void ClearVerifiedMethod() {
+    verified_method_ = nullptr;
+  }
+
+  const std::string& GetSymbol();
+
+  Handle<mirror::DexCache> GetDexCache() const {
+    return dex_cache_;
+  }
+
+ private:
+  const jobject class_loader_;
+
+  ClassLinker* const class_linker_;
+
+  const DexFile* const dex_file_;
+
+  const DexFile::CodeItem* const code_item_;
+  const uint16_t class_def_idx_;
+  const uint32_t dex_method_idx_;
+  const uint32_t access_flags_;
+  const VerifiedMethod* verified_method_;
+
+  Handle<mirror::DexCache> dex_cache_;
+
+  std::string symbol_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
new file mode 100644
index 000000000..26ab28174
--- /dev/null
+++ b/compiler/elf_builder.h
@@ -0,0 +1,943 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_ELF_BUILDER_H_
+#define ART_COMPILER_ELF_BUILDER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "arch/mips/instruction_set_features_mips.h"
+#include "base/bit_utils.h"
+#include "base/casts.h"
+#include "base/unix_file/fd_file.h"
+#include "elf_utils.h"
+#include "leb128.h"
+#include "linker/error_delaying_output_stream.h"
+#include "utils/array_ref.h"
+
+namespace art {
+
+// Writes ELF file.
+//
+// The basic layout of the elf file:
+//   Elf_Ehdr                  - The ELF header.
+//   Elf_Phdr[]                - Program headers for the linker.
+//   .rodata                   - DEX files and oat metadata.
+//   .text                     - Compiled code.
+//   .bss                      - Zero-initialized writeable section.
+//   .MIPS.abiflags            - MIPS specific section.
+//   .dynstr                   - Names for .dynsym.
+//   .dynsym                   - A few oat-specific dynamic symbols.
+//   .hash                     - Hash-table for .dynsym.
+//   .dynamic                  - Tags which let the linker locate .dynsym.
+//   .strtab                   - Names for .symtab.
+//   .symtab                   - Debug symbols.
+//   .eh_frame                 - Unwind information (CFI).
+//   .eh_frame_hdr             - Index of .eh_frame.
+//   .debug_frame              - Unwind information (CFI).
+//   .debug_frame.oat_patches  - Addresses for relocation.
+//   .debug_info               - Debug information.
+//   .debug_info.oat_patches   - Addresses for relocation.
+//   .debug_abbrev             - Decoding information for .debug_info.
+//   .debug_str                - Strings for .debug_info.
+//   .debug_line               - Line number tables.
+//   .debug_line.oat_patches   - Addresses for relocation.
+//   .text.oat_patches         - Addresses for relocation.
+//   .shstrtab                 - Names of ELF sections.
+//   Elf_Shdr[]                - Section headers.
+//
+// Some sections are optional (the debug sections in particular).
+//
+// We try to write the section data directly into the file without much
+// in-memory buffering. This means we generally write sections based on the
+// dependency order (e.g. .dynamic points to .dynsym which points to .text).
+//
+// In the cases where we need to buffer, we write the larger section first
+// and buffer the smaller one (e.g. .strtab is bigger than .symtab).
+//
+// The debug sections are written last for easier stripping.
+//
+template <typename ElfTypes>
+class ElfBuilder FINAL {
+ public:
+  static constexpr size_t kMaxProgramHeaders = 16;
+  using Elf_Addr = typename ElfTypes::Addr;
+  using Elf_Off = typename ElfTypes::Off;
+  using Elf_Word = typename ElfTypes::Word;
+  using Elf_Sword = typename ElfTypes::Sword;
+  using Elf_Ehdr = typename ElfTypes::Ehdr;
+  using Elf_Shdr = typename ElfTypes::Shdr;
+  using Elf_Sym = typename ElfTypes::Sym;
+  using Elf_Phdr = typename ElfTypes::Phdr;
+  using Elf_Dyn = typename ElfTypes::Dyn;
+
+  // Base class of all sections.
+  class Section : public OutputStream {
+   public:
+    Section(ElfBuilder* owner,
+            const std::string& name,
+            Elf_Word type,
+            Elf_Word flags,
+            const Section* link,
+            Elf_Word info,
+            Elf_Word align,
+            Elf_Word entsize)
+        : OutputStream(name),
+          owner_(owner),
+          header_(),
+          section_index_(0),
+          name_(name),
+          link_(link),
+          started_(false),
+          finished_(false),
+          phdr_flags_(PF_R),
+          phdr_type_(0) {
+      DCHECK_GE(align, 1u);
+      header_.sh_type = type;
+      header_.sh_flags = flags;
+      header_.sh_info = info;
+      header_.sh_addralign = align;
+      header_.sh_entsize = entsize;
+    }
+
+    // Start writing of this section.
+    void Start() {
+      CHECK(!started_);
+      CHECK(!finished_);
+      started_ = true;
+      auto& sections = owner_->sections_;
+      // Check that the previous section is complete.
+      CHECK(sections.empty() || sections.back()->finished_);
+      // The first ELF section index is 1. Index 0 is reserved for NULL.
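+      // Illustrative example (editor's note, not part of the original patch):
+      // if .rodata is started first and .text second, .rodata receives section
+      // index 1 and .text index 2; .dynsym/.symtab entries and sh_link fields
+      // later refer to sections by exactly these indices.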
+      section_index_ = sections.size() + 1;
+      // Page-align if we switch between allocated and non-allocated sections,
+      // or if we change the type of allocation (e.g. executable vs non-executable).
+      if (!sections.empty()) {
+        if (header_.sh_flags != sections.back()->header_.sh_flags) {
+          header_.sh_addralign = kPageSize;
+        }
+      }
+      // Align file position.
+      if (header_.sh_type != SHT_NOBITS) {
+        header_.sh_offset = owner_->AlignFileOffset(header_.sh_addralign);
+      } else {
+        header_.sh_offset = 0;
+      }
+      // Align virtual memory address.
+      if ((header_.sh_flags & SHF_ALLOC) != 0) {
+        header_.sh_addr = owner_->AlignVirtualAddress(header_.sh_addralign);
+      } else {
+        header_.sh_addr = 0;
+      }
+      // Push this section on the list of written sections.
+      sections.push_back(this);
+    }
+
+    // Finish writing of this section.
+    void End() {
+      CHECK(started_);
+      CHECK(!finished_);
+      finished_ = true;
+      if (header_.sh_type == SHT_NOBITS) {
+        CHECK_GT(header_.sh_size, 0u);
+      } else {
+        // Use the current file position to determine section size.
+        off_t file_offset = owner_->stream_.Seek(0, kSeekCurrent);
+        CHECK_GE(file_offset, (off_t)header_.sh_offset);
+        header_.sh_size = file_offset - header_.sh_offset;
+      }
+      if ((header_.sh_flags & SHF_ALLOC) != 0) {
+        owner_->virtual_address_ += header_.sh_size;
+      }
+    }
+
+    // Get the location of this section in virtual memory.
+    Elf_Addr GetAddress() const {
+      CHECK(started_);
+      return header_.sh_addr;
+    }
+
+    // Returns the size of the content of this section.
+    Elf_Word GetSize() const {
+      if (finished_) {
+        return header_.sh_size;
+      } else {
+        CHECK(started_);
+        CHECK_NE(header_.sh_type, (Elf_Word)SHT_NOBITS);
+        return owner_->stream_.Seek(0, kSeekCurrent) - header_.sh_offset;
+      }
+    }
+
+    // Write this section as "NOBITS" section. (used for the .bss section)
+    // This means that the ELF file does not contain the initial data for this section
+    // and it will be zero-initialized when the ELF file is loaded in the running program.
+    void WriteNoBitsSection(Elf_Word size) {
+      DCHECK_NE(header_.sh_flags & SHF_ALLOC, 0u);
+      header_.sh_type = SHT_NOBITS;
+      Start();
+      header_.sh_size = size;
+      End();
+    }
+
+    // This function always succeeds to simplify code.
+    // Use builder's Good() to check the actual status.
+    bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE {
+      CHECK(started_);
+      CHECK(!finished_);
+      return owner_->stream_.WriteFully(buffer, byte_count);
+    }
+
+    // This function always succeeds to simplify code.
+    // Use builder's Good() to check the actual status.
+    off_t Seek(off_t offset, Whence whence) OVERRIDE {
+      // Forward the seek as-is and trust the caller to use it reasonably.
+      return owner_->stream_.Seek(offset, whence);
+    }
+
+    // This function flushes the output and returns whether it succeeded.
+    // If there was a previous failure, this does nothing and returns false, i.e. failed.
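+    // Illustrative usage (editor's sketch, not part of the original patch) of
+    // the error-delaying pattern described above; errors are checked once:
+    //   section->Start();
+    //   section->WriteFully(data, size);  // "succeeds" even after an error
+    //   section->End();
+    //   if (!builder->Good()) { /* handle the single, sticky failure */ }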
+    bool Flush() OVERRIDE {
+      return owner_->stream_.Flush();
+    }
+
+    Elf_Word GetSectionIndex() const {
+      DCHECK(started_);
+      DCHECK_NE(section_index_, 0u);
+      return section_index_;
+    }
+
+   private:
+    ElfBuilder* owner_;
+    Elf_Shdr header_;
+    Elf_Word section_index_;
+    const std::string name_;
+    const Section* const link_;
+    bool started_;
+    bool finished_;
+    Elf_Word phdr_flags_;
+    Elf_Word phdr_type_;
+
+    friend class ElfBuilder;
+
+    DISALLOW_COPY_AND_ASSIGN(Section);
+  };
+
+  class CachedSection : public Section {
+   public:
+    CachedSection(ElfBuilder* owner,
+                  const std::string& name,
+                  Elf_Word type,
+                  Elf_Word flags,
+                  const Section* link,
+                  Elf_Word info,
+                  Elf_Word align,
+                  Elf_Word entsize)
+        : Section(owner, name, type, flags, link, info, align, entsize), cache_() { }
+
+    Elf_Word Add(const void* data, size_t length) {
+      Elf_Word offset = cache_.size();
+      const uint8_t* d = reinterpret_cast<const uint8_t*>(data);
+      cache_.insert(cache_.end(), d, d + length);
+      return offset;
+    }
+
+    Elf_Word GetCacheSize() {
+      return cache_.size();
+    }
+
+    void Write() {
+      this->WriteFully(cache_.data(), cache_.size());
+      cache_.clear();
+      cache_.shrink_to_fit();
+    }
+
+    void WriteCachedSection() {
+      this->Start();
+      Write();
+      this->End();
+    }
+
+   private:
+    std::vector<uint8_t> cache_;
+  };
+
+  // Writer of .dynstr section.
+  class CachedStringSection FINAL : public CachedSection {
+   public:
+    CachedStringSection(ElfBuilder* owner,
+                        const std::string& name,
+                        Elf_Word flags,
+                        Elf_Word align)
+        : CachedSection(owner,
+                        name,
+                        SHT_STRTAB,
+                        flags,
+                        /* link */ nullptr,
+                        /* info */ 0,
+                        align,
+                        /* entsize */ 0) { }
+
+    Elf_Word Add(const std::string& name) {
+      if (CachedSection::GetCacheSize() == 0u) {
+        DCHECK(name.empty());
+      }
+      return CachedSection::Add(name.c_str(), name.length() + 1);
+    }
+  };
+
+  // Writer of .strtab and .shstrtab sections.
+  class StringSection FINAL : public Section {
+   public:
+    StringSection(ElfBuilder* owner,
+                  const std::string& name,
+                  Elf_Word flags,
+                  Elf_Word align)
+        : Section(owner,
+                  name,
+                  SHT_STRTAB,
+                  flags,
+                  /* link */ nullptr,
+                  /* info */ 0,
+                  align,
+                  /* entsize */ 0),
+          current_offset_(0) {
+    }
+
+    Elf_Word Write(const std::string& name) {
+      if (current_offset_ == 0) {
+        DCHECK(name.empty());
+      }
+      Elf_Word offset = current_offset_;
+      this->WriteFully(name.c_str(), name.length() + 1);
+      current_offset_ += name.length() + 1;
+      return offset;
+    }
+
+   private:
+    Elf_Word current_offset_;
+  };
+
+  // Writer of .dynsym and .symtab sections.
+  class SymbolSection FINAL : public CachedSection {
+   public:
+    SymbolSection(ElfBuilder* owner,
+                  const std::string& name,
+                  Elf_Word type,
+                  Elf_Word flags,
+                  Section* strtab)
+        : CachedSection(owner,
+                        name,
+                        type,
+                        flags,
+                        strtab,
+                        /* info */ 0,
+                        sizeof(Elf_Off),
+                        sizeof(Elf_Sym)) {
+      // The symbol table always has to start with NULL symbol.
+      Elf_Sym null_symbol = Elf_Sym();
+      CachedSection::Add(&null_symbol, sizeof(null_symbol));
+    }
+
+    // Buffer symbol for this section. It will be written later.
+    // If the symbol's section is null, it will be considered absolute (SHN_ABS).
+    // (we use this in JIT to reference code which is stored outside the debug ELF file)
+    void Add(Elf_Word name,
+             const Section* section,
+             Elf_Addr addr,
+             Elf_Word size,
+             uint8_t binding,
+             uint8_t type) {
+      Elf_Word section_index;
+      if (section != nullptr) {
+        DCHECK_LE(section->GetAddress(), addr);
+        DCHECK_LE(addr, section->GetAddress() + section->GetSize());
+        section_index = section->GetSectionIndex();
+      } else {
+        section_index = static_cast<Elf_Word>(SHN_ABS);
+      }
+      Add(name, section_index, addr, size, binding, type);
+    }
+
+    void Add(Elf_Word name,
+             Elf_Word section_index,
+             Elf_Addr addr,
+             Elf_Word size,
+             uint8_t binding,
+             uint8_t type) {
+      Elf_Sym sym = Elf_Sym();
+      sym.st_name = name;
+      sym.st_value = addr;
+      sym.st_size = size;
+      sym.st_other = 0;
+      sym.st_shndx = section_index;
+      sym.st_info = (binding << 4) + (type & 0xf);
+      CachedSection::Add(&sym, sizeof(sym));
+    }
+  };
+
+  class AbiflagsSection FINAL : public Section {
+   public:
+    // Section with Mips abiflag info.
+    static constexpr uint8_t MIPS_AFL_REG_NONE = 0;          // no registers
+    static constexpr uint8_t MIPS_AFL_REG_32 = 1;            // 32-bit registers
+    static constexpr uint8_t MIPS_AFL_REG_64 = 2;            // 64-bit registers
+    static constexpr uint32_t MIPS_AFL_FLAGS1_ODDSPREG = 1;  // Uses odd single-prec fp regs
+    static constexpr uint8_t MIPS_ABI_FP_DOUBLE = 1;         // -mdouble-float
+    static constexpr uint8_t MIPS_ABI_FP_XX = 5;             // -mfpxx
+    static constexpr uint8_t MIPS_ABI_FP_64A = 7;            // -mips32r* -mfp64 -mno-odd-spreg
+
+    AbiflagsSection(ElfBuilder* owner,
+                    const std::string& name,
+                    Elf_Word type,
+                    Elf_Word flags,
+                    const Section* link,
+                    Elf_Word info,
+                    Elf_Word align,
+                    Elf_Word entsize,
+                    InstructionSet isa,
+                    const InstructionSetFeatures* features)
+        : Section(owner, name, type, flags, link, info, align, entsize) {
+      if (isa == kMips || isa == kMips64) {
+        bool fpu32 = false;    // assume mips64 values
+        uint8_t isa_rev = 6;   // assume mips64 values
+        if (isa == kMips) {
+          // adjust for mips32 values
+          fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint();
+          isa_rev = features->AsMipsInstructionSetFeatures()->IsR6()
+              ? 6
+              : features->AsMipsInstructionSetFeatures()->IsMipsIsaRevGreaterThanEqual2()
+                  ? (fpu32 ? 2 : 5)
+                  : 1;
+        }
+        abiflags_.version = 0;  // version of flags structure
+        abiflags_.isa_level = (isa == kMips) ? 32 : 64;
+        abiflags_.isa_rev = isa_rev;
+        abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+        abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64;
+        abiflags_.cpr2_size = MIPS_AFL_REG_NONE;
+        // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6).
+        // Otherwise set to MIPS_ABI_FP_DOUBLE.
+        abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE;
+        abiflags_.isa_ext = 0;
+        abiflags_.ases = 0;
+        // To keep the code simple, we are not using odd FP reg for single floats for both
+        // mips32 and mips64 ART. Therefore we are not setting the MIPS_AFL_FLAGS1_ODDSPREG bit.
+        abiflags_.flags1 = 0;
+        abiflags_.flags2 = 0;
+      }
+    }
+
+    Elf_Word GetSize() const {
+      return sizeof(abiflags_);
+    }
+
+    void Write() {
+      this->WriteFully(&abiflags_, sizeof(abiflags_));
+    }
+
+   private:
+    struct {
+      uint16_t version;  // version of this structure
+      uint8_t isa_level, isa_rev, gpr_size, cpr1_size, cpr2_size;
+      uint8_t fp_abi;
+      uint32_t isa_ext, ases, flags1, flags2;
+    } abiflags_;
+  };
+
+  ElfBuilder(InstructionSet isa, const InstructionSetFeatures* features, OutputStream* output)
+      : isa_(isa),
+        features_(features),
+        stream_(output),
+        rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+        text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
+        bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+        dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
+        dynsym_(this, ".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_),
+        hash_(this, ".hash", SHT_HASH, SHF_ALLOC, &dynsym_, 0, sizeof(Elf_Word), sizeof(Elf_Word)),
+        dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),
+        eh_frame_(this, ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+        eh_frame_hdr_(this, ".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0),
+        strtab_(this, ".strtab", 0, 1),
+        symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_),
+        debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0),
+        debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
+        debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0),
+        shstrtab_(this, ".shstrtab", 0, 1),
+        abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0,
+                  isa, features),
+        started_(false),
+        write_program_headers_(false),
+        loaded_size_(0u),
+        virtual_address_(0) {
+    text_.phdr_flags_ = PF_R | PF_X;
+    bss_.phdr_flags_ = PF_R | PF_W;
+    dynamic_.phdr_flags_ = PF_R | PF_W;
+    dynamic_.phdr_type_ = PT_DYNAMIC;
+    eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME;
+    abiflags_.phdr_type_ = PT_MIPS_ABIFLAGS;
+  }
+  ~ElfBuilder() {}
+
+  InstructionSet GetIsa() { return isa_; }
+  Section* GetRoData() { return &rodata_; }
+  Section* GetText() { return &text_; }
+  Section* GetBss() { return &bss_; }
+  StringSection* GetStrTab() { return &strtab_; }
+  SymbolSection* GetSymTab() { return &symtab_; }
+  Section* GetEhFrame() { return &eh_frame_; }
+  Section* GetEhFrameHdr() { return &eh_frame_hdr_; }
+  Section* GetDebugFrame() { return &debug_frame_; }
+  Section* GetDebugInfo() { return &debug_info_; }
+  Section* GetDebugLine() { return &debug_line_; }
+
+  // Encode patch locations as LEB128 list of deltas between consecutive addresses.
+  // (exposed publicly for tests)
+  static void EncodeOatPatches(const ArrayRef<const uintptr_t>& locations,
+                               std::vector<uint8_t>* buffer) {
+    buffer->reserve(buffer->size() + locations.size() * 2);  // guess 2 bytes per ULEB128.
+    uintptr_t address = 0;  // relative to start of section.
+    for (uintptr_t location : locations) {
+      DCHECK_GE(location, address) << "Patch locations are not in sorted order";
+      EncodeUnsignedLeb128(buffer, dchecked_integral_cast<uint32_t>(location - address));
+      address = location;
+    }
+  }
+
+  void WritePatches(const char* name, const ArrayRef<const uintptr_t>& patch_locations) {
+    std::vector<uint8_t> buffer;
+    EncodeOatPatches(patch_locations, &buffer);
+    std::unique_ptr<Section> s(new Section(this, name, SHT_OAT_PATCH, 0, nullptr, 0, 1, 0));
+    s->Start();
+    s->WriteFully(buffer.data(), buffer.size());
+    s->End();
+    other_sections_.push_back(std::move(s));
+  }
+
+  void WriteSection(const char* name, const std::vector<uint8_t>* buffer) {
+    std::unique_ptr<Section> s(new Section(this, name, SHT_PROGBITS, 0, nullptr, 0, 1, 0));
+    s->Start();
+    s->WriteFully(buffer->data(), buffer->size());
+    s->End();
+    other_sections_.push_back(std::move(s));
+  }
+
+  // Reserve space for ELF header and program headers.
+  // We do not know the number of headers until later, so
+  // it is easiest to just reserve a fixed amount of space.
+  // Program headers are required for loading by the linker.
+  // It is possible to omit them for ELF files used for debugging.
+  void Start(bool write_program_headers = true) {
+    int size = sizeof(Elf_Ehdr);
+    if (write_program_headers) {
+      size += sizeof(Elf_Phdr) * kMaxProgramHeaders;
+    }
+    stream_.Seek(size, kSeekSet);
+    started_ = true;
+    virtual_address_ += size;
+    write_program_headers_ = write_program_headers;
+  }
+
+  void End() {
+    DCHECK(started_);
+
+    // Note: loaded_size_ == 0 for tests that don't write .rodata, .text, .bss,
+    // .dynstr, dynsym, .hash and .dynamic. These tests should not read loaded_size_.
+    // TODO: Either refactor the .eh_frame creation so that it counts towards loaded_size_,
+    // or remove all support for .eh_frame. (The currently unused .eh_frame counts towards
+    // the virtual_address_ but we don't consider it for loaded_size_.)
+    CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))
+        << loaded_size_ << " " << virtual_address_;
+
+    // Write section names and finish the section headers.
+    shstrtab_.Start();
+    shstrtab_.Write("");
+    for (auto* section : sections_) {
+      section->header_.sh_name = shstrtab_.Write(section->name_);
+      if (section->link_ != nullptr) {
+        section->header_.sh_link = section->link_->GetSectionIndex();
+      }
+    }
+    shstrtab_.End();
+
+    // Write section headers at the end of the ELF file.
+    std::vector<Elf_Shdr> shdrs;
+    shdrs.reserve(1u + sections_.size());
+    shdrs.push_back(Elf_Shdr());  // NULL at index 0.
+    for (auto* section : sections_) {
+      shdrs.push_back(section->header_);
+    }
+    Elf_Off section_headers_offset;
+    section_headers_offset = AlignFileOffset(sizeof(Elf_Off));
+    stream_.WriteFully(shdrs.data(), shdrs.size() * sizeof(shdrs[0]));
+
+    // Flush everything else before writing the program headers. This should prevent
+    // the OS from reordering writes, so that we don't end up with valid headers
+    // and partially written data if we suddenly lose power, for example.
+    stream_.Flush();
+
+    // The main ELF header.
+    Elf_Ehdr elf_header = MakeElfHeader(isa_, features_);
+    elf_header.e_shoff = section_headers_offset;
+    elf_header.e_shnum = shdrs.size();
+    elf_header.e_shstrndx = shstrtab_.GetSectionIndex();
+
+    // Program headers (i.e. mmap instructions).
+    std::vector<Elf_Phdr> phdrs;
+    if (write_program_headers_) {
+      phdrs = MakeProgramHeaders();
+      CHECK_LE(phdrs.size(), kMaxProgramHeaders);
+      elf_header.e_phoff = sizeof(Elf_Ehdr);
+      elf_header.e_phnum = phdrs.size();
+    }
+
+    stream_.Seek(0, kSeekSet);
+    stream_.WriteFully(&elf_header, sizeof(elf_header));
+    stream_.WriteFully(phdrs.data(), phdrs.size() * sizeof(phdrs[0]));
+    stream_.Flush();
+  }
+
+  // The running program does not have access to section headers
+  // and the loader is not supposed to use them either.
+  // The .dynamic section therefore replicates some of the layout
+  // information like the address and size of .rodata and .text.
+  // It also contains other metadata like the SONAME.
+  // The .dynamic section is found using the PT_DYNAMIC program header.
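+  // Illustrative sketch (editor's example, not part of the original patch):
+  // with only .dynamic available, a consumer could locate the oat data via the
+  // dynamic symbols prepared below, roughly:
+  //   void* handle = dlopen("/path/to/some.oat", RTLD_NOW);  // hypothetical path
+  //   void* oatdata = dlsym(handle, "oatdata");
+  //   void* oatexec = dlsym(handle, "oatexec");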
+  void PrepareDynamicSection(const std::string& elf_file_path,
+                             Elf_Word rodata_size,
+                             Elf_Word text_size,
+                             Elf_Word bss_size) {
+    std::string soname(elf_file_path);
+    size_t directory_separator_pos = soname.rfind('/');
+    if (directory_separator_pos != std::string::npos) {
+      soname = soname.substr(directory_separator_pos + 1);
+    }
+
+    // Calculate addresses of .text, .bss and .dynstr.
+    DCHECK_EQ(rodata_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(text_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(bss_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    DCHECK_EQ(dynstr_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    Elf_Word rodata_address = rodata_.GetAddress();
+    Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize);
+    Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize);
+    Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize);
+    Elf_Word abiflags_size = 0;
+    if (isa_ == kMips || isa_ == kMips64) {
+      abiflags_size = abiflags_.GetSize();
+    }
+    Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize);
+
+    // Cache .dynstr, .dynsym and .hash data.
+    dynstr_.Add("");  // dynstr should start with empty string.
+    Elf_Word rodata_index = rodata_.GetSectionIndex();
+    Elf_Word oatdata = dynstr_.Add("oatdata");
+    dynsym_.Add(oatdata, rodata_index, rodata_address, rodata_size, STB_GLOBAL, STT_OBJECT);
+    if (text_size != 0u) {
+      Elf_Word text_index = rodata_index + 1u;
+      Elf_Word oatexec = dynstr_.Add("oatexec");
+      dynsym_.Add(oatexec, text_index, text_address, text_size, STB_GLOBAL, STT_OBJECT);
+      Elf_Word oatlastword = dynstr_.Add("oatlastword");
+      Elf_Word oatlastword_address = text_address + text_size - 4;
+      dynsym_.Add(oatlastword, text_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    } else if (rodata_size != 0) {
+      // rodata_ can be size 0 for dwarf_test.
+      Elf_Word oatlastword = dynstr_.Add("oatlastword");
+      Elf_Word oatlastword_address = rodata_address + rodata_size - 4;
+      dynsym_.Add(oatlastword, rodata_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    }
+    if (bss_size != 0u) {
+      Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u);
+      Elf_Word oatbss = dynstr_.Add("oatbss");
+      dynsym_.Add(oatbss, bss_index, bss_address, bss_size, STB_GLOBAL, STT_OBJECT);
+      Elf_Word oatbsslastword = dynstr_.Add("oatbsslastword");
+      Elf_Word bsslastword_address = bss_address + bss_size - 4;
+      dynsym_.Add(oatbsslastword, bss_index, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT);
+    }
+    Elf_Word soname_offset = dynstr_.Add(soname);
+
+    // We do not really need a hash-table since there are so few entries.
+    // However, the hash-table is the only way the linker can actually
+    // determine the number of symbols in .dynsym so it is required.
+    int count = dynsym_.GetCacheSize() / sizeof(Elf_Sym);  // Includes NULL.
+    std::vector<Elf_Word> hash;
+    hash.push_back(1);  // Number of buckets.
+    hash.push_back(count);  // Number of chains.
+    // Buckets. Having just one makes it linear search.
+    hash.push_back(1);  // Point to first non-NULL symbol.
+    // Chains. This creates linked list of symbols.
+    hash.push_back(0);  // Dummy entry for the NULL symbol.
+    for (int i = 1; i < count - 1; i++) {
+      hash.push_back(i + 1);  // Each symbol points to the next one.
+    }
+    hash.push_back(0);  // Last symbol terminates the chain.
+    hash_.Add(hash.data(), hash.size() * sizeof(hash[0]));
+
+    // Calculate addresses of .dynsym, .hash and .dynamic.
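+    // Worked example (editor's note, not part of the original patch) for the
+    // hash table built above: with the NULL entry plus three symbols
+    // (count == 4) the vector is {1, 4, 1, 0, 2, 3, 0}, i.e. one bucket
+    // pointing at symbol 1, whose chain visits symbols 1 -> 2 -> 3 and ends.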
+    DCHECK_EQ(dynstr_.header_.sh_flags, dynsym_.header_.sh_flags);
+    DCHECK_EQ(dynsym_.header_.sh_flags, hash_.header_.sh_flags);
+    Elf_Word dynsym_address =
+        RoundUp(dynstr_address + dynstr_.GetCacheSize(), dynsym_.header_.sh_addralign);
+    Elf_Word hash_address =
+        RoundUp(dynsym_address + dynsym_.GetCacheSize(), hash_.header_.sh_addralign);
+    DCHECK_EQ(dynamic_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
+    Elf_Word dynamic_address = RoundUp(hash_address + hash_.GetCacheSize(), kPageSize);
+
+    Elf_Dyn dyns[] = {
+      { DT_HASH, { hash_address } },
+      { DT_STRTAB, { dynstr_address } },
+      { DT_SYMTAB, { dynsym_address } },
+      { DT_SYMENT, { sizeof(Elf_Sym) } },
+      { DT_STRSZ, { dynstr_.GetCacheSize() } },
+      { DT_SONAME, { soname_offset } },
+      { DT_NULL, { 0 } },
+    };
+    dynamic_.Add(&dyns, sizeof(dyns));
+
+    loaded_size_ = RoundUp(dynamic_address + dynamic_.GetCacheSize(), kPageSize);
+  }
+
+  void WriteDynamicSection() {
+    dynstr_.WriteCachedSection();
+    dynsym_.WriteCachedSection();
+    hash_.WriteCachedSection();
+    dynamic_.WriteCachedSection();
+
+    CHECK_EQ(loaded_size_, RoundUp(dynamic_.GetAddress() + dynamic_.GetSize(), kPageSize));
+  }
+
+  Elf_Word GetLoadedSize() {
+    CHECK_NE(loaded_size_, 0u);
+    return loaded_size_;
+  }
+
+  void WriteMIPSabiflagsSection() {
+    abiflags_.Start();
+    abiflags_.Write();
+    abiflags_.End();
+  }
+
+  // Returns true if all writes and seeks on the output stream succeeded.
+  bool Good() {
+    return stream_.Good();
+  }
+
+  // Returns the builder's internal stream.
+  OutputStream* GetStream() {
+    return &stream_;
+  }
+
+  off_t AlignFileOffset(size_t alignment) {
+    return stream_.Seek(RoundUp(stream_.Seek(0, kSeekCurrent), alignment), kSeekSet);
+  }
+
+  Elf_Addr AlignVirtualAddress(size_t alignment) {
+    return virtual_address_ = RoundUp(virtual_address_, alignment);
+  }
+
+ private:
+  static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) {
+    Elf_Ehdr elf_header = Elf_Ehdr();
+    switch (isa) {
+      case kArm:
+        // Fall through.
+      case kThumb2: {
+        elf_header.e_machine = EM_ARM;
+        elf_header.e_flags = EF_ARM_EABI_VER5;
+        break;
+      }
+      case kArm64: {
+        elf_header.e_machine = EM_AARCH64;
+        elf_header.e_flags = 0;
+        break;
+      }
+      case kX86: {
+        elf_header.e_machine = EM_386;
+        elf_header.e_flags = 0;
+        break;
+      }
+      case kX86_64: {
+        elf_header.e_machine = EM_X86_64;
+        elf_header.e_flags = 0;
+        break;
+      }
+      case kMips: {
+        elf_header.e_machine = EM_MIPS;
+        elf_header.e_flags = (EF_MIPS_NOREORDER |
+                              EF_MIPS_PIC |
+                              EF_MIPS_CPIC |
+                              EF_MIPS_ABI_O32 |
+                              (features->AsMipsInstructionSetFeatures()->IsR6()
+                                   ? EF_MIPS_ARCH_32R6
+                                   : EF_MIPS_ARCH_32R2));
+        break;
+      }
+      case kMips64: {
+        elf_header.e_machine = EM_MIPS;
+        elf_header.e_flags = (EF_MIPS_NOREORDER |
+                              EF_MIPS_PIC |
+                              EF_MIPS_CPIC |
+                              EF_MIPS_ARCH_64R6);
+        break;
+      }
+      case kNone: {
+        LOG(FATAL) << "No instruction set";
+        break;
+      }
+      default: {
+        LOG(FATAL) << "Unknown instruction set " << isa;
+      }
+    }
+
+    elf_header.e_ident[EI_MAG0] = ELFMAG0;
+    elf_header.e_ident[EI_MAG1] = ELFMAG1;
+    elf_header.e_ident[EI_MAG2] = ELFMAG2;
+    elf_header.e_ident[EI_MAG3] = ELFMAG3;
+    elf_header.e_ident[EI_CLASS] = (sizeof(Elf_Addr) == sizeof(Elf32_Addr))
+        ? ELFCLASS32 : ELFCLASS64;
+    elf_header.e_ident[EI_DATA] = ELFDATA2LSB;
+    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
+    elf_header.e_ident[EI_OSABI] = ELFOSABI_LINUX;
+    elf_header.e_ident[EI_ABIVERSION] = 0;
+    elf_header.e_type = ET_DYN;
+    elf_header.e_version = 1;
+    elf_header.e_entry = 0;
+    elf_header.e_ehsize = sizeof(Elf_Ehdr);
+    elf_header.e_phentsize = sizeof(Elf_Phdr);
+    elf_header.e_shentsize = sizeof(Elf_Shdr);
+    elf_header.e_phoff = sizeof(Elf_Ehdr);
+    return elf_header;
+  }
+
+  // Create program headers based on written sections.
+  std::vector<Elf_Phdr> MakeProgramHeaders() {
+    CHECK(!sections_.empty());
+    std::vector<Elf_Phdr> phdrs;
+    {
+      // The program headers must start with PT_PHDR which is used in
+      // loaded process to determine the number of program headers.
+      Elf_Phdr phdr = Elf_Phdr();
+      phdr.p_type = PT_PHDR;
+      phdr.p_flags = PF_R;
+      phdr.p_offset = phdr.p_vaddr = phdr.p_paddr = sizeof(Elf_Ehdr);
+      phdr.p_filesz = phdr.p_memsz = 0;  // We need to fill this later.
+      phdr.p_align = sizeof(Elf_Off);
+      phdrs.push_back(phdr);
+      // Tell the linker to mmap the start of file to memory.
+      Elf_Phdr load = Elf_Phdr();
+      load.p_type = PT_LOAD;
+      load.p_flags = PF_R;
+      load.p_offset = load.p_vaddr = load.p_paddr = 0;
+      load.p_filesz = load.p_memsz = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * kMaxProgramHeaders;
+      load.p_align = kPageSize;
+      phdrs.push_back(load);
+    }
+    // Create program headers for sections.
+    for (auto* section : sections_) {
+      const Elf_Shdr& shdr = section->header_;
+      if ((shdr.sh_flags & SHF_ALLOC) != 0 && shdr.sh_size != 0) {
+        // PT_LOAD tells the linker to mmap part of the file.
+        // The linker can only mmap page-aligned sections.
+        // Single PT_LOAD may contain several ELF sections.
+        Elf_Phdr& prev = phdrs.back();
+        Elf_Phdr load = Elf_Phdr();
+        load.p_type = PT_LOAD;
+        load.p_flags = section->phdr_flags_;
+        load.p_offset = shdr.sh_offset;
+        load.p_vaddr = load.p_paddr = shdr.sh_addr;
+        load.p_filesz = (shdr.sh_type != SHT_NOBITS ? shdr.sh_size : 0u);
+        load.p_memsz = shdr.sh_size;
+        load.p_align = shdr.sh_addralign;
+        if (prev.p_type == load.p_type &&
+            prev.p_flags == load.p_flags &&
+            prev.p_filesz == prev.p_memsz &&  // Do not merge .bss
+            load.p_filesz == load.p_memsz) {  // Do not merge .bss
+          // Merge this PT_LOAD with the previous one.
+          Elf_Word size = shdr.sh_offset + shdr.sh_size - prev.p_offset;
+          prev.p_filesz = size;
+          prev.p_memsz = size;
+        } else {
+          // If we are adding new load, it must be aligned.
+          CHECK_EQ(shdr.sh_addralign, (Elf_Word)kPageSize);
+          phdrs.push_back(load);
+        }
+      }
+    }
+    for (auto* section : sections_) {
+      const Elf_Shdr& shdr = section->header_;
+      if ((shdr.sh_flags & SHF_ALLOC) != 0 && shdr.sh_size != 0) {
+        // Other PT_* types allow the program to locate interesting
+        // parts of memory at runtime. They must overlap with PT_LOAD.
+        if (section->phdr_type_ != 0) {
+          Elf_Phdr phdr = Elf_Phdr();
+          phdr.p_type = section->phdr_type_;
+          phdr.p_flags = section->phdr_flags_;
+          phdr.p_offset = shdr.sh_offset;
+          phdr.p_vaddr = phdr.p_paddr = shdr.sh_addr;
+          phdr.p_filesz = phdr.p_memsz = shdr.sh_size;
+          phdr.p_align = shdr.sh_addralign;
+          phdrs.push_back(phdr);
+        }
+      }
+    }
+    // Set the size of the initial PT_PHDR.
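+    // Illustrative result (editor's note, not part of the original patch) of
+    // the merging loop above for a typical oat file:
+    //   PT_LOAD R   : ELF headers + .rodata  (same R flags, merged)
+    //   PT_LOAD R|X : .text                  (new flags, new segment)
+    //   PT_LOAD R|W : .bss                   (NOBITS, filesz != memsz, kept apart)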
+    CHECK_EQ(phdrs[0].p_type, (Elf_Word)PT_PHDR);
+    phdrs[0].p_filesz = phdrs[0].p_memsz = phdrs.size() * sizeof(Elf_Phdr);
+
+    return phdrs;
+  }
+
+  InstructionSet isa_;
+  const InstructionSetFeatures* features_;
+
+  ErrorDelayingOutputStream stream_;
+
+  Section rodata_;
+  Section text_;
+  Section bss_;
+  CachedStringSection dynstr_;
+  SymbolSection dynsym_;
+  CachedSection hash_;
+  CachedSection dynamic_;
+  Section eh_frame_;
+  Section eh_frame_hdr_;
+  StringSection strtab_;
+  SymbolSection symtab_;
+  Section debug_frame_;
+  Section debug_info_;
+  Section debug_line_;
+  StringSection shstrtab_;
+  AbiflagsSection abiflags_;
+  std::vector<std::unique_ptr<Section>> other_sections_;
+
+  // List of used sections in the order in which they were written.
+  std::vector<Section*> sections_;
+
+  bool started_;
+  bool write_program_headers_;
+
+  // The size of the memory taken by the ELF file when loaded.
+  size_t loaded_size_;
+
+  // Used for allocation of virtual address space.
+  Elf_Addr virtual_address_;
+
+  DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_ELF_BUILDER_H_
diff --git a/compiler/elf_writer.cc b/compiler/elf_writer.cc
new file mode 100644
index 000000000..ca0869a83
--- /dev/null
+++ b/compiler/elf_writer.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_writer.h"
+
+#include "art_method-inl.h"
+#include "base/unix_file/fd_file.h"
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "dex_method_iterator.h"
+#include "driver/compiler_driver.h"
+#include "elf_file.h"
+#include "invoke_type.h"
+#include "mirror/object-inl.h"
+#include "oat.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+uintptr_t ElfWriter::GetOatDataAddress(ElfFile* elf_file) {
+  uintptr_t oatdata_address = elf_file->FindSymbolAddress(SHT_DYNSYM,
+                                                          "oatdata",
+                                                          false);
+  CHECK_NE(0U, oatdata_address);
+  return oatdata_address;
+}
+
+void ElfWriter::GetOatElfInformation(File* file,
+                                     size_t* oat_loaded_size,
+                                     size_t* oat_data_offset) {
+  std::string error_msg;
+  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file,
+                                                  false,
+                                                  false,
+                                                  /*low_4gb*/false,
+                                                  &error_msg));
+  CHECK(elf_file.get() != nullptr) << error_msg;
+
+  bool success = elf_file->GetLoadedSize(oat_loaded_size, &error_msg);
+  CHECK(success) << error_msg;
+  CHECK_NE(0U, *oat_loaded_size);
+  *oat_data_offset = GetOatDataAddress(elf_file.get());
+  CHECK_NE(0U, *oat_data_offset);
+}
+
+bool ElfWriter::Fixup(File* file, uintptr_t oat_data_begin) {
+  std::string error_msg;
+  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, &error_msg));
+  CHECK(elf_file.get() != nullptr) << error_msg;
+
+  // Lookup "oatdata" symbol address.
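+  // Worked example (editor's note, not part of the original patch): if
+  // "oatdata" is at virtual address 0x1000 inside the file and the image
+  // layout expects it at 0x70001000, then base_address is 0x70000000 and
+  // Fixup() shifts every absolute address by that delta. (Values are
+  // illustrative only.)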
+  uintptr_t oatdata_address = ElfWriter::GetOatDataAddress(elf_file.get());
+  uintptr_t base_address = oat_data_begin - oatdata_address;
+
+  return elf_file->Fixup(base_address);
+}
+
+} // namespace art
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
new file mode 100644
index 000000000..c9ea0083d
--- /dev/null
+++ b/compiler/elf_writer.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_ELF_WRITER_H_
+#define ART_COMPILER_ELF_WRITER_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <string>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "os.h"
+#include "utils/array_ref.h"
+
+namespace art {
+
+class ElfFile;
+class OutputStream;
+
+namespace debug {
+struct MethodDebugInfo;
+} // namespace debug
+
+class ElfWriter {
+ public:
+  // Looks up information about location of oat file in elf file container.
+  // Used for ImageWriter to perform memory layout.
+  static void GetOatElfInformation(File* file,
+                                   size_t* oat_loaded_size,
+                                   size_t* oat_data_offset);
+
+  // Returns the runtime address of oat_data for an opened ElfFile.
+  static uintptr_t GetOatDataAddress(ElfFile* elf_file);
+
+  static bool Fixup(File* file, uintptr_t oat_data_begin);
+
+  virtual ~ElfWriter() {}
+
+  virtual void Start() = 0;
+  virtual void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) = 0;
+  virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+  virtual OutputStream* StartRoData() = 0;
+  virtual void EndRoData(OutputStream* rodata) = 0;
+  virtual OutputStream* StartText() = 0;
+  virtual void EndText(OutputStream* text) = 0;
+  virtual void WriteDynamicSection() = 0;
+  virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+  virtual void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) = 0;
+  virtual bool End() = 0;
+
+  // Get the ELF writer's stream. This stream can be used for writing data directly
+  // to a section after the section has been finished. When that's done, the user
+  // should Seek() back to the position where the stream was before this operation.
+  virtual OutputStream* GetStream() = 0;
+
+  // Get the size that the loaded ELF file will occupy in memory.
+  virtual size_t GetLoadedSize() = 0;
+
+ protected:
+  ElfWriter() = default;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_ELF_WRITER_H_
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
new file mode 100644
index 000000000..bed864b53
--- /dev/null
+++ b/compiler/elf_writer_quick.cc
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_writer_quick.h"
+
+#include <memory>
+#include <vector>
+
+#include "base/casts.h"
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "compiled_method.h"
+#include "debug/elf_debug_writer.h"
+#include "debug/method_debug_info.h"
+#include "driver/compiler_options.h"
+#include "elf.h"
+#include "elf_builder.h"
+#include "elf_utils.h"
+#include "globals.h"
+#include "leb128.h"
+#include "linker/buffered_output_stream.h"
+#include "linker/file_output_stream.h"
+#include "thread-inl.h"
+#include "thread_pool.h"
+#include "utils.h"
+
+namespace art {
+
+// .eh_frame and .debug_frame are almost identical.
+// Except for some minor formatting differences, the main difference
+// is that .eh_frame is allocated within the running program because
+// it is used by C++ exception handling (which we do not use so we
+// can choose either). C++ compilers generally tend to use .eh_frame
+// because if they need it sometimes, they might as well always use it.
+// Let's use .debug_frame because it is easier to strip or compress.
+constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT;
+
+class DebugInfoTask : public Task {
+ public:
+  DebugInfoTask(InstructionSet isa,
+                const InstructionSetFeatures* features,
+                size_t rodata_section_size,
+                size_t text_section_size,
+                const ArrayRef<const debug::MethodDebugInfo>& method_infos)
+      : isa_(isa),
+        instruction_set_features_(features),
+        rodata_section_size_(rodata_section_size),
+        text_section_size_(text_section_size),
+        method_infos_(method_infos) {
+  }
+
+  void Run(Thread*) {
+    result_ = debug::MakeMiniDebugInfo(isa_,
+                                       instruction_set_features_,
+                                       rodata_section_size_,
+                                       text_section_size_,
+                                       method_infos_);
+  }
+
+  std::vector<uint8_t>* GetResult() {
+    return &result_;
+  }
+
+ private:
+  InstructionSet isa_;
+  const InstructionSetFeatures* instruction_set_features_;
+  size_t rodata_section_size_;
+  size_t text_section_size_;
+  const ArrayRef<const debug::MethodDebugInfo>& method_infos_;
+  std::vector<uint8_t> result_;
+};
+
+template <typename ElfTypes>
+class ElfWriterQuick FINAL : public ElfWriter {
+ public:
+  ElfWriterQuick(InstructionSet instruction_set,
+                 const InstructionSetFeatures* features,
+                 const CompilerOptions* compiler_options,
+                 File* elf_file);
+  ~ElfWriterQuick();
+
+  void Start() OVERRIDE;
+  void SetLoadedSectionSizes(size_t rodata_size, size_t text_size, size_t bss_size) OVERRIDE;
+  void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+  OutputStream* StartRoData() OVERRIDE;
+  void EndRoData(OutputStream* rodata) OVERRIDE;
+  OutputStream* StartText() OVERRIDE;
+  void EndText(OutputStream* text) OVERRIDE;
+  void WriteDynamicSection() OVERRIDE;
+  void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+  void WritePatchLocations(const ArrayRef<const uintptr_t>& patch_locations) OVERRIDE;
+  bool End() OVERRIDE;
+
+  virtual OutputStream* GetStream() OVERRIDE;
+
+  size_t GetLoadedSize() OVERRIDE;
+
+  static void EncodeOatPatches(const std::vector<uintptr_t>& locations,
+                               std::vector<uint8_t>* buffer);
+
+ private:
+  const InstructionSetFeatures* instruction_set_features_;
+  const CompilerOptions* const compiler_options_;
+  File* const elf_file_;
+  size_t rodata_size_;
+  size_t text_size_;
+  size_t bss_size_;
+  std::unique_ptr<OutputStream> output_stream_;
+  std::unique_ptr<ElfBuilder<ElfTypes>> builder_;
+  std::unique_ptr<DebugInfoTask> debug_info_task_;
+  std::unique_ptr<ThreadPool> debug_info_thread_pool_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
+};
+
+std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set,
+                                                const InstructionSetFeatures* features,
+                                                const CompilerOptions* compiler_options,
+                                                File* elf_file) {
+  if (Is64BitInstructionSet(instruction_set)) {
+    return MakeUnique<ElfWriterQuick<ElfTypes64>>(instruction_set,
+                                                  features,
+                                                  compiler_options,
+                                                  elf_file);
+  } else {
+    return MakeUnique<ElfWriterQuick<ElfTypes32>>(instruction_set,
+                                                  features,
+                                                  compiler_options,
+                                                  elf_file);
+  }
+}
+
+template <typename ElfTypes>
+ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
+                                         const InstructionSetFeatures* features,
+                                         const CompilerOptions* compiler_options,
+                                         File* elf_file)
+    : ElfWriter(),
+      instruction_set_features_(features),
+      compiler_options_(compiler_options),
+      elf_file_(elf_file),
+      rodata_size_(0u),
+      text_size_(0u),
+      bss_size_(0u),
+      output_stream_(MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file))),
+      builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {}
+
+template <typename ElfTypes>
+ElfWriterQuick<ElfTypes>::~ElfWriterQuick() {}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::Start() {
+  builder_->Start();
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::SetLoadedSectionSizes(size_t rodata_size,
+                                                     size_t text_size,
+                                                     size_t bss_size) {
+  DCHECK_EQ(rodata_size_, 0u);
+  rodata_size_ = rodata_size;
+  DCHECK_EQ(text_size_, 0u);
+  text_size_ = text_size;
+  DCHECK_EQ(bss_size_, 0u);
+  bss_size_ = bss_size;
+  builder_->PrepareDynamicSection(elf_file_->GetPath(), rodata_size_, text_size_, bss_size_);
+}
+
+template <typename ElfTypes>
+OutputStream* ElfWriterQuick<ElfTypes>::StartRoData() {
+  auto* rodata = builder_->GetRoData();
+  rodata->Start();
+  return rodata;
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::EndRoData(OutputStream* rodata) {
+  CHECK_EQ(builder_->GetRoData(), rodata);
+  builder_->GetRoData()->End();
+}
+
+template <typename ElfTypes>
+OutputStream* ElfWriterQuick<ElfTypes>::StartText() {
+  auto* text = builder_->GetText();
+  text->Start();
+  return text;
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::EndText(OutputStream* text) {
+  CHECK_EQ(builder_->GetText(), text);
+  builder_->GetText()->End();
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
+  if (bss_size_ != 0u) {
+    builder_->GetBss()->WriteNoBitsSection(bss_size_);
+  }
+  if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) {
+    builder_->WriteMIPSabiflagsSection();
+  }
+  builder_->WriteDynamicSection();
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
+    const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
+  if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
+    // Prepare the mini-debug-info in background while we do other I/O.
+    Thread* self = Thread::Current();
+    debug_info_task_ = std::unique_ptr<DebugInfoTask>(
+        new DebugInfoTask(builder_->GetIsa(),
+                          instruction_set_features_,
+                          rodata_size_,
+                          text_size_,
+                          method_infos));
+    debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
+        new ThreadPool("Mini-debug-info writer", 1));
+    debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
+    debug_info_thread_pool_->StartWorkers(self);
+  }
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
+    const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
+  if (!method_infos.empty()) {
+    if (compiler_options_->GetGenerateDebugInfo()) {
+      // Generate all the debug information we can.
+      debug::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat, true /* write_oat_patches */);
+    }
+    if (compiler_options_->GetGenerateMiniDebugInfo()) {
+      // Wait for the mini-debug-info generation to finish and write it to disk.
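+      // Illustrative call order (editor's sketch, not part of the original
+      // patch) for a caller of this writer:
+      //   writer->Start();
+      //   ... write .rodata and .text ...
+      //   writer->PrepareDebugInfo(infos);   // spawns the background task
+      //   ... other I/O: dynamic section, patches ...
+      //   writer->WriteDebugInfo(infos);     // joins the task right here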
+      Thread* self = Thread::Current();
+      DCHECK(debug_info_thread_pool_ != nullptr);
+      debug_info_thread_pool_->Wait(self, true, false);
+      builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult());
+    }
+  }
+}
+
+template <typename ElfTypes>
+void ElfWriterQuick<ElfTypes>::WritePatchLocations(
+    const ArrayRef<const uintptr_t>& patch_locations) {
+  // Add relocation section for .text.
+  if (compiler_options_->GetIncludePatchInformation()) {
+    // Note that ElfWriter::Fixup will be called regardless and therefore
+    // we need to include oat_patches for debug sections unconditionally.
+    builder_->WritePatches(".text.oat_patches", patch_locations);
+  }
+}
+
+template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::End() {
+  builder_->End();
+
+  return builder_->Good();
+}
+
+template <typename ElfTypes>
+OutputStream* ElfWriterQuick<ElfTypes>::GetStream() {
+  return builder_->GetStream();
+}
+
+template <typename ElfTypes>
+size_t ElfWriterQuick<ElfTypes>::GetLoadedSize() {
+  return builder_->GetLoadedSize();
+}
+
+// Explicit instantiations
+template class ElfWriterQuick<ElfTypes32>;
+template class ElfWriterQuick<ElfTypes64>;
+
+} // namespace art
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
new file mode 100644
index 000000000..3d5dd39a6
--- /dev/null
+++ b/compiler/elf_writer_quick.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_ELF_WRITER_QUICK_H_
+#define ART_COMPILER_ELF_WRITER_QUICK_H_
+
+#include <memory>
+
+#include "arch/instruction_set.h"
+#include "elf_writer.h"
+#include "os.h"
+
+namespace art {
+
+class CompilerOptions;
+class InstructionSetFeatures;
+
+std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set,
+                                                const InstructionSetFeatures* features,
+                                                const CompilerOptions* compiler_options,
+                                                File* elf_file);
+
+} // namespace art
+
+#endif // ART_COMPILER_ELF_WRITER_QUICK_H_
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
new file mode 100644
index 000000000..a768e0f64
--- /dev/null
+++ b/compiler/elf_writer_test.cc
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "elf_file.h"
+
+#include "base/stringprintf.h"
+#include "base/unix_file/fd_file.h"
+#include "common_compiler_test.h"
+#include "elf_file.h"
+#include "elf_file_impl.h"
+#include "elf_builder.h"
+#include "elf_writer_quick.h"
+#include "oat.h"
+#include "utils.h"
+
+namespace art {
+
+class ElfWriterTest : public CommonCompilerTest {
+ protected:
+  virtual void SetUp() {
+    ReserveImageSpace();
+    CommonCompilerTest::SetUp();
+  }
+};
+
+#define EXPECT_ELF_FILE_ADDRESS(ef, expected_value, symbol_name, build_map) \
+  do { \
+    void* addr = reinterpret_cast<void*>(ef->FindSymbolAddress(SHT_DYNSYM, \
+                                                               symbol_name, \
+                                                               build_map)); \
+    EXPECT_NE(nullptr, addr); \
+    EXPECT_LT(static_cast<uintptr_t>(ART_BASE_ADDRESS), reinterpret_cast<uintptr_t>(addr)); \
+    if (expected_value == nullptr) { \
+      expected_value = addr; \
+    } \
+    EXPECT_EQ(expected_value, addr); \
+    EXPECT_EQ(expected_value, ef->FindDynamicSymbolAddress(symbol_name)); \
+  } while (false)
+
+TEST_F(ElfWriterTest, dlsym) {
+  std::string elf_location = GetCoreOatLocation();
+  std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA);
+  LOG(INFO) << "elf_filename=" << elf_filename;
+
+  UnreserveImageSpace();
+  void* dl_oatdata = nullptr;
+  void* dl_oatexec = nullptr;
+  void* dl_oatlastword = nullptr;
+
+  std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str()));
+  ASSERT_TRUE(file.get() != nullptr);
+  {
+    std::string error_msg;
+    std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+                                              false,
+                                              false,
+                                              /*low_4gb*/false,
+                                              &error_msg));
+    CHECK(ef.get() != nullptr) << error_msg;
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", false);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", false);
+  }
+  {
+    std::string error_msg;
+    std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+                                              false,
+                                              false,
+                                              /*low_4gb*/false,
+                                              &error_msg));
+    CHECK(ef.get() != nullptr) << error_msg;
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", true);
+    EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true);
+  }
+  {
+    std::string error_msg;
+    std::unique_ptr<ElfFile> ef(ElfFile::Open(file.get(),
+                                              false,
+                                              true,
+                                              /*low_4gb*/false,
+                                              &error_msg));
+    CHECK(ef.get() != nullptr) << error_msg;
+    CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg;
+    EXPECT_EQ(dl_oatdata, ef->FindDynamicSymbolAddress("oatdata"));
+    EXPECT_EQ(dl_oatexec, ef->FindDynamicSymbolAddress("oatexec"));
+    EXPECT_EQ(dl_oatlastword, ef->FindDynamicSymbolAddress("oatlastword"));
+  }
+}
+
+TEST_F(ElfWriterTest, EncodeDecodeOatPatches) {
+  const std::vector<std::vector<uintptr_t>> test_data {
+      { 0, 4, 8, 15, 128, 200 },
+      { 8, 8 + 127 },
+      { 8, 8 + 128 },
+      { },
+  };
+  for (const auto& patch_locations : test_data) {
+    constexpr int32_t delta = 0x11235813;
+
+    // Encode patch locations.
+    std::vector<uint8_t> oat_patches;
+    ElfBuilder<ElfTypes32>::EncodeOatPatches(ArrayRef<const uintptr_t>(patch_locations),
+                                             &oat_patches);
+
+    // Create buffer to be patched.
+    std::vector<uint8_t> initial_data(256);
+    for (size_t i = 0; i < initial_data.size(); i++) {
+      initial_data[i] = i;
+    }
+
+    // Patch manually.
+    std::vector<uint8_t> expected = initial_data;
+    for (uintptr_t location : patch_locations) {
+      typedef __attribute__((__aligned__(1))) uint32_t UnalignedAddress;
+      *reinterpret_cast<UnalignedAddress*>(expected.data() + location) += delta;
+    }
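
The symmetry this test relies on, manual word patching versus encode-then-apply, holds because patch locations are stored as ULEB128-encoded deltas between sorted offsets. A self-contained sketch of that scheme; the helper names are invented here, and the exact byte format of ART's EncodeOatPatches is an assumption, not a quotation:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Encode ascending patch locations as ULEB128 deltas.
    void EncodeLocations(const std::vector<uint32_t>& locations, std::vector<uint8_t>* out) {
      uint32_t previous = 0u;
      for (uint32_t location : locations) {
        uint32_t value = location - previous;  // deltas stay small, so they encode compactly
        previous = location;
        do {
          uint8_t byte = value & 0x7fu;
          value >>= 7;
          out->push_back(byte | (value != 0u ? 0x80u : 0u));
        } while (value != 0u);
      }
    }

    // Decode the deltas and add `delta` to the 32-bit word at each location.
    void ApplyPatches(const std::vector<uint8_t>& encoded, int32_t delta, std::vector<uint8_t>* data) {
      uint32_t location = 0u;
      for (size_t i = 0u; i < encoded.size(); ) {
        uint32_t value = 0u;
        for (uint32_t shift = 0u; ; shift += 7u) {
          uint8_t byte = encoded[i++];
          value |= static_cast<uint32_t>(byte & 0x7fu) << shift;
          if ((byte & 0x80u) == 0u) break;
        }
        location += value;
        uint32_t word;  // memcpy avoids the unaligned-access issue the test
        std::memcpy(&word, data->data() + location, sizeof(word));  // handles with __aligned__(1)
        word += static_cast<uint32_t>(delta);
        std::memcpy(data->data() + location, &word, sizeof(word));
      }
    }
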
+    // Decode and apply patch locations.
+    std::vector<uint8_t> actual = initial_data;
+    ElfFileImpl32::ApplyOatPatches(
+        oat_patches.data(), oat_patches.data() + oat_patches.size(), delta,
+        actual.data(), actual.data() + actual.size());
+
+    EXPECT_EQ(expected, actual);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
new file mode 100644
index 000000000..38ac05283
--- /dev/null
+++ b/compiler/exception_test.cc
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+
+#include "base/arena_allocator.h"
+#include "class_linker.h"
+#include "common_runtime_test.h"
+#include "dex_file.h"
+#include "dex_file-inl.h"
+#include "gtest/gtest.h"
+#include "leb128.h"
+#include "mirror/class-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/stack_trace_element.h"
+#include "oat_quick_method_header.h"
+#include "optimizing/stack_map_stream.h"
+#include "runtime.h"
+#include "scoped_thread_state_change.h"
+#include "handle_scope-inl.h"
+#include "thread.h"
+
+namespace art {
+
+class ExceptionTest : public CommonRuntimeTest {
+ protected:
+  virtual void SetUp() {
+    CommonRuntimeTest::SetUp();
+
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<2> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(LoadDex("ExceptionHandle"))));
+    my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader);
+    ASSERT_TRUE(my_klass_ != nullptr);
+    Handle<mirror::Class> klass(hs.NewHandle(my_klass_));
+    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
+    my_klass_ = klass.Get();
+
+    dex_ = my_klass_->GetDexCache()->GetDexFile();
+
+    uint32_t code_size = 12;
+    for (size_t i = 0 ; i < code_size; i++) {
+      fake_code_.push_back(0x70 | i);
+    }
+
+    ArenaPool pool;
+    ArenaAllocator allocator(&pool);
+    StackMapStream stack_maps(&allocator);
+    stack_maps.BeginStackMapEntry(/* dex_pc */ 3u,
+                                  /* native_pc_offset */ 3u,
+                                  /* register_mask */ 0u,
+                                  /* sp_mask */ nullptr,
+                                  /* num_dex_registers */ 0u,
+                                  /* inlining_depth */ 0u);
+    stack_maps.EndStackMapEntry();
+    size_t stack_maps_size = stack_maps.PrepareForFillIn();
+    size_t stack_maps_offset = stack_maps_size + sizeof(OatQuickMethodHeader);
+
+    fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size());
+    MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
+    stack_maps.FillIn(stack_maps_region);
+    OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size);
+    memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header));
+    std::copy(fake_code_.begin(),
+              fake_code_.end(),
+              fake_header_code_and_maps_.begin() + stack_maps_offset);
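
The lines that follow pad the buffer so the code blob starts on the ISA's instruction alignment; the padding is the usual round-up-and-subtract. A minimal sketch, where RoundUpPow2 stands in for ART's RoundUp and is valid for power-of-two alignments only:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Align `x` up to a power-of-two boundary `n`.
    constexpr size_t RoundUpPow2(size_t x, size_t n) {
      return (x + n - 1u) & ~(n - 1u);
    }

    // Padding bytes needed in front of `code_address` so it lands on `alignment`.
    size_t PaddingFor(uintptr_t code_address, size_t alignment) {
      return RoundUpPow2(code_address, alignment) - code_address;
    }

    int main() {
      assert(PaddingFor(0x1003u, 4u) == 1u);
      assert(PaddingFor(0x1004u, 4u) == 0u);
    }

+    // Align the code.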
+ const size_t alignment = GetInstructionSetAlignment(kRuntimeISA); + fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment); + const void* unaligned_code_ptr = + fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size); + size_t offset = dchecked_integral_cast(reinterpret_cast(unaligned_code_ptr)); + size_t padding = RoundUp(offset, alignment) - offset; + // Make sure no resizing takes place. + CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding); + fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0); + const void* code_ptr = reinterpret_cast(unaligned_code_ptr) + padding; + CHECK_EQ(code_ptr, + static_cast(fake_header_code_and_maps_.data() + + (fake_header_code_and_maps_.size() - code_size))); + + if (kRuntimeISA == kArm) { + // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer(). + CHECK_ALIGNED(stack_maps_offset, 2); + } + + method_f_ = my_klass_->FindVirtualMethod("f", "()I", sizeof(void*)); + ASSERT_TRUE(method_f_ != nullptr); + method_f_->SetEntryPointFromQuickCompiledCode(code_ptr); + + method_g_ = my_klass_->FindVirtualMethod("g", "(I)V", sizeof(void*)); + ASSERT_TRUE(method_g_ != nullptr); + method_g_->SetEntryPointFromQuickCompiledCode(code_ptr); + } + + const DexFile* dex_; + + std::vector fake_code_; + std::vector fake_header_code_and_maps_; + + ArtMethod* method_f_; + ArtMethod* method_g_; + + private: + mirror::Class* my_klass_; +}; + +TEST_F(ExceptionTest, FindCatchHandler) { + ScopedObjectAccess soa(Thread::Current()); + const DexFile::CodeItem* code_item = dex_->GetCodeItem(method_f_->GetCodeItemOffset()); + + ASSERT_TRUE(code_item != nullptr); + + ASSERT_EQ(2u, code_item->tries_size_); + ASSERT_NE(0u, code_item->insns_size_in_code_units_); + + const DexFile::TryItem *t0, *t1; + t0 = dex_->GetTryItems(*code_item, 0); + t1 = dex_->GetTryItems(*code_item, 1); + EXPECT_LE(t0->start_addr_, t1->start_addr_); + { + CatchHandlerIterator iter(*code_item, 4 /* Dex PC in the first try block */); + EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_STREQ("Ljava/lang/Exception;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_FALSE(iter.HasNext()); + } + { + CatchHandlerIterator iter(*code_item, 8 /* Dex PC in the second try block */); + EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); + ASSERT_TRUE(iter.HasNext()); + iter.Next(); + EXPECT_FALSE(iter.HasNext()); + } + { + CatchHandlerIterator iter(*code_item, 11 /* Dex PC not in any try block */); + EXPECT_FALSE(iter.HasNext()); + } +} + +TEST_F(ExceptionTest, StackTraceElement) { + Thread* thread = Thread::Current(); + thread->TransitionFromSuspendedToRunnable(); + bool started = runtime_->Start(); + CHECK(started); + JNIEnv* env = thread->GetJniEnv(); + ScopedObjectAccess soa(env); + + std::vector fake_stack; + Runtime* r = Runtime::Current(); + r->SetInstructionSet(kRuntimeISA); + ArtMethod* save_method = r->CreateCalleeSaveMethod(); + r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll); + QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method); + + ASSERT_EQ(kStackAlignment, 16U); + // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t)); + + + // Create three fake stack frames with mapping data created in SetUp. We map offset 3 in the + // code to dex pc 3. 
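
Each fake frame pushed in the next block is four pointer-size words: the callee's ArtMethod* at the stack pointer, filler where spills would sit, and the caller's return pc on top, which is what the stack walker keys on. The helper this test inlines by hand, sketched with invented names:

    #include <cstdint>
    #include <vector>

    // One fake quick frame, bottom-up, mirroring the test's 16-byte frames.
    void PushFakeFrame(std::vector<uintptr_t>* stack, uintptr_t method, uintptr_t return_pc) {
      stack->push_back(method);     // ArtMethod* of the frame's owner
      stack->push_back(0u);         // filler (spill area)
      stack->push_back(0u);         // filler
      stack->push_back(return_pc);  // caller pc; maps back to a dex pc via the stack maps
    }

    int main() {
      std::vector<uintptr_t> stack;
      PushFakeFrame(&stack, /* method */ 0xA0u, /* return_pc */ 0xB0u);
      return stack.size() == 4u ? 0 : 1;
    }
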
+ const uint32_t dex_pc = 3; + + // Create the stack frame for the callee save method, expected by the runtime. + fake_stack.push_back(reinterpret_cast(save_method)); + for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t); + i += sizeof(uintptr_t)) { + fake_stack.push_back(0); + } + + fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( + method_g_, dex_pc, /* is_catch_handler */ false)); // return pc + + // Create/push fake 16byte stack frame for method g + fake_stack.push_back(reinterpret_cast(method_g_)); + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( + method_g_, dex_pc, /* is_catch_handler */ false)); // return pc + + // Create/push fake 16byte stack frame for method f + fake_stack.push_back(reinterpret_cast(method_f_)); + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(0xEBAD6070); // return pc + + // Push Method* of null to terminate the trace + fake_stack.push_back(0); + + // Push null values which will become null incoming arguments. + fake_stack.push_back(0); + fake_stack.push_back(0); + fake_stack.push_back(0); + + // Set up thread to appear as if we called out of method_g_ at pc dex 3 + thread->SetTopOfStack(reinterpret_cast(&fake_stack[0])); + + jobject internal = thread->CreateInternalStackTrace(soa); + ASSERT_TRUE(internal != nullptr); + jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); + ASSERT_TRUE(ste_array != nullptr); + auto* trace_array = soa.Decode*>(ste_array); + + ASSERT_TRUE(trace_array != nullptr); + ASSERT_TRUE(trace_array->Get(0) != nullptr); + EXPECT_STREQ("ExceptionHandle", + trace_array->Get(0)->GetDeclaringClass()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("ExceptionHandle.java", + trace_array->Get(0)->GetFileName()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("g", trace_array->Get(0)->GetMethodName()->ToModifiedUtf8().c_str()); + EXPECT_EQ(37, trace_array->Get(0)->GetLineNumber()); + + ASSERT_TRUE(trace_array->Get(1) != nullptr); + EXPECT_STREQ("ExceptionHandle", + trace_array->Get(1)->GetDeclaringClass()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("ExceptionHandle.java", + trace_array->Get(1)->GetFileName()->ToModifiedUtf8().c_str()); + EXPECT_STREQ("f", trace_array->Get(1)->GetMethodName()->ToModifiedUtf8().c_str()); + EXPECT_EQ(22, trace_array->Get(1)->GetLineNumber()); + + thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach. +} + +} // namespace art diff --git a/compiler/image_test.cc b/compiler/image_test.cc new file mode 100644 index 000000000..a68ab7cc9 --- /dev/null +++ b/compiler/image_test.cc @@ -0,0 +1,520 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "image.h" + +#include +#include +#include + +#include "base/unix_file/fd_file.h" +#include "class_linker-inl.h" +#include "common_compiler_test.h" +#include "debug/method_debug_info.h" +#include "driver/compiler_options.h" +#include "elf_writer.h" +#include "elf_writer_quick.h" +#include "gc/space/image_space.h" +#include "image_writer.h" +#include "linker/multi_oat_relative_patcher.h" +#include "lock_word.h" +#include "mirror/object-inl.h" +#include "oat_writer.h" +#include "scoped_thread_state_change.h" +#include "signal_catcher.h" +#include "utils.h" + +namespace art { + +static const uintptr_t kRequestedImageBase = ART_BASE_ADDRESS; + +struct CompilationHelper { + std::vector dex_file_locations; + std::vector image_locations; + std::vector> extra_dex_files; + std::vector image_files; + std::vector oat_files; + std::string image_dir; + + void Compile(CompilerDriver* driver, + ImageHeader::StorageMode storage_mode); + + std::vector GetImageObjectSectionSizes(); + + ~CompilationHelper(); +}; + +class ImageTest : public CommonCompilerTest { + protected: + virtual void SetUp() { + ReserveImageSpace(); + CommonCompilerTest::SetUp(); + } + + void TestWriteRead(ImageHeader::StorageMode storage_mode); + + void Compile(ImageHeader::StorageMode storage_mode, + CompilationHelper& out_helper, + const std::string& extra_dex = "", + const std::string& image_class = ""); + + std::unordered_set* GetImageClasses() OVERRIDE { + return new std::unordered_set(image_classes_); + } + + private: + std::unordered_set image_classes_; +}; + +CompilationHelper::~CompilationHelper() { + for (ScratchFile& image_file : image_files) { + image_file.Unlink(); + } + for (ScratchFile& oat_file : oat_files) { + oat_file.Unlink(); + } + const int rmdir_result = rmdir(image_dir.c_str()); + CHECK_EQ(0, rmdir_result); +} + +std::vector CompilationHelper::GetImageObjectSectionSizes() { + std::vector ret; + for (ScratchFile& image_file : image_files) { + std::unique_ptr file(OS::OpenFileForReading(image_file.GetFilename().c_str())); + CHECK(file.get() != nullptr); + ImageHeader image_header; + CHECK_EQ(file->ReadFully(&image_header, sizeof(image_header)), true); + CHECK(image_header.IsValid()); + ret.push_back(image_header.GetImageSize()); + } + return ret; +} + +void CompilationHelper::Compile(CompilerDriver* driver, + ImageHeader::StorageMode storage_mode) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + std::vector class_path = class_linker->GetBootClassPath(); + + for (const std::unique_ptr& dex_file : extra_dex_files) { + { + ScopedObjectAccess soa(Thread::Current()); + // Inject in boot class path so that the compiler driver can see it. + class_linker->AppendToBootClassPath(soa.Self(), *dex_file.get()); + } + class_path.push_back(dex_file.get()); + } + + // Enable write for dex2dex. + for (const DexFile* dex_file : class_path) { + dex_file_locations.push_back(dex_file->GetLocation()); + if (dex_file->IsReadOnly()) { + dex_file->EnableWrite(); + } + } + + { + // Create a generic tmp file, to be the base of the .art and .oat temporary files. 
+ ScratchFile location; + for (int i = 0; i < static_cast(class_path.size()); ++i) { + std::string cur_location(StringPrintf("%s-%d.art", location.GetFilename().c_str(), i)); + image_locations.push_back(ScratchFile(cur_location)); + } + } + std::vector image_filenames; + for (ScratchFile& file : image_locations) { + std::string image_filename(GetSystemImageFilename(file.GetFilename().c_str(), kRuntimeISA)); + image_filenames.push_back(image_filename); + size_t pos = image_filename.rfind('/'); + CHECK_NE(pos, std::string::npos) << image_filename; + if (image_dir.empty()) { + image_dir = image_filename.substr(0, pos); + int mkdir_result = mkdir(image_dir.c_str(), 0700); + CHECK_EQ(0, mkdir_result) << image_dir; + } + image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str()))); + } + + std::vector oat_filenames; + for (const std::string& image_filename : image_filenames) { + std::string oat_filename(image_filename.substr(0, image_filename.size() - strlen("art")) + "oat"); + oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str()))); + oat_filenames.push_back(oat_filename); + } + + std::unordered_map dex_file_to_oat_index_map; + std::vector oat_filename_vector; + for (const std::string& file : oat_filenames) { + oat_filename_vector.push_back(file.c_str()); + } + std::vector image_filename_vector; + for (const std::string& file : image_filenames) { + image_filename_vector.push_back(file.c_str()); + } + size_t image_idx = 0; + for (const DexFile* dex_file : class_path) { + dex_file_to_oat_index_map.emplace(dex_file, image_idx); + ++image_idx; + } + // TODO: compile_pic should be a test argument. + std::unique_ptr writer(new ImageWriter(*driver, + kRequestedImageBase, + /*compile_pic*/false, + /*compile_app_image*/false, + storage_mode, + oat_filename_vector, + dex_file_to_oat_index_map)); + { + { + jobject class_loader = nullptr; + TimingLogger timings("ImageTest::WriteRead", false, false); + TimingLogger::ScopedTiming t("CompileAll", &timings); + driver->SetDexFilesForOatFile(class_path); + driver->CompileAll(class_loader, class_path, &timings); + + t.NewTiming("WriteElf"); + SafeMap key_value_store; + std::vector dex_filename_vector; + for (size_t i = 0; i < class_path.size(); ++i) { + dex_filename_vector.push_back(""); + } + key_value_store.Put(OatHeader::kBootClassPathKey, + gc::space::ImageSpace::GetMultiImageBootClassPath( + dex_filename_vector, + oat_filename_vector, + image_filename_vector)); + + std::vector> elf_writers; + std::vector> oat_writers; + for (ScratchFile& oat_file : oat_files) { + elf_writers.emplace_back(CreateElfWriterQuick(driver->GetInstructionSet(), + driver->GetInstructionSetFeatures(), + &driver->GetCompilerOptions(), + oat_file.GetFile())); + elf_writers.back()->Start(); + oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true, &timings)); + } + + std::vector rodata; + std::vector> opened_dex_files_map; + std::vector> opened_dex_files; + // Now that we have finalized key_value_store_, start writing the oat file. 
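
A small point from the block above: the oat file name is derived from the image file name by dropping the three-character "art" suffix and appending "oat", keeping the dot. Sketched standalone (assuming inputs that really end in "art"):

    #include <cassert>
    #include <string>

    std::string OatFromArt(const std::string& image_filename) {
      // strlen("art") == 3; the '.' before the suffix is preserved.
      return image_filename.substr(0, image_filename.size() - 3u) + "oat";
    }

    int main() {
      assert(OatFromArt("/tmp/boot-0.art") == "/tmp/boot-0.oat");
    }
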
+ for (size_t i = 0, size = oat_writers.size(); i != size; ++i) { + const DexFile* dex_file = class_path[i]; + rodata.push_back(elf_writers[i]->StartRoData()); + ArrayRef raw_dex_file( + reinterpret_cast(&dex_file->GetHeader()), + dex_file->GetHeader().file_size_); + oat_writers[i]->AddRawDexFileSource(raw_dex_file, + dex_file->GetLocation().c_str(), + dex_file->GetLocationChecksum()); + + std::unique_ptr cur_opened_dex_files_map; + std::vector> cur_opened_dex_files; + bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles( + rodata.back(), + oat_files[i].GetFile(), + driver->GetInstructionSet(), + driver->GetInstructionSetFeatures(), + &key_value_store, + /* verify */ false, // Dex files may be dex-to-dex-ed, don't verify. + &cur_opened_dex_files_map, + &cur_opened_dex_files); + ASSERT_TRUE(dex_files_ok); + + if (cur_opened_dex_files_map != nullptr) { + opened_dex_files_map.push_back(std::move(cur_opened_dex_files_map)); + for (std::unique_ptr& cur_dex_file : cur_opened_dex_files) { + // dex_file_oat_index_map_.emplace(dex_file.get(), i); + opened_dex_files.push_back(std::move(cur_dex_file)); + } + } else { + ASSERT_TRUE(cur_opened_dex_files.empty()); + } + } + + bool image_space_ok = writer->PrepareImageAddressSpace(); + ASSERT_TRUE(image_space_ok); + + for (size_t i = 0, size = oat_files.size(); i != size; ++i) { + linker::MultiOatRelativePatcher patcher(driver->GetInstructionSet(), + driver->GetInstructionSetFeatures()); + OatWriter* const oat_writer = oat_writers[i].get(); + ElfWriter* const elf_writer = elf_writers[i].get(); + std::vector cur_dex_files(1u, class_path[i]); + oat_writer->PrepareLayout(driver, writer.get(), cur_dex_files, &patcher); + size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset(); + size_t text_size = oat_writer->GetSize() - rodata_size; + elf_writer->SetLoadedSectionSizes(rodata_size, text_size, oat_writer->GetBssSize()); + + writer->UpdateOatFileLayout(i, + elf_writer->GetLoadedSize(), + oat_writer->GetOatDataOffset(), + oat_writer->GetSize()); + + bool rodata_ok = oat_writer->WriteRodata(rodata[i]); + ASSERT_TRUE(rodata_ok); + elf_writer->EndRoData(rodata[i]); + + OutputStream* text = elf_writer->StartText(); + bool text_ok = oat_writer->WriteCode(text); + ASSERT_TRUE(text_ok); + elf_writer->EndText(text); + + bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u); + ASSERT_TRUE(header_ok); + + writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader()); + + elf_writer->WriteDynamicSection(); + elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo()); + elf_writer->WritePatchLocations(oat_writer->GetAbsolutePatchLocations()); + + bool success = elf_writer->End(); + ASSERT_TRUE(success); + } + } + + bool success_image = writer->Write(kInvalidFd, + image_filename_vector, + oat_filename_vector); + ASSERT_TRUE(success_image); + + for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) { + const char* oat_filename = oat_filenames[i].c_str(); + std::unique_ptr oat_file(OS::OpenFileReadWrite(oat_filename)); + ASSERT_TRUE(oat_file != nullptr); + bool success_fixup = ElfWriter::Fixup(oat_file.get(), + writer->GetOatDataBegin(i)); + ASSERT_TRUE(success_fixup); + ASSERT_EQ(oat_file->FlushCloseOrErase(), 0) << "Could not flush and close oat file " + << oat_filename; + } + } +} + +void ImageTest::Compile(ImageHeader::StorageMode storage_mode, + CompilationHelper& helper, + const std::string& extra_dex, + const std::string& image_class) { + if (!image_class.empty()) { + image_classes_.insert(image_class); + } + 
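
One takeaway from the per-oat-file loop above: the loaded section sizes passed to SetLoadedSectionSizes() are never tracked separately, they fall out of two totals, because the oat body is laid out as [rodata | text]. A sketch of that arithmetic with hypothetical field names:

    #include <cstddef>

    struct OatLayout {
      size_t executable_offset;  // where .text starts inside the oat body
      size_t total_size;         // size of the whole oat body
    };

    size_t RodataSize(const OatLayout& l) { return l.executable_offset; }
    size_t TextSize(const OatLayout& l) { return l.total_size - l.executable_offset; }

    int main() {
      const OatLayout layout{4096u, 10240u};
      return (RodataSize(layout) == 4096u && TextSize(layout) == 6144u) ? 0 : 1;
    }
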
CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U); + // Set inline filter values. + compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit); + compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits); + image_classes_.clear(); + if (!extra_dex.empty()) { + helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str()); + } + helper.Compile(compiler_driver_.get(), storage_mode); + if (!image_class.empty()) { + // Make sure the class got initialized. + ScopedObjectAccess soa(Thread::Current()); + ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); + mirror::Class* klass = class_linker->FindSystemClass(Thread::Current(), image_class.c_str()); + EXPECT_TRUE(klass != nullptr); + EXPECT_TRUE(klass->IsInitialized()); + } +} + +void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) { + CompilationHelper helper; + Compile(storage_mode, /*out*/ helper); + std::vector image_file_sizes; + for (ScratchFile& image_file : helper.image_files) { + std::unique_ptr file(OS::OpenFileForReading(image_file.GetFilename().c_str())); + ASSERT_TRUE(file.get() != nullptr); + ImageHeader image_header; + ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true); + ASSERT_TRUE(image_header.IsValid()); + const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap); + ASSERT_GE(bitmap_section.Offset(), sizeof(image_header)); + ASSERT_NE(0U, bitmap_section.Size()); + + gc::Heap* heap = Runtime::Current()->GetHeap(); + ASSERT_TRUE(heap->HaveContinuousSpaces()); + gc::space::ContinuousSpace* space = heap->GetNonMovingSpace(); + ASSERT_FALSE(space->IsImageSpace()); + ASSERT_TRUE(space != nullptr); + ASSERT_TRUE(space->IsMallocSpace()); + + image_file_sizes.push_back(file->GetLength()); + } + + ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr); + std::unordered_set image_classes(*compiler_driver_->GetImageClasses()); + + // Need to delete the compiler since it has worker threads which are attached to runtime. + compiler_driver_.reset(); + + // Tear down old runtime before making a new one, clearing out misc state. + + // Remove the reservation of the memory for use to load the image. + // Need to do this before we reset the runtime. + UnreserveImageSpace(); + + helper.extra_dex_files.clear(); + runtime_.reset(); + java_lang_dex_file_ = nullptr; + + MemMap::Init(); + + RuntimeOptions options; + std::string image("-Ximage:"); + image.append(helper.image_locations[0].GetFilename()); + options.push_back(std::make_pair(image.c_str(), static_cast(nullptr))); + // By default the compiler this creates will not include patch information. + options.push_back(std::make_pair("-Xnorelocate", nullptr)); + + if (!Runtime::Create(options, false)) { + LOG(FATAL) << "Failed to create runtime"; + return; + } + runtime_.reset(Runtime::Current()); + // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, + // give it away now and then switch to a more managable ScopedObjectAccess. + Thread::Current()->TransitionFromRunnableToSuspended(kNative); + ScopedObjectAccess soa(Thread::Current()); + ASSERT_TRUE(runtime_.get() != nullptr); + class_linker_ = runtime_->GetClassLinker(); + + gc::Heap* heap = Runtime::Current()->GetHeap(); + ASSERT_TRUE(heap->HasBootImageSpace()); + ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace()); + + // We loaded the runtime with an explicit image, so it must exist. 
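
RuntimeOptions, as used above, is a vector of (string, pointer) pairs, and flags that carry their value inline, such as -Ximage:<path>, are assembled by plain concatenation. A sketch of that construction; the alias is written to match ART's typedef but should be treated as an assumption:

    #include <string>
    #include <utility>
    #include <vector>

    using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;

    RuntimeOptions MakeImageOptions(const std::string& image_path) {
      RuntimeOptions options;
      options.push_back(std::make_pair("-Ximage:" + image_path, nullptr));
      options.push_back(std::make_pair("-Xnorelocate", nullptr));  // skip relocation
      return options;
    }

    int main() {
      // Hypothetical path, for illustration only.
      return MakeImageOptions("/tmp/android-data/core.art").size() == 2u ? 0 : 1;
    }
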
+  ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
+  for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) {
+    std::unique_ptr<const DexFile> dex(
+        LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str()));
+    ASSERT_TRUE(dex != nullptr);
+    uint64_t image_file_size = image_file_sizes[i];
+    gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i];
+    ASSERT_TRUE(image_space != nullptr);
+    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+      // Uncompressed, image should be smaller than file.
+      ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size);
+    } else if (image_file_size > 16 * KB) {
+      // Compressed, file should be smaller than image. Not really valid for small images.
+      ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
+    }
+
+    image_space->VerifyImageAllocations();
+    uint8_t* image_begin = image_space->Begin();
+    uint8_t* image_end = image_space->End();
+    if (i == 0) {
+      // This check is only valid for image 0.
+      CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
+    }
+    for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
+      const DexFile::ClassDef& class_def = dex->GetClassDef(j);
+      const char* descriptor = dex->GetClassDescriptor(class_def);
+      mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
+      EXPECT_TRUE(klass != nullptr) << descriptor;
+      if (image_classes.find(descriptor) == image_classes.end()) {
+        EXPECT_TRUE(reinterpret_cast<uint8_t*>(klass) >= image_end ||
+                    reinterpret_cast<uint8_t*>(klass) < image_begin) << descriptor;
+      } else {
+        // Image classes should be located inside the image.
+        EXPECT_LT(image_begin, reinterpret_cast<uint8_t*>(klass)) << descriptor;
+        EXPECT_LT(reinterpret_cast<uint8_t*>(klass), image_end) << descriptor;
+      }
+      EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
+    }
+  }
+}
+
+TEST_F(ImageTest, WriteReadUncompressed) {
+  TestWriteRead(ImageHeader::kStorageModeUncompressed);
+}
+
+TEST_F(ImageTest, WriteReadLZ4) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4);
+}
+
+TEST_F(ImageTest, WriteReadLZ4HC) {
+  TestWriteRead(ImageHeader::kStorageModeLZ4HC);
+}
+
+TEST_F(ImageTest, TestImageLayout) {
+  std::vector<size_t> image_sizes;
+  std::vector<size_t> image_sizes_extra;
+  // Compile multi-image with ImageLayoutA being the last image.
+  {
+    CompilationHelper helper;
+    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", "LMyClass;");
+    image_sizes = helper.GetImageObjectSectionSizes();
+  }
+  TearDown();
+  runtime_.reset();
+  SetUp();
+  // Compile multi-image with ImageLayoutB being the last image.
+  {
+    CompilationHelper helper;
+    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", "LMyClass;");
+    image_sizes_extra = helper.GetImageObjectSectionSizes();
+  }
+  // Make sure that the new stuff in the clinit in ImageLayoutB is in the last image and not in the
+  // first two images.
+  ASSERT_EQ(image_sizes.size(), image_sizes_extra.size());
+  // Sizes of the images should be the same. These sizes are for the whole image unrounded.
+  for (size_t i = 0; i < image_sizes.size() - 1; ++i) {
+    EXPECT_EQ(image_sizes[i], image_sizes_extra[i]);
+  }
+  // Last image should be larger since it has a hash map and a string.
+ EXPECT_LT(image_sizes.back(), image_sizes_extra.back()); +} + +TEST_F(ImageTest, ImageHeaderIsValid) { + uint32_t image_begin = ART_BASE_ADDRESS; + uint32_t image_size_ = 16 * KB; + uint32_t image_roots = ART_BASE_ADDRESS + (1 * KB); + uint32_t oat_checksum = 0; + uint32_t oat_file_begin = ART_BASE_ADDRESS + (4 * KB); // page aligned + uint32_t oat_data_begin = ART_BASE_ADDRESS + (8 * KB); // page aligned + uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB); + uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB); + ImageSection sections[ImageHeader::kSectionCount]; + ImageHeader image_header(image_begin, + image_size_, + sections, + image_roots, + oat_checksum, + oat_file_begin, + oat_data_begin, + oat_data_end, + oat_file_end, + /*boot_image_begin*/0U, + /*boot_image_size*/0U, + /*boot_oat_begin*/0U, + /*boot_oat_size_*/0U, + sizeof(void*), + /*compile_pic*/false, + /*is_pic*/false, + ImageHeader::kDefaultStorageMode, + /*data_size*/0u); + ASSERT_TRUE(image_header.IsValid()); + ASSERT_TRUE(!image_header.IsAppImage()); + + char* magic = const_cast(image_header.GetMagic()); + strcpy(magic, ""); // bad magic + ASSERT_FALSE(image_header.IsValid()); + strcpy(magic, "art\n000"); // bad version + ASSERT_FALSE(image_header.IsValid()); +} + +} // namespace art diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc new file mode 100644 index 000000000..2d6c4dab6 --- /dev/null +++ b/compiler/image_writer.cc @@ -0,0 +1,2474 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "image_writer.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include "art_field-inl.h" +#include "art_method-inl.h" +#include "base/logging.h" +#include "base/unix_file/fd_file.h" +#include "class_linker-inl.h" +#include "compiled_method.h" +#include "dex_file-inl.h" +#include "driver/compiler_driver.h" +#include "elf_file.h" +#include "elf_utils.h" +#include "elf_writer.h" +#include "gc/accounting/card_table-inl.h" +#include "gc/accounting/heap_bitmap.h" +#include "gc/accounting/space_bitmap-inl.h" +#include "gc/heap.h" +#include "gc/space/large_object_space.h" +#include "gc/space/space-inl.h" +#include "globals.h" +#include "image.h" +#include "intern_table.h" +#include "linear_alloc.h" +#include "lock_word.h" +#include "mirror/abstract_method.h" +#include "mirror/array-inl.h" +#include "mirror/class-inl.h" +#include "mirror/class_loader.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/method.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/string-inl.h" +#include "oat.h" +#include "oat_file.h" +#include "oat_file_manager.h" +#include "runtime.h" +#include "scoped_thread_state_change.h" +#include "handle_scope-inl.h" +#include "utils/dex_cache_arrays_layout-inl.h" + +using ::art::mirror::Class; +using ::art::mirror::DexCache; +using ::art::mirror::Object; +using ::art::mirror::ObjectArray; +using ::art::mirror::String; + +namespace art { + +// Separate objects into multiple bins to optimize dirty memory use. +static constexpr bool kBinObjects = true; + +// Return true if an object is already in an image space. +bool ImageWriter::IsInBootImage(const void* obj) const { + gc::Heap* const heap = Runtime::Current()->GetHeap(); + if (!compile_app_image_) { + DCHECK(heap->GetBootImageSpaces().empty()); + return false; + } + for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { + const uint8_t* image_begin = boot_image_space->Begin(); + // Real image end including ArtMethods and ArtField sections. + const uint8_t* image_end = image_begin + boot_image_space->GetImageHeader().GetImageSize(); + if (image_begin <= obj && obj < image_end) { + return true; + } + } + return false; +} + +bool ImageWriter::IsInBootOatFile(const void* ptr) const { + gc::Heap* const heap = Runtime::Current()->GetHeap(); + if (!compile_app_image_) { + DCHECK(heap->GetBootImageSpaces().empty()); + return false; + } + for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { + const ImageHeader& image_header = boot_image_space->GetImageHeader(); + if (image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd()) { + return true; + } + } + return false; +} + +static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) + SHARED_REQUIRES(Locks::mutator_lock_) { + Class* klass = obj->GetClass(); + CHECK_NE(PrettyClass(klass), "com.android.dex.Dex"); +} + +static void CheckNoDexObjects() { + ScopedObjectAccess soa(Thread::Current()); + Runtime::Current()->GetHeap()->VisitObjects(CheckNoDexObjectsCallback, nullptr); +} + +bool ImageWriter::PrepareImageAddressSpace() { + target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet()); + gc::Heap* const heap = Runtime::Current()->GetHeap(); + { + ScopedObjectAccess soa(Thread::Current()); + PruneNonImageClasses(); // Remove junk + if (!compile_app_image_) { + // Avoid for app image since this may increase RAM and image size. 
+ ComputeLazyFieldsForImageClasses(); // Add useful information + } + } + heap->CollectGarbage(false); // Remove garbage. + + // Dex caches must not have their dex fields set in the image. These are memory buffers of mapped + // dex files. + // + // We may open them in the unstarted-runtime code for class metadata. Their fields should all be + // reset in PruneNonImageClasses and the objects reclaimed in the GC. Make sure that's actually + // true. + if (kIsDebugBuild) { + CheckNoDexObjects(); + } + + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + CheckNonImageClassesRemoved(); + } + + { + ScopedObjectAccess soa(Thread::Current()); + CalculateNewObjectOffsets(); + } + + // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and + // bin size sums being calculated. + if (!AllocMemory()) { + return false; + } + + return true; +} + +bool ImageWriter::Write(int image_fd, + const std::vector& image_filenames, + const std::vector& oat_filenames) { + // If image_fd or oat_fd are not kInvalidFd then we may have empty strings in image_filenames or + // oat_filenames. + CHECK(!image_filenames.empty()); + if (image_fd != kInvalidFd) { + CHECK_EQ(image_filenames.size(), 1u); + } + CHECK(!oat_filenames.empty()); + CHECK_EQ(image_filenames.size(), oat_filenames.size()); + + { + ScopedObjectAccess soa(Thread::Current()); + for (size_t i = 0; i < oat_filenames.size(); ++i) { + CreateHeader(i); + CopyAndFixupNativeData(i); + } + } + + { + // TODO: heap validation can't handle these fix up passes. + ScopedObjectAccess soa(Thread::Current()); + Runtime::Current()->GetHeap()->DisableObjectValidation(); + CopyAndFixupObjects(); + } + + for (size_t i = 0; i < image_filenames.size(); ++i) { + const char* image_filename = image_filenames[i]; + ImageInfo& image_info = GetImageInfo(i); + std::unique_ptr image_file; + if (image_fd != kInvalidFd) { + if (strlen(image_filename) == 0u) { + image_file.reset(new File(image_fd, unix_file::kCheckSafeUsage)); + // Empty the file in case it already exists. + if (image_file != nullptr) { + TEMP_FAILURE_RETRY(image_file->SetLength(0)); + TEMP_FAILURE_RETRY(image_file->Flush()); + } + } else { + LOG(ERROR) << "image fd " << image_fd << " name " << image_filename; + } + } else { + image_file.reset(OS::CreateEmptyFile(image_filename)); + } + + if (image_file == nullptr) { + LOG(ERROR) << "Failed to open image file " << image_filename; + return false; + } + + if (!compile_app_image_ && fchmod(image_file->Fd(), 0644) != 0) { + PLOG(ERROR) << "Failed to make image file world readable: " << image_filename; + image_file->Erase(); + return EXIT_FAILURE; + } + + std::unique_ptr compressed_data; + // Image data size excludes the bitmap and the header. + ImageHeader* const image_header = reinterpret_cast(image_info.image_->Begin()); + const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader); + char* image_data = reinterpret_cast(image_info.image_->Begin()) + sizeof(ImageHeader); + size_t data_size; + const char* image_data_to_write; + const uint64_t compress_start_time = NanoTime(); + + CHECK_EQ(image_header->storage_mode_, image_storage_mode_); + switch (image_storage_mode_) { + case ImageHeader::kStorageModeLZ4HC: // Fall-through. 
+ case ImageHeader::kStorageModeLZ4: { + const size_t compressed_max_size = LZ4_compressBound(image_data_size); + compressed_data.reset(new char[compressed_max_size]); + data_size = LZ4_compress( + reinterpret_cast(image_info.image_->Begin()) + sizeof(ImageHeader), + &compressed_data[0], + image_data_size); + + break; + } + /* + * Disabled due to image_test64 flakyness. Both use same decompression. b/27560444 + case ImageHeader::kStorageModeLZ4HC: { + // Bound is same as non HC. + const size_t compressed_max_size = LZ4_compressBound(image_data_size); + compressed_data.reset(new char[compressed_max_size]); + data_size = LZ4_compressHC( + reinterpret_cast(image_info.image_->Begin()) + sizeof(ImageHeader), + &compressed_data[0], + image_data_size); + break; + } + */ + case ImageHeader::kStorageModeUncompressed: { + data_size = image_data_size; + image_data_to_write = image_data; + break; + } + default: { + LOG(FATAL) << "Unsupported"; + UNREACHABLE(); + } + } + + if (compressed_data != nullptr) { + image_data_to_write = &compressed_data[0]; + VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in " + << PrettyDuration(NanoTime() - compress_start_time); + if (kIsDebugBuild) { + std::unique_ptr temp(new uint8_t[image_data_size]); + const size_t decompressed_size = LZ4_decompress_safe( + reinterpret_cast(&compressed_data[0]), + reinterpret_cast(&temp[0]), + data_size, + image_data_size); + CHECK_EQ(decompressed_size, image_data_size); + CHECK_EQ(memcmp(image_data, &temp[0], image_data_size), 0) << image_storage_mode_; + } + } + + // Write out the image + fields + methods. + const bool is_compressed = compressed_data != nullptr; + if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) { + PLOG(ERROR) << "Failed to write image file data " << image_filename; + image_file->Erase(); + return false; + } + + // Write out the image bitmap at the page aligned start of the image end, also uncompressed for + // convenience. + const ImageSection& bitmap_section = image_header->GetImageSection( + ImageHeader::kSectionImageBitmap); + // Align up since data size may be unaligned if the image is compressed. + size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize); + if (!is_compressed) { + CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset()); + } + if (!image_file->PwriteFully(reinterpret_cast(image_info.image_bitmap_->Begin()), + bitmap_section.Size(), + bitmap_position_in_file)) { + PLOG(ERROR) << "Failed to write image file " << image_filename; + image_file->Erase(); + return false; + } + + int err = image_file->Flush(); + if (err < 0) { + PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err; + image_file->Erase(); + return false; + } + + // Write header last in case the compiler gets killed in the middle of image writing. + // We do not want to have a corrupted image with a valid header. + // The header is uncompressed since it contains whether the image is compressed or not. 
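
The compression path above pairs LZ4_compressBound(), which gives the worst-case output size, with a debug-build decompress-and-compare round trip. The same shape standalone; LZ4_compress_default is the current spelling of the deprecated LZ4_compress the patch calls:

    #include <lz4.h>

    #include <cassert>
    #include <cstring>
    #include <string>
    #include <vector>

    int main() {
      const std::string payload(4096, 'x');  // stand-in for the image data past the header

      // Compress into a worst-case-sized buffer.
      std::vector<char> compressed(LZ4_compressBound(static_cast<int>(payload.size())));
      const int data_size = LZ4_compress_default(payload.data(),
                                                 compressed.data(),
                                                 static_cast<int>(payload.size()),
                                                 static_cast<int>(compressed.size()));
      assert(data_size > 0);

      // Round trip, as the writer does under kIsDebugBuild.
      std::vector<char> temp(payload.size());
      const int decompressed_size = LZ4_decompress_safe(compressed.data(),
                                                        temp.data(),
                                                        data_size,
                                                        static_cast<int>(temp.size()));
      assert(decompressed_size == static_cast<int>(payload.size()));
      assert(std::memcmp(payload.data(), temp.data(), payload.size()) == 0);
    }
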
+ image_header->data_size_ = data_size; + if (!image_file->PwriteFully(reinterpret_cast(image_info.image_->Begin()), + sizeof(ImageHeader), + 0)) { + PLOG(ERROR) << "Failed to write image file header " << image_filename; + image_file->Erase(); + return false; + } + + CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(), + static_cast(image_file->GetLength())); + if (image_file->FlushCloseOrErase() != 0) { + PLOG(ERROR) << "Failed to flush and close image file " << image_filename; + return false; + } + } + return true; +} + +void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) { + DCHECK(object != nullptr); + DCHECK_NE(offset, 0U); + + // The object is already deflated from when we set the bin slot. Just overwrite the lock word. + object->SetLockWord(LockWord::FromForwardingAddress(offset), false); + DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u); + DCHECK(IsImageOffsetAssigned(object)); +} + +void ImageWriter::UpdateImageOffset(mirror::Object* obj, uintptr_t offset) { + DCHECK(IsImageOffsetAssigned(obj)) << obj << " " << offset; + obj->SetLockWord(LockWord::FromForwardingAddress(offset), false); + DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0u); +} + +void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) { + DCHECK(object != nullptr); + DCHECK_NE(image_objects_offset_begin_, 0u); + + size_t oat_index = GetOatIndex(object); + ImageInfo& image_info = GetImageInfo(oat_index); + size_t bin_slot_offset = image_info.bin_slot_offsets_[bin_slot.GetBin()]; + size_t new_offset = bin_slot_offset + bin_slot.GetIndex(); + DCHECK_ALIGNED(new_offset, kObjectAlignment); + + SetImageOffset(object, new_offset); + DCHECK_LT(new_offset, image_info.image_end_); +} + +bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const { + // Will also return true if the bin slot was assigned since we are reusing the lock word. + DCHECK(object != nullptr); + return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress; +} + +size_t ImageWriter::GetImageOffset(mirror::Object* object) const { + DCHECK(object != nullptr); + DCHECK(IsImageOffsetAssigned(object)); + LockWord lock_word = object->GetLockWord(false); + size_t offset = lock_word.ForwardingAddress(); + size_t oat_index = GetOatIndex(object); + const ImageInfo& image_info = GetImageInfo(oat_index); + DCHECK_LT(offset, image_info.image_end_); + return offset; +} + +void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) { + DCHECK(object != nullptr); + DCHECK(!IsImageOffsetAssigned(object)); + DCHECK(!IsImageBinSlotAssigned(object)); + + // Before we stomp over the lock word, save the hash code for later. + LockWord lw(object->GetLockWord(false)); + switch (lw.GetState()) { + case LockWord::kFatLocked: { + LOG(FATAL) << "Fat locked object " << object << " found during object copy"; + break; + } + case LockWord::kThinLocked: { + LOG(FATAL) << "Thin locked object " << object << " found during object copy"; + break; + } + case LockWord::kUnlocked: + // No hash, don't need to save it. 
+ break; + case LockWord::kHashCode: + DCHECK(saved_hashcode_map_.find(object) == saved_hashcode_map_.end()); + saved_hashcode_map_.emplace(object, lw.GetHashCode()); + break; + default: + LOG(FATAL) << "Unreachable."; + UNREACHABLE(); + } + object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()), false); + DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u); + DCHECK(IsImageBinSlotAssigned(object)); +} + +void ImageWriter::PrepareDexCacheArraySlots() { + // Prepare dex cache array starts based on the ordering specified in the CompilerDriver. + // Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned() + // when AssignImageBinSlot() assigns their indexes out or order. + for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) { + auto it = dex_file_oat_index_map_.find(dex_file); + DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation(); + ImageInfo& image_info = GetImageInfo(it->second); + image_info.dex_cache_array_starts_.Put(dex_file, image_info.bin_slot_sizes_[kBinDexCacheArray]); + DexCacheArraysLayout layout(target_ptr_size_, dex_file); + image_info.bin_slot_sizes_[kBinDexCacheArray] += layout.Size(); + } + + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + Thread* const self = Thread::Current(); + ReaderMutexLock mu(self, *class_linker->DexLock()); + for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) { + mirror::DexCache* dex_cache = + down_cast(self->DecodeJObject(data.weak_root)); + if (dex_cache == nullptr || IsInBootImage(dex_cache)) { + continue; + } + const DexFile* dex_file = dex_cache->GetDexFile(); + CHECK(dex_file_oat_index_map_.find(dex_file) != dex_file_oat_index_map_.end()) + << "Dex cache should have been pruned " << dex_file->GetLocation() + << "; possibly in class path"; + DexCacheArraysLayout layout(target_ptr_size_, dex_file); + DCHECK(layout.Valid()); + size_t oat_index = GetOatIndexForDexCache(dex_cache); + ImageInfo& image_info = GetImageInfo(oat_index); + uint32_t start = image_info.dex_cache_array_starts_.Get(dex_file); + DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr); + AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(), + start + layout.TypesOffset(), + dex_cache); + DCHECK_EQ(dex_file->NumMethodIds() != 0u, dex_cache->GetResolvedMethods() != nullptr); + AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(), + start + layout.MethodsOffset(), + dex_cache); + DCHECK_EQ(dex_file->NumFieldIds() != 0u, dex_cache->GetResolvedFields() != nullptr); + AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(), + start + layout.FieldsOffset(), + dex_cache); + DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr); + AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), dex_cache); + } +} + +void ImageWriter::AddDexCacheArrayRelocation(void* array, size_t offset, DexCache* dex_cache) { + if (array != nullptr) { + DCHECK(!IsInBootImage(array)); + size_t oat_index = GetOatIndexForDexCache(dex_cache); + native_object_relocations_.emplace(array, + NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeDexCacheArray }); + } +} + +void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) { + DCHECK(arr != nullptr); + if (kIsDebugBuild) { + for (size_t i = 0, len = arr->GetLength(); i < len; i++) { + ArtMethod* method = arr->GetElementPtrSize(i, target_ptr_size_); + if (method != nullptr && !method->IsRuntimeMethod()) { + 
mirror::Class* klass = method->GetDeclaringClass(); + CHECK(klass == nullptr || KeepClass(klass)) + << PrettyClass(klass) << " should be a kept class"; + } + } + } + // kBinArtMethodClean picked arbitrarily, just required to differentiate between ArtFields and + // ArtMethods. + pointer_arrays_.emplace(arr, kBinArtMethodClean); +} + +void ImageWriter::AssignImageBinSlot(mirror::Object* object, size_t oat_index) { + DCHECK(object != nullptr); + size_t object_size = object->SizeOf(); + + // The magic happens here. We segregate objects into different bins based + // on how likely they are to get dirty at runtime. + // + // Likely-to-dirty objects get packed together into the same bin so that + // at runtime their page dirtiness ratio (how many dirty objects a page has) is + // maximized. + // + // This means more pages will stay either clean or shared dirty (with zygote) and + // the app will use less of its own (private) memory. + Bin bin = kBinRegular; + size_t current_offset = 0u; + + if (kBinObjects) { + // + // Changing the bin of an object is purely a memory-use tuning. + // It has no change on runtime correctness. + // + // Memory analysis has determined that the following types of objects get dirtied + // the most: + // + // * Dex cache arrays are stored in a special bin. The arrays for each dex cache have + // a fixed layout which helps improve generated code (using PC-relative addressing), + // so we pre-calculate their offsets separately in PrepareDexCacheArraySlots(). + // Since these arrays are huge, most pages do not overlap other objects and it's not + // really important where they are for the clean/dirty separation. Due to their + // special PC-relative addressing, we arbitrarily keep them at the end. + // * Class'es which are verified [their clinit runs only at runtime] + // - classes in general [because their static fields get overwritten] + // - initialized classes with all-final statics are unlikely to be ever dirty, + // so bin them separately + // * Art Methods that are: + // - native [their native entry point is not looked up until runtime] + // - have declaring classes that aren't initialized + // [their interpreter/quick entry points are trampolines until the class + // becomes initialized] + // + // We also assume the following objects get dirtied either never or extremely rarely: + // * Strings (they are immutable) + // * Art methods that aren't native and have initialized declared classes + // + // We assume that "regular" bin objects are highly unlikely to become dirtied, + // so packing them together will not result in a noticeably tighter dirty-to-clean ratio. + // + if (object->IsClass()) { + bin = kBinClassVerified; + mirror::Class* klass = object->AsClass(); + + // Add non-embedded vtable to the pointer array table if there is one. + auto* vtable = klass->GetVTable(); + if (vtable != nullptr) { + AddMethodPointerArray(vtable); + } + auto* iftable = klass->GetIfTable(); + if (iftable != nullptr) { + for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) { + if (iftable->GetMethodArrayCount(i) > 0) { + AddMethodPointerArray(iftable->GetMethodArray(i)); + } + } + } + + if (klass->GetStatus() == Class::kStatusInitialized) { + bin = kBinClassInitialized; + + // If the class's static fields are all final, put it into a separate bin + // since it's very likely it will stay clean. + uint32_t num_static_fields = klass->NumStaticFields(); + if (num_static_fields == 0) { + bin = kBinClassInitializedFinalStatics; + } else { + // Maybe all the statics are final? 
+ bool all_final = true; + for (uint32_t i = 0; i < num_static_fields; ++i) { + ArtField* field = klass->GetStaticField(i); + if (!field->IsFinal()) { + all_final = false; + break; + } + } + + if (all_final) { + bin = kBinClassInitializedFinalStatics; + } + } + } + } else if (object->GetClass()->IsStringClass()) { + bin = kBinString; // Strings are almost always immutable (except for object header). + } else if (object->GetClass() == + Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangObject)) { + // Instance of java lang object, probably a lock object. This means it will be dirty when we + // synchronize on it. + bin = kBinMiscDirty; + } else if (object->IsDexCache()) { + // Dex file field becomes dirty when the image is loaded. + bin = kBinMiscDirty; + } + // else bin = kBinRegular + } + + // Assign the oat index too. + DCHECK(oat_index_map_.find(object) == oat_index_map_.end()); + oat_index_map_.emplace(object, oat_index); + + ImageInfo& image_info = GetImageInfo(oat_index); + + size_t offset_delta = RoundUp(object_size, kObjectAlignment); // 64-bit alignment + current_offset = image_info.bin_slot_sizes_[bin]; // How many bytes the current bin is at (aligned). + // Move the current bin size up to accommodate the object we just assigned a bin slot. + image_info.bin_slot_sizes_[bin] += offset_delta; + + BinSlot new_bin_slot(bin, current_offset); + SetImageBinSlot(object, new_bin_slot); + + ++image_info.bin_slot_count_[bin]; + + // Grow the image closer to the end by the object we just assigned. + image_info.image_end_ += offset_delta; +} + +bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const { + if (m->IsNative()) { + return true; + } + mirror::Class* declaring_class = m->GetDeclaringClass(); + // Initialized is highly unlikely to dirty since there's no entry points to mutate. + return declaring_class == nullptr || declaring_class->GetStatus() != Class::kStatusInitialized; +} + +bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const { + DCHECK(object != nullptr); + + // We always stash the bin slot into a lockword, in the 'forwarding address' state. + // If it's in some other state, then we haven't yet assigned an image bin slot. 
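
IsImageBinSlotAssigned() above and the accessors that follow all lean on the same trick: during writing, an object's lock word is parked in the forwarding-address state and carries the bin slot or image offset, since no locks or hash codes need to be live at that point. A toy model of such a tagged word, deliberately not ART's real LockWord bit layout:

    #include <cassert>
    #include <cstdint>

    class TaggedWord {
     public:
      static TaggedWord FromForwardingAddress(uint32_t offset) {
        assert((offset & 7u) == 0u);  // offsets are object-aligned, low bits are free
        return TaggedWord((offset >> kShift) | kForwardingTag);
      }
      bool IsForwardingAddress() const { return (value_ & kForwardingTag) != 0u; }
      uint32_t ForwardingAddress() const { return (value_ & ~kForwardingTag) << kShift; }

     private:
      explicit TaggedWord(uint32_t value) : value_(value) {}
      static constexpr uint32_t kForwardingTag = 0x80000000u;
      static constexpr uint32_t kShift = 3u;

      uint32_t value_;
    };

    int main() {
      TaggedWord w = TaggedWord::FromForwardingAddress(0x140u);
      assert(w.IsForwardingAddress());
      assert(w.ForwardingAddress() == 0x140u);
    }
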
+ if (object->GetLockWord(false).GetState() != LockWord::kForwardingAddress) { + return false; + } else if (kIsDebugBuild) { + LockWord lock_word = object->GetLockWord(false); + size_t offset = lock_word.ForwardingAddress(); + BinSlot bin_slot(offset); + size_t oat_index = GetOatIndex(object); + const ImageInfo& image_info = GetImageInfo(oat_index); + DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()]) + << "bin slot offset should not exceed the size of that bin"; + } + return true; +} + +ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const { + DCHECK(object != nullptr); + DCHECK(IsImageBinSlotAssigned(object)); + + LockWord lock_word = object->GetLockWord(false); + size_t offset = lock_word.ForwardingAddress(); // TODO: ForwardingAddress should be uint32_t + DCHECK_LE(offset, std::numeric_limits::max()); + + BinSlot bin_slot(static_cast(offset)); + size_t oat_index = GetOatIndex(object); + const ImageInfo& image_info = GetImageInfo(oat_index); + DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()]); + + return bin_slot; +} + +bool ImageWriter::AllocMemory() { + for (ImageInfo& image_info : image_infos_) { + ImageSection unused_sections[ImageHeader::kSectionCount]; + const size_t length = RoundUp( + image_info.CreateImageSections(unused_sections), kPageSize); + + std::string error_msg; + image_info.image_.reset(MemMap::MapAnonymous("image writer image", + nullptr, + length, + PROT_READ | PROT_WRITE, + false, + false, + &error_msg)); + if (UNLIKELY(image_info.image_.get() == nullptr)) { + LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg; + return false; + } + + // Create the image bitmap, only needs to cover mirror object section which is up to image_end_. 
+ CHECK_LE(image_info.image_end_, length); + image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create( + "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize))); + if (image_info.image_bitmap_.get() == nullptr) { + LOG(ERROR) << "Failed to allocate memory for image bitmap"; + return false; + } + } + return true; +} + +class ComputeLazyFieldsForClassesVisitor : public ClassVisitor { + public: + bool operator()(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + StackHandleScope<1> hs(Thread::Current()); + mirror::Class::ComputeName(hs.NewHandle(c)); + return true; + } +}; + +void ImageWriter::ComputeLazyFieldsForImageClasses() { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + ComputeLazyFieldsForClassesVisitor visitor; + class_linker->VisitClassesWithoutClassesLock(&visitor); +} + +static bool IsBootClassLoaderClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) { + return klass->GetClassLoader() == nullptr; +} + +bool ImageWriter::IsBootClassLoaderNonImageClass(mirror::Class* klass) { + return IsBootClassLoaderClass(klass) && !IsInBootImage(klass); +} + +bool ImageWriter::PruneAppImageClass(mirror::Class* klass) { + bool early_exit = false; + std::unordered_set visited; + return PruneAppImageClassInternal(klass, &early_exit, &visited); +} + +bool ImageWriter::PruneAppImageClassInternal( + mirror::Class* klass, + bool* early_exit, + std::unordered_set* visited) { + DCHECK(early_exit != nullptr); + DCHECK(visited != nullptr); + DCHECK(compile_app_image_); + if (klass == nullptr || IsInBootImage(klass)) { + return false; + } + auto found = prune_class_memo_.find(klass); + if (found != prune_class_memo_.end()) { + // Already computed, return the found value. + return found->second; + } + // Circular dependencies, return false but do not store the result in the memoization table. + if (visited->find(klass) != visited->end()) { + *early_exit = true; + return false; + } + visited->emplace(klass); + bool result = IsBootClassLoaderClass(klass); + std::string temp; + // Prune if not an image class, this handles any broken sets of image classes such as having a + // class in the set but not it's superclass. + result = result || !compiler_driver_.IsImageClass(klass->GetDescriptor(&temp)); + bool my_early_exit = false; // Only for ourselves, ignore caller. + // Remove classes that failed to verify since we don't want to have java.lang.VerifyError in the + // app image. + if (klass->GetStatus() == mirror::Class::kStatusError) { + result = true; + } else { + CHECK(klass->GetVerifyError() == nullptr) << PrettyClass(klass); + } + if (!result) { + // Check interfaces since these wont be visited through VisitReferences.) + mirror::IfTable* if_table = klass->GetIfTable(); + for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) { + result = result || PruneAppImageClassInternal(if_table->GetInterface(i), + &my_early_exit, + visited); + } + } + if (klass->IsObjectArrayClass()) { + result = result || PruneAppImageClassInternal(klass->GetComponentType(), + &my_early_exit, + visited); + } + // Check static fields and their classes. + size_t num_static_fields = klass->NumReferenceStaticFields(); + if (num_static_fields != 0 && klass->IsResolved()) { + // Presumably GC can happen when we are cross compiling, it should not cause performance + // problems to do pointer size logic. 
+    MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(
+        Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+    for (size_t i = 0u; i < num_static_fields; ++i) {
+      mirror::Object* ref = klass->GetFieldObject<mirror::Object>(field_offset);
+      if (ref != nullptr) {
+        if (ref->IsClass()) {
+          result = result || PruneAppImageClassInternal(ref->AsClass(),
+                                                        &my_early_exit,
+                                                        visited);
+        } else {
+          result = result || PruneAppImageClassInternal(ref->GetClass(),
+                                                        &my_early_exit,
+                                                        visited);
+        }
+      }
+      field_offset = MemberOffset(field_offset.Uint32Value() +
+                                  sizeof(mirror::HeapReference<mirror::Object>));
+    }
+  }
+  result = result || PruneAppImageClassInternal(klass->GetSuperClass(),
+                                                &my_early_exit,
+                                                visited);
+  // Remove the class if its dex file is not in the set of dex files. This happens for classes
+  // that come from a uses-library when there is no profile. b/30688277
+  mirror::DexCache* dex_cache = klass->GetDexCache();
+  if (dex_cache != nullptr) {
+    result = result ||
+        dex_file_oat_index_map_.find(dex_cache->GetDexFile()) == dex_file_oat_index_map_.end();
+  }
+  // Erase the element we stored earlier since we are exiting the function.
+  auto it = visited->find(klass);
+  DCHECK(it != visited->end());
+  visited->erase(it);
+  // Only store the result if it is true or none of the calls early exited due to circular
+  // dependencies. If visited is empty then we are the root caller; in this case any cycle was in
+  // a child call, so we can remember the result.
+  if (result || !my_early_exit || visited->empty()) {
+    prune_class_memo_[klass] = result;
+  }
+  *early_exit |= my_early_exit;
+  return result;
+}
+
+bool ImageWriter::KeepClass(Class* klass) {
+  if (klass == nullptr) {
+    return false;
+  }
+  if (compile_app_image_ && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+    // Already in the boot image, return true.
+    return true;
+  }
+  std::string temp;
+  if (!compiler_driver_.IsImageClass(klass->GetDescriptor(&temp))) {
+    return false;
+  }
+  if (compile_app_image_) {
+    // For app images, we need to prune boot loader classes that are not in the boot image since
+    // these may have already been loaded when the app image is loaded.
+    // Keep classes in the boot image space since we don't want to re-resolve these.
+    return !PruneAppImageClass(klass);
+  }
+  return true;
+}
+
+class NonImageClassesVisitor : public ClassVisitor {
+ public:
+  explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+
+  bool operator()(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!image_writer_->KeepClass(klass)) {
+      classes_to_prune_.insert(klass);
+    }
+    return true;
+  }
+
+  std::unordered_set<mirror::Class*> classes_to_prune_;
+  ImageWriter* const image_writer_;
+};
+
+void ImageWriter::PruneNonImageClasses() {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  Thread* self = Thread::Current();
+
+  // Clear class table strong roots so that dex caches can get pruned. We require pruning the
+  // class path dex caches.
+  class_linker->ClearClassTableStrongRoots();
+
+  // Make a list of classes we would like to prune.
+  NonImageClassesVisitor visitor(this);
+  class_linker->VisitClasses(&visitor);
+
+  // Remove the undesired classes from the class roots.
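+  // Once removed from the class table, a class can no longer be found by name, so the runtime
+  // resolves it afresh on first use instead of picking up a stale image copy.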
+  VLOG(compiler) << "Pruning " << visitor.classes_to_prune_.size() << " classes";
+  for (mirror::Class* klass : visitor.classes_to_prune_) {
+    std::string temp;
+    const char* name = klass->GetDescriptor(&temp);
+    VLOG(compiler) << "Pruning class " << name;
+    if (!compile_app_image_) {
+      DCHECK(IsBootClassLoaderClass(klass));
+    }
+    bool result = class_linker->RemoveClass(name, klass->GetClassLoader());
+    DCHECK(result);
+  }
+
+  // Clear references to removed classes from the DexCaches.
+  ArtMethod* resolution_method = runtime->GetResolutionMethod();
+
+  ScopedAssertNoThreadSuspension sa(self, __FUNCTION__);
+  ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);  // For ClassInClassTable
+  ReaderMutexLock mu2(self, *class_linker->DexLock());
+  for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+    if (self->IsJWeakCleared(data.weak_root)) {
+      continue;
+    }
+    mirror::DexCache* dex_cache = self->DecodeJObject(data.weak_root)->AsDexCache();
+    for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) {
+      Class* klass = dex_cache->GetResolvedType(i);
+      if (klass != nullptr && !KeepClass(klass)) {
+        dex_cache->SetResolvedType(i, nullptr);
+      }
+    }
+    ArtMethod** resolved_methods = dex_cache->GetResolvedMethods();
+    for (size_t i = 0, num = dex_cache->NumResolvedMethods(); i != num; ++i) {
+      ArtMethod* method =
+          mirror::DexCache::GetElementPtrSize(resolved_methods, i, target_ptr_size_);
+      DCHECK(method != nullptr) << "Expected resolution method instead of null method";
+      mirror::Class* declaring_class = method->GetDeclaringClass();
+      // Copied methods may be held live by a class which was not an image class but have a
+      // declaring class which is an image class. Set it to the resolution method to be safe and
+      // prevent dangling pointers.
+      if (method->IsCopied() || !KeepClass(declaring_class)) {
+        mirror::DexCache::SetElementPtrSize(resolved_methods,
+                                            i,
+                                            resolution_method,
+                                            target_ptr_size_);
+      } else {
+        // Check that the class is still in the classes table.
+        DCHECK(class_linker->ClassInClassTable(declaring_class)) << "Class "
+            << PrettyClass(declaring_class) << " not in class linker table";
+      }
+    }
+    ArtField** resolved_fields = dex_cache->GetResolvedFields();
+    for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) {
+      ArtField* field = mirror::DexCache::GetElementPtrSize(resolved_fields, i, target_ptr_size_);
+      if (field != nullptr && !KeepClass(field->GetDeclaringClass())) {
+        dex_cache->SetResolvedField(i, nullptr, target_ptr_size_);
+      }
+    }
+    // Clean the dex field. It might have been populated during the initialization phase, but
+    // contains data only valid during a real run.
+    dex_cache->SetFieldObject<false>(mirror::DexCache::DexOffset(), nullptr);
+  }
+
+  // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
+  class_linker->DropFindArrayClassCache();
+
+  // Clear to save RAM.
+  prune_class_memo_.clear();
+}
+
+void ImageWriter::CheckNonImageClassesRemoved() {
+  if (compiler_driver_.GetImageClasses() != nullptr) {
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    heap->VisitObjects(CheckNonImageClassesRemovedCallback, this);
+  }
+}
+
+void ImageWriter::CheckNonImageClassesRemovedCallback(Object* obj, void* arg) {
+  ImageWriter* image_writer = reinterpret_cast<ImageWriter*>(arg);
+  if (obj->IsClass() && !image_writer->IsInBootImage(obj)) {
+    Class* klass = obj->AsClass();
+    if (!image_writer->KeepClass(klass)) {
+      image_writer->DumpImageClasses();
+      std::string temp;
+      CHECK(image_writer->KeepClass(klass)) << klass->GetDescriptor(&temp)
+          << " " << PrettyDescriptor(klass);
+    }
+  }
+}
+
+void ImageWriter::DumpImageClasses() {
+  auto image_classes = compiler_driver_.GetImageClasses();
+  CHECK(image_classes != nullptr);
+  for (const std::string& image_class : *image_classes) {
+    LOG(INFO) << " " << image_class;
+  }
+}
+
+mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
+  Thread* const self = Thread::Current();
+  for (const ImageInfo& image_info : image_infos_) {
+    mirror::String* const found = image_info.intern_table_->LookupStrong(self, string);
+    DCHECK(image_info.intern_table_->LookupWeak(self, string) == nullptr)
+        << string->ToModifiedUtf8();
+    if (found != nullptr) {
+      return found;
+    }
+  }
+  if (compile_app_image_) {
+    Runtime* const runtime = Runtime::Current();
+    mirror::String* found = runtime->GetInternTable()->LookupStrong(self, string);
+    // If we found it in the runtime intern table it could either be in the boot image or interned
+    // during app image compilation. If it was in the boot image return that; otherwise return
+    // null since it belongs to another image space.
+    if (found != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(found)) {
+      return found;
+    }
+    DCHECK(runtime->GetInternTable()->LookupWeak(self, string) == nullptr)
+        << string->ToModifiedUtf8();
+  }
+  return nullptr;
+}
+
+ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  Thread* self = Thread::Current();
+  StackHandleScope<3> hs(self);
+  Handle<Class> object_array_class(hs.NewHandle(
+      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
+
+  std::unordered_set<const DexFile*> image_dex_files;
+  for (auto& pair : dex_file_oat_index_map_) {
+    const DexFile* image_dex_file = pair.first;
+    size_t image_oat_index = pair.second;
+    if (oat_index == image_oat_index) {
+      image_dex_files.insert(image_dex_file);
+    }
+  }
+
+  // Build an Object[] of all the DexCaches used in the source_space_.
+  // Since we can't hold the dex lock when allocating the dex_caches
+  // ObjectArray, we lock the dex lock twice, first to get the number
+  // of dex caches and then again to copy the dex caches. We check that
+  // the number of dex caches does not change.
+  size_t dex_cache_count = 0;
+  {
+    ReaderMutexLock mu(self, *class_linker->DexLock());
+    // Count number of dex caches not in the boot image.
+    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+      mirror::DexCache* dex_cache =
+          down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
+      if (dex_cache == nullptr) {
+        continue;
+      }
+      const DexFile* dex_file = dex_cache->GetDexFile();
+      if (!IsInBootImage(dex_cache)) {
+        dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
+      }
+    }
+  }
+  Handle<ObjectArray<Object>> dex_caches(
+      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(), dex_cache_count)));
+  CHECK(dex_caches.Get() != nullptr) << "Failed to allocate a dex cache array.";
+  {
+    ReaderMutexLock mu(self, *class_linker->DexLock());
+    size_t non_image_dex_caches = 0;
+    // Re-count the number of non image dex caches.
+    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+      mirror::DexCache* dex_cache =
+          down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
+      if (dex_cache == nullptr) {
+        continue;
+      }
+      const DexFile* dex_file = dex_cache->GetDexFile();
+      if (!IsInBootImage(dex_cache)) {
+        non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
+      }
+    }
+    CHECK_EQ(dex_cache_count, non_image_dex_caches)
+        << "The number of non-image dex caches changed.";
+    size_t i = 0;
+    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+      mirror::DexCache* dex_cache =
+          down_cast<mirror::DexCache*>(self->DecodeJObject(data.weak_root));
+      if (dex_cache == nullptr) {
+        continue;
+      }
+      const DexFile* dex_file = dex_cache->GetDexFile();
+      if (!IsInBootImage(dex_cache) && image_dex_files.find(dex_file) != image_dex_files.end()) {
+        dex_caches->Set(i, dex_cache);
+        ++i;
+      }
+    }
+  }
+
+  // Build an Object[] of the roots needed to restore the runtime.
+  auto image_roots(hs.NewHandle(
+      ObjectArray<Object>::Alloc(self, object_array_class.Get(), ImageHeader::kImageRootsMax)));
+  image_roots->Set(ImageHeader::kDexCaches, dex_caches.Get());
+  image_roots->Set(ImageHeader::kClassRoots, class_linker->GetClassRoots());
+  for (int i = 0; i < ImageHeader::kImageRootsMax; i++) {
+    CHECK(image_roots->Get(i) != nullptr);
+  }
+  return image_roots.Get();
+}
+
+mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
+                                              mirror::Object* obj,
+                                              size_t oat_index) {
+  if (obj == nullptr || IsInBootImage(obj)) {
+    // Object is null or already in the image, there is no work to do.
+    return obj;
+  }
+  if (!IsImageBinSlotAssigned(obj)) {
+    // We want to intern all strings but also assign offsets for the source string. Since the
+    // pruning phase has already happened, if we intern a string to one in the image we still
+    // end up copying an unreachable string.
+    if (obj->IsString()) {
+      // Check whether the string is already interned in another image info so that the intern
+      // tables of two different images do not contain the same string.
+      mirror::String* interned = FindInternedString(obj->AsString());
+      if (interned == nullptr) {
+        // Not in another image space, insert into our table.
+        interned = GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString());
+        DCHECK_EQ(interned, obj);
+      }
+    } else if (obj->IsDexCache()) {
+      oat_index = GetOatIndexForDexCache(obj->AsDexCache());
+    } else if (obj->IsClass()) {
+      // Visit and assign offsets for fields and field arrays.
+      mirror::Class* as_klass = obj->AsClass();
+      mirror::DexCache* dex_cache = as_klass->GetDexCache();
+      DCHECK_NE(as_klass->GetStatus(), mirror::Class::kStatusError);
+      if (compile_app_image_) {
+        // Extra sanity check: no boot loader classes should be left!
+        CHECK(!IsBootClassLoaderClass(as_klass)) << PrettyClass(as_klass);
+      }
+      LengthPrefixedArray<ArtField>* fields[] = {
+          as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
+      };
+      // Overwrite the oat index value since the class' dex cache is more accurate about where it
+      // belongs.
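+      // For example, a class first reached through a runtime root arrives here with the default
+      // oat index; routing it through its dex cache places it in the image that actually
+      // contains its dex file.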
+      oat_index = GetOatIndexForDexCache(dex_cache);
+      ImageInfo& image_info = GetImageInfo(oat_index);
+      {
+        // Note: This table is only accessed from the image writer, so avoid locking to prevent
+        // lock order violations from root visiting.
+        image_info.class_table_->InsertWithoutLocks(as_klass);
+      }
+      for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
+        // Total array length including header.
+        if (cur_fields != nullptr) {
+          const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0);
+          // Forward the entire array at once.
+          auto it = native_object_relocations_.find(cur_fields);
+          CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields
+              << " already forwarded";
+          size_t& offset = image_info.bin_slot_sizes_[kBinArtField];
+          DCHECK(!IsInBootImage(cur_fields));
+          native_object_relocations_.emplace(
+              cur_fields,
+              NativeObjectRelocation {
+                  oat_index, offset, kNativeObjectRelocationTypeArtFieldArray
+              });
+          offset += header_size;
+          // Forward individual fields so that we can quickly find where they belong.
+          for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
+            // Need to forward arrays separately from fields.
+            ArtField* field = &cur_fields->At(i);
+            auto it2 = native_object_relocations_.find(field);
+            CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
+                << " already assigned " << PrettyField(field) << " static=" << field->IsStatic();
+            DCHECK(!IsInBootImage(field));
+            native_object_relocations_.emplace(
+                field,
+                NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeArtField });
+            offset += sizeof(ArtField);
+          }
+        }
+      }
+      // Visit and assign offsets for methods.
+      size_t num_methods = as_klass->NumMethods();
+      if (num_methods != 0) {
+        bool any_dirty = false;
+        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+          if (WillMethodBeDirty(&m)) {
+            any_dirty = true;
+            break;
+          }
+        }
+        NativeObjectRelocationType type = any_dirty
+            ? kNativeObjectRelocationTypeArtMethodDirty
+            : kNativeObjectRelocationTypeArtMethodClean;
+        Bin bin_type = BinTypeForNativeRelocationType(type);
+        // Forward the entire array at once, but header first.
+        const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+        const size_t method_size = ArtMethod::Size(target_ptr_size_);
+        const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
+                                                                               method_size,
+                                                                               method_alignment);
+        LengthPrefixedArray<ArtMethod>* array = as_klass->GetMethodsPtr();
+        auto it = native_object_relocations_.find(array);
+        CHECK(it == native_object_relocations_.end())
+            << "Method array " << array << " already forwarded";
+        size_t& offset = image_info.bin_slot_sizes_[bin_type];
+        DCHECK(!IsInBootImage(array));
+        native_object_relocations_.emplace(array,
+            NativeObjectRelocation {
+                oat_index,
+                offset,
+                any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty
+                          : kNativeObjectRelocationTypeArtMethodArrayClean });
+        offset += header_size;
+        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+          AssignMethodOffset(&m, type, oat_index);
+        }
+        (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
+      }
+      // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
+      // live.
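+      // A runtime method in an IMT slot is the conflict trampoline's method, which owns an
+      // ImtConflictTable; AssignMethodOffset also reserves space for that table (see
+      // TryAssignConflictTableOffset below).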
+      if (as_klass->ShouldHaveImt()) {
+        ImTable* imt = as_klass->GetImt(target_ptr_size_);
+        for (size_t i = 0; i < ImTable::kSize; ++i) {
+          ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
+          DCHECK(imt_method != nullptr);
+          if (imt_method->IsRuntimeMethod() &&
+              !IsInBootImage(imt_method) &&
+              !NativeRelocationAssigned(imt_method)) {
+            AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
+          }
+        }
+        TryAssignImTableOffset(imt, oat_index);
+      }
+    } else if (obj->IsClassLoader()) {
+      // Register the class loader if it has a class table.
+      // The fake boot class loader should not get registered and we should end up with only one
+      // class loader.
+      mirror::ClassLoader* class_loader = obj->AsClassLoader();
+      if (class_loader->GetClassTable() != nullptr) {
+        class_loaders_.insert(class_loader);
+      }
+    }
+    AssignImageBinSlot(obj, oat_index);
+    work_stack.emplace(obj, oat_index);
+  }
+  if (obj->IsString()) {
+    // Always return the interned string if there exists one.
+    mirror::String* interned = FindInternedString(obj->AsString());
+    if (interned != nullptr) {
+      return interned;
+    }
+  }
+  return obj;
+}
+
+bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
+  return native_object_relocations_.find(ptr) != native_object_relocations_.end();
+}
+
+void ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
+  // No offset, or already assigned.
+  if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
+    return;
+  }
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const size_t size = ImTable::SizeInBytes(target_ptr_size_);
+  native_object_relocations_.emplace(
+      imt,
+      NativeObjectRelocation {
+          oat_index,
+          image_info.bin_slot_sizes_[kBinImTable],
+          kNativeObjectRelocationTypeIMTable});
+  image_info.bin_slot_sizes_[kBinImTable] += size;
+}
+
+void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
+  // No offset, or already assigned.
+  if (table == nullptr || NativeRelocationAssigned(table)) {
+    return;
+  }
+  CHECK(!IsInBootImage(table));
+  // Assign the conflict table an offset in the kBinIMTConflictTable bin of its image.
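+  // The table's byte size depends on its number of (interface method, implementation) pairs and
+  // on the target pointer size, hence ComputeSize(target_ptr_size_) below.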
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const size_t size = table->ComputeSize(target_ptr_size_);
+  native_object_relocations_.emplace(
+      table,
+      NativeObjectRelocation {
+          oat_index,
+          image_info.bin_slot_sizes_[kBinIMTConflictTable],
+          kNativeObjectRelocationTypeIMTConflictTable});
+  image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
+}
+
+void ImageWriter::AssignMethodOffset(ArtMethod* method,
+                                     NativeObjectRelocationType type,
+                                     size_t oat_index) {
+  DCHECK(!IsInBootImage(method));
+  CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned "
+      << PrettyMethod(method);
+  if (method->IsRuntimeMethod()) {
+    TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
+  }
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
+  native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
+  offset += ArtMethod::Size(target_ptr_size_);
+}
+
+void ImageWriter::EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg) {
+  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
+  DCHECK(writer != nullptr);
+  if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+    CHECK(writer->IsImageBinSlotAssigned(obj)) << PrettyTypeOf(obj) << " " << obj;
+  }
+}
+
+void ImageWriter::DeflateMonitorCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) {
+  Monitor::Deflate(Thread::Current(), obj);
+}
+
+void ImageWriter::UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg) {
+  ImageWriter* writer = reinterpret_cast<ImageWriter*>(arg);
+  DCHECK(writer != nullptr);
+  if (!writer->IsInBootImage(obj)) {
+    writer->UnbinObjectsIntoOffset(obj);
+  }
+}
+
+void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
+  DCHECK(!IsInBootImage(obj));
+  CHECK(obj != nullptr);
+
+  // We know the bin slot, and the total bin sizes for all objects by now,
+  // so calculate the object's final image offset.
+
+  DCHECK(IsImageBinSlotAssigned(obj));
+  BinSlot bin_slot = GetImageBinSlot(obj);
+  // Change the lockword from a bin slot into an offset.
+  AssignImageOffset(obj, bin_slot);
+}
+
+class ImageWriter::VisitReferencesVisitor {
+ public:
+  VisitReferencesVisitor(ImageWriter* image_writer, WorkStack* work_stack, size_t oat_index)
+      : image_writer_(image_writer), work_stack_(work_stack), oat_index_(oat_index) {}
+
+  // Fix up separately since we also need to fix up method entrypoints.
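+  // Every reference visited below is routed through TryAssignBinSlot, which may return a
+  // different object (an already-interned string), so each field is written back with the
+  // visitor's result.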
+  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    root->Assign(VisitReference(root->AsMirrorPtr()));
+  }
+
+  ALWAYS_INLINE void operator() (mirror::Object* obj,
+                                 MemberOffset offset,
+                                 bool is_static ATTRIBUTE_UNUSED) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    mirror::Object* ref =
+        obj->GetFieldObject<mirror::Object>(offset);
+    obj->SetFieldObject</*kTransactionActive*/false>(offset, VisitReference(ref));
+  }
+
+  ALWAYS_INLINE void operator() (mirror::Class* klass ATTRIBUTE_UNUSED,
+                                 mirror::Reference* ref) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    ref->SetReferent</*kTransactionActive*/false>(
+        VisitReference(ref->GetReferent()));
+  }
+
+ private:
+  mirror::Object* VisitReference(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return image_writer_->TryAssignBinSlot(*work_stack_, ref, oat_index_);
+  }
+
+  ImageWriter* const image_writer_;
+  WorkStack* const work_stack_;
+  const size_t oat_index_;
+};
+
+class ImageWriter::GetRootsVisitor : public RootVisitor {
+ public:
+  explicit GetRootsVisitor(std::vector<mirror::Object*>* roots) : roots_(roots) {}
+
+  void VisitRoots(mirror::Object*** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      roots_->push_back(*roots[i]);
+    }
+  }
+
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+                  size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      roots_->push_back(roots[i]->AsMirrorPtr());
+    }
+  }
+
+ private:
+  std::vector<mirror::Object*>* const roots_;
+};
+
+void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
+  while (!work_stack->empty()) {
+    std::pair<mirror::Object*, size_t> pair(work_stack->top());
+    work_stack->pop();
+    VisitReferencesVisitor visitor(this, work_stack, /*oat_index*/ pair.second);
+    // Walk references and assign bin slots for them.
+    pair.first->VisitReferences(
+        visitor,
+        visitor);
+  }
+}
+
+void ImageWriter::CalculateNewObjectOffsets() {
+  Thread* const self = Thread::Current();
+  StackHandleScopeCollection handles(self);
+  std::vector<Handle<ObjectArray<Object>>> image_roots;
+  for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
+    image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
+  }
+
+  Runtime* const runtime = Runtime::Current();
+  gc::Heap* const heap = runtime->GetHeap();
+
+  // Leave space for the header, but do not write it yet; we need to
+  // know where image_roots is going to end up.
+  image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit alignment
+
+  const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+  // Write the image runtime methods.
+  image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
+  image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
+  image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod();
+  image_methods_[ImageHeader::kCalleeSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kSaveAll);
+  image_methods_[ImageHeader::kRefsOnlySaveMethod] =
+      runtime->GetCalleeSaveMethod(Runtime::kRefsOnly);
+  image_methods_[ImageHeader::kRefsAndArgsSaveMethod] =
+      runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+  // Visit image methods first to have the main runtime methods in the first image.
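+  // GetDefaultOatIndex() below designates the first (primary) image, so the runtime methods
+  // always live there rather than in a secondary image.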
+  for (auto* m : image_methods_) {
+    CHECK(m != nullptr);
+    CHECK(m->IsRuntimeMethod());
+    DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
+    if (!IsInBootImage(m)) {
+      AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
+    }
+  }
+
+  // Deflate monitors before we visit roots since deflating acquires the monitor lock. Acquiring
+  // this lock while holding other locks may cause lock order violations.
+  heap->VisitObjects(DeflateMonitorCallback, this);
+
+  // Work list of objects. Everything on the stack must already be
+  // assigned a bin slot.
+  WorkStack work_stack;
+
+  // Special case interned strings to put them in the image they are likely to be resolved from.
+  for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
+    auto it = dex_file_oat_index_map_.find(dex_file);
+    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
+    const size_t oat_index = it->second;
+    InternTable* const intern_table = runtime->GetInternTable();
+    for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
+      uint32_t utf16_length;
+      const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(i, &utf16_length);
+      mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data);
+      TryAssignBinSlot(work_stack, string, oat_index);
+    }
+  }
+
+  // Get the GC roots and then visit them separately to avoid lock violations since the root
+  // visitor visits roots while holding various locks.
+  {
+    std::vector<mirror::Object*> roots;
+    GetRootsVisitor root_visitor(&roots);
+    runtime->VisitRoots(&root_visitor);
+    for (mirror::Object* obj : roots) {
+      TryAssignBinSlot(work_stack, obj, GetDefaultOatIndex());
+    }
+  }
+  ProcessWorkStack(&work_stack);
+
+  // For app images, there may be objects that are only held live by the boot image. One
+  // example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
+  // does not fail any checks. TODO: We should probably avoid copying these objects.
+  if (compile_app_image_) {
+    for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
+      DCHECK(space->IsImageSpace());
+      gc::accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
+                                    reinterpret_cast<uintptr_t>(space->Limit()),
+                                    [this, &work_stack](mirror::Object* obj)
+          SHARED_REQUIRES(Locks::mutator_lock_) {
+        VisitReferencesVisitor visitor(this, &work_stack, GetDefaultOatIndex());
+        // Visit all references and try to assign bin slots for them (calls TryAssignBinSlot).
+        obj->VisitReferences(
+            visitor,
+            visitor);
+      });
+    }
+    // Process the work stack in case anything was added by TryAssignBinSlot.
+    ProcessWorkStack(&work_stack);
+  }
+
+  // Verify that all objects have assigned image bin slots.
+  heap->VisitObjects(EnsureBinSlotAssignedCallback, this);
+
+  // Calculate size of the dex cache arrays slot and prepare offsets.
+  PrepareDexCacheArraySlots();
+
+  // Calculate the sizes of the intern tables and class tables.
+  for (ImageInfo& image_info : image_infos_) {
+    // Calculate how big the intern table will be after being serialized.
+    InternTable* const intern_table = image_info.intern_table_.get();
+    CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
+    image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
+    // Calculate the size of the class table.
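+    // As with the intern table above, WriteToMemory(nullptr) is a dry run: it writes nothing
+    // and only returns the number of bytes the serialized table will occupy.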
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); + image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr); + } + + // Calculate bin slot offsets. + for (ImageInfo& image_info : image_infos_) { + size_t bin_offset = image_objects_offset_begin_; + for (size_t i = 0; i != kBinSize; ++i) { + switch (i) { + case kBinArtMethodClean: + case kBinArtMethodDirty: { + bin_offset = RoundUp(bin_offset, method_alignment); + break; + } + case kBinImTable: + case kBinIMTConflictTable: { + bin_offset = RoundUp(bin_offset, target_ptr_size_); + break; + } + default: { + // Normal alignment. + } + } + image_info.bin_slot_offsets_[i] = bin_offset; + bin_offset += image_info.bin_slot_sizes_[i]; + } + // NOTE: There may be additional padding between the bin slots and the intern table. + DCHECK_EQ(image_info.image_end_, + GetBinSizeSum(image_info, kBinMirrorCount) + image_objects_offset_begin_); + } + + // Calculate image offsets. + size_t image_offset = 0; + for (ImageInfo& image_info : image_infos_) { + image_info.image_begin_ = global_image_begin_ + image_offset; + image_info.image_offset_ = image_offset; + ImageSection unused_sections[ImageHeader::kSectionCount]; + image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize); + // There should be no gaps until the next image. + image_offset += image_info.image_size_; + } + + // Transform each object's bin slot into an offset which will be used to do the final copy. + heap->VisitObjects(UnbinObjectsIntoOffsetCallback, this); + + // DCHECK_EQ(image_end_, GetBinSizeSum(kBinMirrorCount) + image_objects_offset_begin_); + + size_t i = 0; + for (ImageInfo& image_info : image_infos_) { + image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get())); + i++; + } + + // Update the native relocations by adding their bin sums. + for (auto& pair : native_object_relocations_) { + NativeObjectRelocation& relocation = pair.second; + Bin bin_type = BinTypeForNativeRelocationType(relocation.type); + ImageInfo& image_info = GetImageInfo(relocation.oat_index); + relocation.offset += image_info.bin_slot_offsets_[bin_type]; + } + + // Note that image_info.image_end_ is left at end of used mirror object section. +} + +size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const { + DCHECK(out_sections != nullptr); + + // Do not round up any sections here that are represented by the bins since it will break + // offsets. + + // Objects section + ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects]; + *objects_section = ImageSection(0u, image_end_); + + // Add field section. + ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields]; + *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]); + CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); + + // Add method section. + ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; + *methods_section = ImageSection( + bin_slot_offsets_[kBinArtMethodClean], + bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]); + + // IMT section. + ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables]; + *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]); + + // Conflict tables section. 
+  ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables];
+  *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable],
+                                              bin_slot_sizes_[kBinIMTConflictTable]);
+
+  // Runtime methods section.
+  ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods];
+  *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod],
+                                          bin_slot_sizes_[kBinRuntimeMethod]);
+
+  // Add dex cache arrays section.
+  ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays];
+  *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray],
+                                           bin_slot_sizes_[kBinDexCacheArray]);
+
+  // Round up to the alignment the string table expects. See HashSet::WriteToMemory.
+  size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t));
+  // Calculate the size of the interned strings.
+  ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
+  *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
+  cur_pos = interned_strings_section->End();
+  // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
+  cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
+  // Calculate the size of the class table section.
+  ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
+  *class_table_section = ImageSection(cur_pos, class_table_bytes_);
+  cur_pos = class_table_section->End();
+  // Image end goes right before the start of the image bitmap.
+  return cur_pos;
+}
+
+void ImageWriter::CreateHeader(size_t oat_index) {
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  const uint8_t* oat_file_begin = image_info.oat_file_begin_;
+  const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
+  const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;
+
+  // Create the image sections.
+  ImageSection sections[ImageHeader::kSectionCount];
+  const size_t image_end = image_info.CreateImageSections(sections);
+
+  // Finally bitmap section.
+  const size_t bitmap_bytes = image_info.image_bitmap_->Size();
+  auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
+  *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
+  if (VLOG_IS_ON(compiler)) {
+    LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
+    size_t idx = 0;
+    for (const ImageSection& section : sections) {
+      LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
+      ++idx;
+    }
+    LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
+    LOG(INFO) << "Image roots address=" << std::hex << image_info.image_roots_address_ << std::dec;
+    LOG(INFO) << "Image begin=" << std::hex << reinterpret_cast<uintptr_t>(global_image_begin_)
+              << " Image offset=" << image_info.image_offset_ << std::dec;
+    LOG(INFO) << "Oat file begin=" << std::hex << reinterpret_cast<uintptr_t>(oat_file_begin)
+              << " Oat data begin=" << reinterpret_cast<uintptr_t>(image_info.oat_data_begin_)
+              << " Oat data end=" << reinterpret_cast<uintptr_t>(oat_data_end)
+              << " Oat file end=" << reinterpret_cast<uintptr_t>(oat_file_end);
+  }
+  // Store boot image info for app image so that we can relocate.
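+  // An app image is compiled against a specific boot image; recording the boot image and boot
+  // oat ranges in the header lets the loader check them and relocate the app image if the boot
+  // image sits elsewhere at load time.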
+  uint32_t boot_image_begin = 0;
+  uint32_t boot_image_end = 0;
+  uint32_t boot_oat_begin = 0;
+  uint32_t boot_oat_end = 0;
+  gc::Heap* const heap = Runtime::Current()->GetHeap();
+  heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
+
+  // Create the header, leave 0 for data size since we will fill this in as we are writing the
+  // image.
+  new (image_info.image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_info.image_begin_),
+                                               image_end,
+                                               sections,
+                                               image_info.image_roots_address_,
+                                               image_info.oat_checksum_,
+                                               PointerToLowMemUInt32(oat_file_begin),
+                                               PointerToLowMemUInt32(image_info.oat_data_begin_),
+                                               PointerToLowMemUInt32(oat_data_end),
+                                               PointerToLowMemUInt32(oat_file_end),
+                                               boot_image_begin,
+                                               boot_image_end - boot_image_begin,
+                                               boot_oat_begin,
+                                               boot_oat_end - boot_oat_begin,
+                                               target_ptr_size_,
+                                               compile_pic_,
+                                               /*is_pic*/compile_app_image_,
+                                               image_storage_mode_,
+                                               /*data_size*/0u);
+}
+
+ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
+  auto it = native_object_relocations_.find(method);
+  CHECK(it != native_object_relocations_.end()) << PrettyMethod(method) << " @ " << method;
+  size_t oat_index = GetOatIndex(method->GetDexCache());
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  CHECK_GE(it->second.offset, image_info.image_end_) << "ArtMethods should be after Objects";
+  return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
+}
+
+class FixupRootVisitor : public RootVisitor {
+ public:
+  explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
+  }
+
+  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      *roots[i] = image_writer_->GetImageAddress(*roots[i]);
+    }
+  }
+
+  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+                  const RootInfo& info ATTRIBUTE_UNUSED)
+      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+    for (size_t i = 0; i < count; ++i) {
+      roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
+    }
+  }
+
+ private:
+  ImageWriter* const image_writer_;
+};
+
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
+  for (size_t i = 0; i < ImTable::kSize; ++i) {
+    ArtMethod* method = orig->Get(i, target_ptr_size_);
+    copy->Set(i, NativeLocationInImage(method), target_ptr_size_);
+  }
+}
+
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
+  const size_t count = orig->NumEntries(target_ptr_size_);
+  for (size_t i = 0; i < count; ++i) {
+    ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
+    ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
+    copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method));
+    copy->SetImplementationMethod(i,
+                                  target_ptr_size_,
+                                  NativeLocationInImage(implementation_method));
+  }
+}
+
+void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
+  const ImageInfo& image_info = GetImageInfo(oat_index);
+  // Copy ArtFields and methods to their locations and update the array for convenience.
+  for (auto& pair : native_object_relocations_) {
+    NativeObjectRelocation& relocation = pair.second;
+    // Only work with fields and methods that are in the current oat file.
+    if (relocation.oat_index != oat_index) {
+      continue;
+    }
+    auto* dest = image_info.image_->Begin() + relocation.offset;
+    DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_);
+    DCHECK(!IsInBootImage(pair.first));
+    switch (relocation.type) {
+      case kNativeObjectRelocationTypeArtField: {
+        memcpy(dest, pair.first, sizeof(ArtField));
+        reinterpret_cast<ArtField*>(dest)->SetDeclaringClass(
+            GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass()));
+        break;
+      }
+      case kNativeObjectRelocationTypeRuntimeMethod:
+      case kNativeObjectRelocationTypeArtMethodClean:
+      case kNativeObjectRelocationTypeArtMethodDirty: {
+        CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
+                           reinterpret_cast<ArtMethod*>(dest),
+                           image_info);
+        break;
+      }
+      // For arrays, copy just the header since the elements will get copied by their
+      // corresponding relocations.
+      case kNativeObjectRelocationTypeArtFieldArray: {
+        memcpy(dest, pair.first, LengthPrefixedArray<ArtField>::ComputeSize(0));
+        break;
+      }
+      case kNativeObjectRelocationTypeArtMethodArrayClean:
+      case kNativeObjectRelocationTypeArtMethodArrayDirty: {
+        size_t size = ArtMethod::Size(target_ptr_size_);
+        size_t alignment = ArtMethod::Alignment(target_ptr_size_);
+        memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0, size, alignment));
+        // Clear padding to avoid non-deterministic data in the image (and placate valgrind).
+        reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(dest)->ClearPadding(size, alignment);
+        break;
+      }
+      case kNativeObjectRelocationTypeDexCacheArray:
+        // Nothing to copy here, everything is done in FixupDexCache().
+        break;
+      case kNativeObjectRelocationTypeIMTable: {
+        ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
+        ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
+        CopyAndFixupImTable(orig_imt, dest_imt);
+        break;
+      }
+      case kNativeObjectRelocationTypeIMTConflictTable: {
+        auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
+        CopyAndFixupImtConflictTable(
+            orig_table,
+            new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+        break;
+      }
+    }
+  }
+  // Fixup the image method roots.
+  auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
+  for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
+    ArtMethod* method = image_methods_[i];
+    CHECK(method != nullptr);
+    if (!IsInBootImage(method)) {
+      method = NativeLocationInImage(method);
+    }
+    image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
+  }
+  FixupRootVisitor root_visitor(this);
+
+  // Write the intern table into the image.
+  if (image_info.intern_table_bytes_ > 0) {
+    const ImageSection& intern_table_section = image_header->GetImageSection(
+        ImageHeader::kSectionInternedStrings);
+    InternTable* const intern_table = image_info.intern_table_.get();
+    uint8_t* const intern_table_memory_ptr =
+        image_info.image_->Begin() + intern_table_section.Offset();
+    const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr);
+    CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_);
+    // Fixup the pointers in the newly written intern table to contain image addresses.
+    InternTable temp_intern_table;
+    // Note that we require that ReadFromMemory does not make an internal copy of the elements so
+    // that VisitRoots() will update the memory directly rather than the copies.
+    // This also relies on visit roots not doing any verification which could fail after we update
+    // the roots to be the image addresses.
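+    // After AddTableFromMemory below, the temporary table's backing storage is the buffer just
+    // written at intern_table_memory_ptr, so visiting its roots rewrites the string slots in the
+    // image in place.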
+    temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
+    CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
+    temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+  }
+  // Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
+  // class loaders. Writing multiple class tables into the image is currently unsupported.
+  if (image_info.class_table_bytes_ > 0u) {
+    const ImageSection& class_table_section = image_header->GetImageSection(
+        ImageHeader::kSectionClassTable);
+    uint8_t* const class_table_memory_ptr =
+        image_info.image_->Begin() + class_table_section.Offset();
+    ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+
+    ClassTable* table = image_info.class_table_.get();
+    CHECK(table != nullptr);
+    const size_t class_table_bytes = table->WriteToMemory(class_table_memory_ptr);
+    CHECK_EQ(class_table_bytes, image_info.class_table_bytes_);
+    // Fixup the pointers in the newly written class table to contain image addresses. See
+    // above comment for intern tables.
+    ClassTable temp_class_table;
+    temp_class_table.ReadFromMemory(class_table_memory_ptr);
+    CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() +
+             table->NumZygoteClasses());
+    BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor,
+                                                                    RootInfo(kRootUnknown));
+    temp_class_table.VisitRoots(buffered_visitor);
+  }
+}
+
+void ImageWriter::CopyAndFixupObjects() {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  heap->VisitObjects(CopyAndFixupObjectsCallback, this);
+  // Fix up the objects that previously had hash codes.
+  for (const auto& hash_pair : saved_hashcode_map_) {
+    Object* obj = hash_pair.first;
+    DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0U);
+    obj->SetLockWord(LockWord::FromHashCode(hash_pair.second, 0U), false);
+  }
+  saved_hashcode_map_.clear();
+}
+
+void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
+  DCHECK(obj != nullptr);
+  DCHECK(arg != nullptr);
+  reinterpret_cast<ImageWriter*>(arg)->CopyAndFixupObject(obj);
+}
+
+void ImageWriter::FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr,
+                                    mirror::Class* klass, Bin array_type) {
+  CHECK(klass->IsArrayClass());
+  CHECK(arr->IsIntArray() || arr->IsLongArray()) << PrettyClass(klass) << " " << arr;
+  // Fixup int and long pointers for the ArtMethod or ArtField arrays.
+  const size_t num_elements = arr->GetLength();
+  dst->SetClass(GetImageAddress(arr->GetClass()));
+  auto* dest_array = down_cast<mirror::PointerArray*>(dst);
+  for (size_t i = 0, count = num_elements; i < count; ++i) {
+    void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
+    if (elem != nullptr && !IsInBootImage(elem)) {
+      auto it = native_object_relocations_.find(elem);
+      if (UNLIKELY(it == native_object_relocations_.end())) {
+        // A relocation entry is missing. Note that `it` is not dereferenceable here, so report
+        // the failure based on the array's bin type instead.
+        if (array_type == kBinArtMethodClean || array_type == kBinArtMethodDirty) {
+          auto* method = reinterpret_cast<ArtMethod*>(elem);
+          LOG(FATAL) << "No relocation entry for ArtMethod " << PrettyMethod(method) << " @ "
+              << method << " idx=" << i << "/" << num_elements << " with declaring class "
+              << PrettyClass(method->GetDeclaringClass());
+        } else {
+          CHECK_EQ(array_type, kBinArtField);
+          auto* field = reinterpret_cast<ArtField*>(elem);
+          LOG(FATAL) << "No relocation entry for ArtField " << PrettyField(field) << " @ "
+              << field << " idx=" << i << "/" << num_elements << " with declaring class "
+              << PrettyClass(field->GetDeclaringClass());
+        }
+        UNREACHABLE();
+      } else {
+        ImageInfo& image_info = GetImageInfo(it->second.oat_index);
+        elem = image_info.image_begin_ + it->second.offset;
+      }
+    }
+    dest_array->SetElementPtrSize(i, elem, target_ptr_size_);
+  }
+}
+
+void ImageWriter::CopyAndFixupObject(Object* obj) {
+  if (IsInBootImage(obj)) {
+    return;
+  }
+  size_t offset = GetImageOffset(obj);
+  size_t oat_index = GetOatIndex(obj);
+  ImageInfo& image_info = GetImageInfo(oat_index);
+  auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
+  DCHECK_LT(offset, image_info.image_end_);
+  const auto* src = reinterpret_cast<const uint8_t*>(obj);
+
+  image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
+
+  const size_t n = obj->SizeOf();
+  DCHECK_LE(offset + n, image_info.image_->Size());
+  memcpy(dst, src, n);
+
+  // Write in a hash code of objects which have inflated monitors or a hash code in their monitor
+  // word.
+  const auto it = saved_hashcode_map_.find(obj);
+  dst->SetLockWord(it != saved_hashcode_map_.end() ?
+      LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false);
+  FixupObject(obj, dst);
+}
+
+// Rewrite all the references in the copied object to point to their image address equivalent.
+class FixupVisitor {
+ public:
+  FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
+  }
+
+  // Ignore class roots since we don't have a way to map them to the destination. These are
+  // handled with other logic.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+      const {}
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+  void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    Object* ref = obj->GetFieldObject<Object>(offset);
+    // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+    // image.
+    copy_->SetFieldObjectWithoutWriteBarrier</*kTransactionActive*/false>(
+        offset,
+        image_writer_->GetImageAddress(ref));
+  }
+
+  // java.lang.ref.Reference visitor.
+  void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+    copy_->SetFieldObjectWithoutWriteBarrier</*kTransactionActive*/false>(
+        mirror::Reference::ReferentOffset(),
+        image_writer_->GetImageAddress(ref->GetReferent()));
+  }
+
+ protected:
+  ImageWriter* const image_writer_;
+  mirror::Object* const copy_;
+};
+
+class FixupClassVisitor FINAL : public FixupVisitor {
+ public:
+  FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
+  }
+
+  void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    DCHECK(obj->IsClass());
+    FixupVisitor::operator()(obj, offset, /*is_static*/false);
+  }
+
+  void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
+                  mirror::Reference* ref ATTRIBUTE_UNUSED) const
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+    LOG(FATAL) << "Reference not expected here.";
+  }
+};
+
+uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
+  DCHECK(obj != nullptr);
+  DCHECK(!IsInBootImage(obj));
+  auto it = native_object_relocations_.find(obj);
+  CHECK(it != native_object_relocations_.end()) << obj << " spaces "
+      << Runtime::Current()->GetHeap()->DumpSpaces();
+  const NativeObjectRelocation& relocation = it->second;
+  return relocation.offset;
+}
+
+template <typename T>
+std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+  std::ostringstream oss;
+  oss << ptr;
+  return oss.str();
+}
+
+template <>
+std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+  return PrettyMethod(method);
+}
+
+template <typename T>
+T* ImageWriter::NativeLocationInImage(T* obj) {
+  if (obj == nullptr || IsInBootImage(obj)) {
+    return obj;
+  } else {
+    auto it = native_object_relocations_.find(obj);
+    CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
+        << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
+    const NativeObjectRelocation& relocation = it->second;
+    ImageInfo& image_info = GetImageInfo(relocation.oat_index);
+    return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
+  }
+}
+
+template <typename T>
+T* ImageWriter::NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) {
+  if (obj == nullptr || IsInBootImage(obj)) {
+    return obj;
+  } else {
+    size_t oat_index = GetOatIndexForDexCache(dex_cache);
+    ImageInfo& image_info = GetImageInfo(oat_index);
+    return reinterpret_cast<T*>(image_info.image_->Begin() + NativeOffsetInImage(obj));
+  }
+}
+
+class NativeLocationVisitor {
+ public:
+  explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+
+  template <typename T>
+  T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return image_writer_->NativeLocationInImage(ptr);
+  }
+
+ private:
+  ImageWriter* const image_writer_;
+};
+
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
+  orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
+  FixupClassVisitor visitor(this, copy);
+  static_cast<mirror::Object*>(orig)->VisitReferences(visitor, visitor);
+
+  // Remove the clinitThreadId. This is required for image determinism.
+  copy->SetClinitThreadId(static_cast<pid_t>(0));
+}
+
+void ImageWriter::FixupObject(Object* orig, Object* copy) {
+  DCHECK(orig != nullptr);
+  DCHECK(copy != nullptr);
+  if (kUseBakerOrBrooksReadBarrier) {
+    orig->AssertReadBarrierPointer();
+    if (kUseBrooksReadBarrier) {
+      // Note the address 'copy' isn't the same as the image address of 'orig'.
+      copy->SetReadBarrierPointer(GetImageAddress(orig));
+      DCHECK_EQ(copy->GetReadBarrierPointer(), GetImageAddress(orig));
+    }
+  }
+  auto* klass = orig->GetClass();
+  if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) {
+    // Is this a native pointer array?
+    auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
+    if (it != pointer_arrays_.end()) {
+      // Should only need to fix up each pointer array exactly once.
+      FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
+      pointer_arrays_.erase(it);
+      return;
+    }
+  }
+  if (orig->IsClass()) {
+    FixupClass(orig->AsClass(), down_cast<mirror::Class*>(copy));
+  } else {
+    if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) {
+      // Need to go update the ArtMethod.
+      auto* dest = down_cast<mirror::AbstractMethod*>(copy);
+      auto* src = down_cast<mirror::AbstractMethod*>(orig);
+      ArtMethod* src_method = src->GetArtMethod();
+      auto it = native_object_relocations_.find(src_method);
+      CHECK(it != native_object_relocations_.end())
+          << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
+      dest->SetArtMethod(
+          reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset));
+    } else if (!klass->IsArrayClass()) {
+      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+      if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) {
+        FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
+      } else if (klass->IsClassLoaderClass()) {
+        mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
+        // If src is a ClassLoader, set the class table to null so that it gets recreated by the
+        // ClassLoader.
+        copy_loader->SetClassTable(nullptr);
+        // Also set allocator to null to be safe. The allocator is created when we create the
+        // class table. We also never expect to unload things in the image since they are held
+        // live as roots.
+        copy_loader->SetAllocator(nullptr);
+      }
+    }
+    FixupVisitor visitor(this, copy);
+    orig->VisitReferences(visitor, visitor);
+  }
+}
+
+class ImageAddressVisitor {
+ public:
+  explicit ImageAddressVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+
+  template <typename T>
+  T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    return image_writer_->GetImageAddress(ptr);
+  }
+
+ private:
+  ImageWriter* const image_writer_;
+};
+
+void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
+                                mirror::DexCache* copy_dex_cache) {
+  // Though the DexCache array fields are usually treated as native pointers, we set the full
+  // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
+  // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
+  // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset)).
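+  // For example (illustrative addresses): with image_begin_ = 0x70000000 and offset = 0x100,
+  // the stored field is 0x0000000070000100; a 32-bit target reads the low word while a 64-bit
+  // target reads the full value.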
+  GcRoot<mirror::String>* orig_strings = orig_dex_cache->GetStrings();
+  if (orig_strings != nullptr) {
+    copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
+                                               NativeLocationInImage(orig_strings),
+                                               /*pointer size*/8u);
+    orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache),
+                                 ImageAddressVisitor(this));
+  }
+  GcRoot<mirror::Class>* orig_types = orig_dex_cache->GetResolvedTypes();
+  if (orig_types != nullptr) {
+    copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
+                                               NativeLocationInImage(orig_types),
+                                               /*pointer size*/8u);
+    orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
+                                       ImageAddressVisitor(this));
+  }
+  ArtMethod** orig_methods = orig_dex_cache->GetResolvedMethods();
+  if (orig_methods != nullptr) {
+    copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
+                                               NativeLocationInImage(orig_methods),
+                                               /*pointer size*/8u);
+    ArtMethod** copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
+    for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
+      ArtMethod* orig = mirror::DexCache::GetElementPtrSize(orig_methods, i, target_ptr_size_);
+      // NativeLocationInImage also handles runtime methods since these have relocation info.
+      ArtMethod* copy = NativeLocationInImage(orig);
+      mirror::DexCache::SetElementPtrSize(copy_methods, i, copy, target_ptr_size_);
+    }
+  }
+  ArtField** orig_fields = orig_dex_cache->GetResolvedFields();
+  if (orig_fields != nullptr) {
+    copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
+                                               NativeLocationInImage(orig_fields),
+                                               /*pointer size*/8u);
+    ArtField** copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
+    for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
+      ArtField* orig = mirror::DexCache::GetElementPtrSize(orig_fields, i, target_ptr_size_);
+      ArtField* copy = NativeLocationInImage(orig);
+      mirror::DexCache::SetElementPtrSize(copy_fields, i, copy, target_ptr_size_);
+    }
+  }
+
+  // Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file.
+  // Leaving compiler pointers in here will make the output non-deterministic.
+  copy_dex_cache->SetDexFile(nullptr);
+}
+
+const uint8_t* ImageWriter::GetOatAddress(OatAddress type) const {
+  DCHECK_LT(type, kOatAddressCount);
+  // If we are compiling an app image, we need to use the stubs of the boot image.
+  if (compile_app_image_) {
+    // Use the current image pointers.
+    const std::vector<gc::space::ImageSpace*>& image_spaces =
+        Runtime::Current()->GetHeap()->GetBootImageSpaces();
+    DCHECK(!image_spaces.empty());
+    const OatFile* oat_file = image_spaces[0]->GetOatFile();
+    CHECK(oat_file != nullptr);
+    const OatHeader& header = oat_file->GetOatHeader();
+    switch (type) {
+      // TODO: We could maybe clean this up if we stored them in an array in the oat header.
+      case kOatAddressQuickGenericJNITrampoline:
+        return static_cast<const uint8_t*>(header.GetQuickGenericJniTrampoline());
+      case kOatAddressInterpreterToInterpreterBridge:
+        return static_cast<const uint8_t*>(header.GetInterpreterToInterpreterBridge());
+      case kOatAddressInterpreterToCompiledCodeBridge:
+        return static_cast<const uint8_t*>(header.GetInterpreterToCompiledCodeBridge());
+      case kOatAddressJNIDlsymLookup:
+        return static_cast<const uint8_t*>(header.GetJniDlsymLookup());
+      case kOatAddressQuickIMTConflictTrampoline:
+        return static_cast<const uint8_t*>(header.GetQuickImtConflictTrampoline());
+      case kOatAddressQuickResolutionTrampoline:
+        return static_cast<const uint8_t*>(header.GetQuickResolutionTrampoline());
+      case kOatAddressQuickToInterpreterBridge:
+        return static_cast<const uint8_t*>(header.GetQuickToInterpreterBridge());
+      default:
+        UNREACHABLE();
+    }
+  }
+  const ImageInfo& primary_image_info = GetImageInfo(0);
+  return GetOatAddressForOffset(primary_image_info.oat_address_offsets_[type], primary_image_info);
+}
+
+const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
+                                         const ImageInfo& image_info,
+                                         bool* quick_is_interpreted) {
+  DCHECK(!method->IsResolutionMethod()) << PrettyMethod(method);
+  DCHECK_NE(method, Runtime::Current()->GetImtConflictMethod()) << PrettyMethod(method);
+  DCHECK(!method->IsImtUnimplementedMethod()) << PrettyMethod(method);
+  DCHECK(method->IsInvokable()) << PrettyMethod(method);
+  DCHECK(!IsInBootImage(method)) << PrettyMethod(method);
+
+  // Use original code if it exists. Otherwise, set the code pointer to the resolution
+  // trampoline.
+
+  // Quick entrypoint:
+  const void* quick_oat_entry_point =
+      method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_);
+  const uint8_t* quick_code;
+
+  if (UNLIKELY(IsInBootImage(method->GetDeclaringClass()))) {
+    DCHECK(method->IsCopied());
+    // If the code is not in the oat file corresponding to this image (e.g. default methods),
+    // use the entry point stored in the method itself.
+    quick_code = reinterpret_cast<const uint8_t*>(quick_oat_entry_point);
+  } else {
+    uint32_t quick_oat_code_offset = PointerToLowMemUInt32(quick_oat_entry_point);
+    quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
+  }
+
+  *quick_is_interpreted = false;
+  if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
+      method->GetDeclaringClass()->IsInitialized())) {
+    // We have code for a non-static or initialized method, just use the code.
+  } else if (quick_code == nullptr && method->IsNative() &&
+      (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
+    // Non-static or initialized native method missing compiled code, use generic JNI version.
+    quick_code = GetOatAddress(kOatAddressQuickGenericJNITrampoline);
+  } else if (quick_code == nullptr && !method->IsNative()) {
+    // We don't have code at all for a non-native method, use the interpreter.
+    quick_code = GetOatAddress(kOatAddressQuickToInterpreterBridge);
+    *quick_is_interpreted = true;
+  } else {
+    CHECK(!method->GetDeclaringClass()->IsInitialized());
+    // We have code for a static method, but need to go through the resolution stub for class
+    // initialization.
+    quick_code = GetOatAddress(kOatAddressQuickResolutionTrampoline);
+  }
+  if (!IsInBootOatFile(quick_code)) {
+    // DCHECK_GE(quick_code, oat_data_begin_);
+  }
+  return quick_code;
+}
+
+void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
+                                     ArtMethod* copy,
+                                     const ImageInfo& image_info) {
+  memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
+
+  copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked()));
+  ArtMethod** orig_resolved_methods = orig->GetDexCacheResolvedMethods(target_ptr_size_);
+  copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);
+  GcRoot<mirror::Class>* orig_resolved_types = orig->GetDexCacheResolvedTypes(target_ptr_size_);
+  copy->SetDexCacheResolvedTypes(NativeLocationInImage(orig_resolved_types), target_ptr_size_);
+
+  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
+  // oat_begin_.
+
+  // The resolution method has a special trampoline to call.
+  Runtime* runtime = Runtime::Current();
+  if (orig->IsRuntimeMethod()) {
+    ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
+    if (orig_table != nullptr) {
+      // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
+      copy->SetEntryPointFromQuickCompiledCodePtrSize(
+          GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
+      copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
+    } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
+      copy->SetEntryPointFromQuickCompiledCodePtrSize(
+          GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
+    } else {
+      bool found_one = false;
+      for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) {
+        auto idx = static_cast<Runtime::CalleeSaveType>(i);
+        if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
+          found_one = true;
+          break;
+        }
+      }
+      CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig);
+      CHECK(copy->IsRuntimeMethod());
+    }
+  } else {
+    // We assume all methods have code. If they don't currently then we set them to use the
+    // resolution trampoline. Abstract methods never have code and so we need to make sure their
+    // use results in an AbstractMethodError. We use the interpreter to achieve this.
+    if (UNLIKELY(!orig->IsInvokable())) {
+      copy->SetEntryPointFromQuickCompiledCodePtrSize(
+          GetOatAddress(kOatAddressQuickToInterpreterBridge), target_ptr_size_);
+    } else {
+      bool quick_is_interpreted;
+      const uint8_t* quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
+      copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
+
+      // JNI entrypoint:
+      if (orig->IsNative()) {
+        // The native method's pointer is set to a stub to lookup via dlsym.
+        // Note this is not the code_ pointer, that is handled above.
+        copy->SetEntryPointFromJniPtrSize(
+            GetOatAddress(kOatAddressJNIDlsymLookup), target_ptr_size_);
+      }
+    }
+  }
+}
+
+size_t ImageWriter::GetBinSizeSum(ImageWriter::ImageInfo& image_info, ImageWriter::Bin up_to) const {
+  DCHECK_LE(up_to, kBinSize);
+  return std::accumulate(&image_info.bin_slot_sizes_[0],
+                         &image_info.bin_slot_sizes_[up_to],
+                         /*init*/0);
+}
+
+ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) {
+  // These values may need to get updated if more bins are added to the enum Bin.
+  static_assert(kBinBits == 3, "wrong number of bin bits");
+  static_assert(kBinShift == 27, "wrong number of shift bits");
+  static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");
+
+  DCHECK_LT(GetBin(), kBinSize);
+  DCHECK_ALIGNED(GetIndex(), kObjectAlignment);
+}
+
+ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index)
+    : BinSlot(index | (static_cast<uint32_t>(bin) << kBinShift)) {
+  DCHECK_EQ(index, GetIndex());
+}
+
+ImageWriter::Bin ImageWriter::BinSlot::GetBin() const {
+  return static_cast<Bin>((lockword_ & kBinMask) >> kBinShift);
+}
+
+uint32_t ImageWriter::BinSlot::GetIndex() const {
+  return lockword_ & ~kBinMask;
+}
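
The BinSlot accessors above implement a small bit-packing scheme: per the static_asserts, three bin bits sit at shift 27, the low 27 bits hold the byte index within the bin, and the top two lock-word bits stay clear for read-barrier state. A standalone sketch of the same packing (illustrative names; not part of the patch itself):

  #include <cstdint>

  constexpr uint32_t kBinBits  = 3;   // per the static_asserts above
  constexpr uint32_t kBinShift = 27;  // per the static_asserts above
  constexpr uint32_t kBinMask  = ((1u << kBinBits) - 1u) << kBinShift;  // == 0x38000000

  constexpr uint32_t Pack(uint32_t bin, uint32_t index) { return index | (bin << kBinShift); }
  constexpr uint32_t GetBin(uint32_t lockword) { return (lockword & kBinMask) >> kBinShift; }
  constexpr uint32_t GetIndex(uint32_t lockword) { return lockword & ~kBinMask; }

  // An object 0x40 bytes into bin 5 (kBinString in the enum in image_writer.h):
  static_assert(GetBin(Pack(5u, 0x40u)) == 5u, "bin round-trips");
  static_assert(GetIndex(Pack(5u, 0x40u)) == 0x40u, "index round-trips");
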
+
+ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) {
+  switch (type) {
+    case kNativeObjectRelocationTypeArtField:
+    case kNativeObjectRelocationTypeArtFieldArray:
+      return kBinArtField;
+    case kNativeObjectRelocationTypeArtMethodClean:
+    case kNativeObjectRelocationTypeArtMethodArrayClean:
+      return kBinArtMethodClean;
+    case kNativeObjectRelocationTypeArtMethodDirty:
+    case kNativeObjectRelocationTypeArtMethodArrayDirty:
+      return kBinArtMethodDirty;
+    case kNativeObjectRelocationTypeDexCacheArray:
+      return kBinDexCacheArray;
+    case kNativeObjectRelocationTypeRuntimeMethod:
+      return kBinRuntimeMethod;
+    case kNativeObjectRelocationTypeIMTable:
+      return kBinImTable;
+    case kNativeObjectRelocationTypeIMTConflictTable:
+      return kBinIMTConflictTable;
+  }
+  UNREACHABLE();
+}
+
+size_t ImageWriter::GetOatIndex(mirror::Object* obj) const {
+  if (!IsMultiImage()) {
+    return GetDefaultOatIndex();
+  }
+  auto it = oat_index_map_.find(obj);
+  DCHECK(it != oat_index_map_.end());
+  return it->second;
+}
+
+size_t ImageWriter::GetOatIndexForDexFile(const DexFile* dex_file) const {
+  if (!IsMultiImage()) {
+    return GetDefaultOatIndex();
+  }
+  auto it = dex_file_oat_index_map_.find(dex_file);
+  DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
+  return it->second;
+}
+
+size_t ImageWriter::GetOatIndexForDexCache(mirror::DexCache* dex_cache) const {
+  if (dex_cache == nullptr) {
+    return GetDefaultOatIndex();
+  } else {
+    return GetOatIndexForDexFile(dex_cache->GetDexFile());
+  }
+}
+
+void ImageWriter::UpdateOatFileLayout(size_t oat_index,
+                                      size_t oat_loaded_size,
+                                      size_t oat_data_offset,
+                                      size_t oat_data_size) {
+  const uint8_t* images_end = image_infos_.back().image_begin_ + image_infos_.back().image_size_;
+  for (const ImageInfo& info : image_infos_) {
+    DCHECK_LE(info.image_begin_ + info.image_size_, images_end);
+  }
+  DCHECK(images_end != nullptr);  // Image space must be ready.
+
+  ImageInfo& cur_image_info = GetImageInfo(oat_index);
+  cur_image_info.oat_file_begin_ = images_end + cur_image_info.oat_offset_;
+  cur_image_info.oat_loaded_size_ = oat_loaded_size;
+  cur_image_info.oat_data_begin_ = cur_image_info.oat_file_begin_ + oat_data_offset;
+  cur_image_info.oat_size_ = oat_data_size;
+
+  if (compile_app_image_) {
+    CHECK_EQ(oat_filenames_.size(), 1u) << "App image should have no next image.";
+    return;
+  }
+
+  // Update the oat_offset of the next image info.
+  if (oat_index + 1u != oat_filenames_.size()) {
+    // There is a following one.
+    ImageInfo& next_image_info = GetImageInfo(oat_index + 1u);
+    next_image_info.oat_offset_ = cur_image_info.oat_offset_ + oat_loaded_size;
+  }
+}
+
+void ImageWriter::UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header) {
+  ImageInfo& cur_image_info = GetImageInfo(oat_index);
+  cur_image_info.oat_checksum_ = oat_header.GetChecksum();
+
+  if (oat_index == GetDefaultOatIndex()) {
+    // Primary oat file, read the trampolines.
+    cur_image_info.oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] =
+        oat_header.GetInterpreterToInterpreterBridgeOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] =
+        oat_header.GetInterpreterToCompiledCodeBridgeOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressJNIDlsymLookup] =
+        oat_header.GetJniDlsymLookupOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] =
+        oat_header.GetQuickGenericJniTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] =
+        oat_header.GetQuickImtConflictTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickResolutionTrampoline] =
+        oat_header.GetQuickResolutionTrampolineOffset();
+    cur_image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] =
+        oat_header.GetQuickToInterpreterBridgeOffset();
+  }
+}
+
+ImageWriter::ImageWriter(
+    const CompilerDriver& compiler_driver,
+    uintptr_t image_begin,
+    bool compile_pic,
+    bool compile_app_image,
+    ImageHeader::StorageMode image_storage_mode,
+    const std::vector<const char*>& oat_filenames,
+    const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map)
+    : compiler_driver_(compiler_driver),
+      global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
+      image_objects_offset_begin_(0),
+      compile_pic_(compile_pic),
+      compile_app_image_(compile_app_image),
+      target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+      image_infos_(oat_filenames.size()),
+      dirty_methods_(0u),
+      clean_methods_(0u),
+      image_storage_mode_(image_storage_mode),
+      oat_filenames_(oat_filenames),
+      dex_file_oat_index_map_(dex_file_oat_index_map) {
+  CHECK_NE(image_begin, 0U);
+  std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
+  CHECK_EQ(compile_app_image, !Runtime::Current()->GetHeap()->GetBootImageSpaces().empty())
+      << "Compiling a boot image should occur iff there are no boot image spaces loaded";
+}
+
+ImageWriter::ImageInfo::ImageInfo()
+    : intern_table_(new InternTable),
+      class_table_(new ClassTable) {}
+
+}  // namespace art
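
UpdateOatFileLayout above is what chains oat files one after another behind the images: each call records the current file's layout and seeds oat_offset_ for the next file. A toy walk-through with made-up numbers:

  images_end              = 0x71000000   (end of the last image; hypothetical)
  oat file 0: oat_offset_ = 0x0     -> oat_file_begin_ = 0x71000000, loaded size 0x40000
  oat file 1: oat_offset_ = 0x40000 -> oat_file_begin_ = 0x71040000

so next_image_info.oat_offset_ = cur_image_info.oat_offset_ + oat_loaded_size, exactly as in the code above.
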
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
new file mode 100644
index 000000000..37f108f66
--- /dev/null
+++ b/compiler/image_writer.h
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_IMAGE_WRITER_H_
+#define ART_COMPILER_IMAGE_WRITER_H_
+
+#include <stdint.h>
+#include "base/memory_tool.h"
+
+#include <cstddef>
+#include <memory>
+#include <set>
+#include <stack>
+#include <string>
+#include <ostream>
+
+#include "base/bit_utils.h"
+#include "base/dchecked_vector.h"
+#include "base/length_prefixed_array.h"
+#include "base/macros.h"
+#include "driver/compiler_driver.h"
+#include "gc/space/space.h"
+#include "image.h"
+#include "lock_word.h"
+#include "mem_map.h"
+#include "oat_file.h"
+#include "mirror/dex_cache.h"
+#include "os.h"
+#include "safe_map.h"
+#include "utils.h"
+
+namespace art {
+namespace gc {
+namespace space {
+class ImageSpace;
+}  // namespace space
+}  // namespace gc
+
+class ClassTable;
+
+static constexpr int kInvalidFd = -1;
+
+// Write a Space built during compilation for use during execution.
+class ImageWriter FINAL {
+ public:
+  ImageWriter(const CompilerDriver& compiler_driver,
+              uintptr_t image_begin,
+              bool compile_pic,
+              bool compile_app_image,
+              ImageHeader::StorageMode image_storage_mode,
+              const std::vector<const char*>& oat_filenames,
+              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);
+
+  bool PrepareImageAddressSpace();
+
+  bool IsImageAddressSpaceReady() const {
+    DCHECK(!image_infos_.empty());
+    for (const ImageInfo& image_info : image_infos_) {
+      if (image_info.image_roots_address_ == 0u) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  template <typename T>
+  T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
+    if (object == nullptr || IsInBootImage(object)) {
+      return object;
+    } else {
+      size_t oat_index = GetOatIndex(object);
+      const ImageInfo& image_info = GetImageInfo(oat_index);
+      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
+    }
+  }
+
+  ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  template <typename PtrType>
+  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
+      const SHARED_REQUIRES(Locks::mutator_lock_) {
+    auto oat_it = dex_file_oat_index_map_.find(dex_file);
+    DCHECK(oat_it != dex_file_oat_index_map_.end());
+    const ImageInfo& image_info = GetImageInfo(oat_it->second);
+    auto it = image_info.dex_cache_array_starts_.find(dex_file);
+    DCHECK(it != image_info.dex_cache_array_starts_.end());
+    return reinterpret_cast<PtrType>(
+        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
+        it->second + offset);
+  }
+
+  size_t GetOatFileOffset(size_t oat_index) const {
+    return GetImageInfo(oat_index).oat_offset_;
+  }
+
+  const uint8_t* GetOatFileBegin(size_t oat_index) const {
+    return GetImageInfo(oat_index).oat_file_begin_;
+  }
+
+  // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
+  // the names in image_filenames.
+  // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
+  // the names in oat_filenames.
+  bool Write(int image_fd,
+             const std::vector<const char*>& image_filenames,
+             const std::vector<const char*>& oat_filenames)
+      REQUIRES(!Locks::mutator_lock_);
+
+  uintptr_t GetOatDataBegin(size_t oat_index) {
+    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
+  }
+
+  // Get the index of the oat file containing the dex file.
+  //
+  // This "oat_index" is used to retrieve information about the memory layout
+  // of the oat file and its associated image file, needed for link-time patching
+  // of references to the image or across oat files.
+  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
+
+  // Get the index of the oat file containing the dex file served by the dex cache.
+  size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Update the oat layout for the given oat file.
+  // This will make the oat_offset for the next oat file valid.
+  void UpdateOatFileLayout(size_t oat_index,
+                           size_t oat_loaded_size,
+                           size_t oat_data_offset,
+                           size_t oat_data_size);
+  // Update information about the oat header, i.e. checksum and trampoline offsets.
+  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
+
+ private:
+  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;
+
+  bool AllocMemory();
+
+  // Mark the objects defined in this space in the given live bitmap.
+  void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Classify different kinds of bins that objects end up getting packed into during image writing.
+  // Ordered from dirtiest to cleanest (until ArtMethods).
+  enum Bin {
+    kBinMiscDirty,                     // Dex caches, object locks, etc...
+    kBinClassVerified,                 // Class verified, but initializers haven't been run.
+    // Unknown mix of clean/dirty:
+    kBinRegular,
+    kBinClassInitialized,              // Class initializers have been run.
+    // All classes get their own bins since their fields are often dirty.
+    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics.
+    // Likely-clean:
+    kBinString,                        // [String] Almost always immutable (except for obj header).
+    // Add more bins here if we add more segregation code.
+    // Non mirror fields must be below.
+    // ArtFields should be always clean.
+    kBinArtField,
+    // If the class is initialized, then the ArtMethods are probably clean.
+    kBinArtMethodClean,
+    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
+    // initialized.
+    kBinArtMethodDirty,
+    // IMT (clean).
+    kBinImTable,
+    // Conflict tables (clean).
+    kBinIMTConflictTable,
+    // Runtime methods (always clean, do not have a length prefix array).
+    kBinRuntimeMethod,
+    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
+    // huge, and as such their dirtiness is not important for the clean/dirty separation,
+    // we arbitrarily keep them at the end of the native data.
+    kBinDexCacheArray,                 // Arrays belonging to dex cache.
+    kBinSize,
+    // Number of bins which are for mirror objects.
+    kBinMirrorCount = kBinArtField,
+  };
+  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);
+
+  enum NativeObjectRelocationType {
+    kNativeObjectRelocationTypeArtField,
+    kNativeObjectRelocationTypeArtFieldArray,
+    kNativeObjectRelocationTypeArtMethodClean,
+    kNativeObjectRelocationTypeArtMethodArrayClean,
+    kNativeObjectRelocationTypeArtMethodDirty,
+    kNativeObjectRelocationTypeArtMethodArrayDirty,
+    kNativeObjectRelocationTypeRuntimeMethod,
+    kNativeObjectRelocationTypeIMTable,
+    kNativeObjectRelocationTypeIMTConflictTable,
+    kNativeObjectRelocationTypeDexCacheArray,
+  };
+  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
+
+  enum OatAddress {
+    kOatAddressInterpreterToInterpreterBridge,
+    kOatAddressInterpreterToCompiledCodeBridge,
+    kOatAddressJNIDlsymLookup,
+    kOatAddressQuickGenericJNITrampoline,
+    kOatAddressQuickIMTConflictTrampoline,
+    kOatAddressQuickResolutionTrampoline,
+    kOatAddressQuickToInterpreterBridge,
+    // Number of elements in the enum.
+    kOatAddressCount,
+  };
+  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);
+
+  static constexpr size_t kBinBits = MinimumBitsToStore(kBinMirrorCount - 1);
+  // uint32 = typeof(lockword_)
+  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
+  // failures due to invalid read barrier bits during object field reads.
+  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits -
+      LockWord::kReadBarrierStateSize;
+  // 111000.....0
+  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;
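
Spelling out the constants just defined (kBinMirrorCount equals kBinArtField, the seventh enumerator, so its value is 6; the static_asserts in image_writer.cc pin kBinShift at 27):

  kBinBits  = MinimumBitsToStore(kBinMirrorCount - 1) = MinimumBitsToStore(5) = 3
  kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kReadBarrierStateSize
            = 32 - 3 - 2 = 27   (so the read-barrier state occupies the top two bits)
  kBinMask  = ((1 << 3) - 1) << 27 = 0x7 << 27 = 0x38000000   (bits 29..27)
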
+
+  // We use the lock word to store the bin # and bin index of the object in the image.
+  //
+  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
+  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
+  struct BinSlot {
+    explicit BinSlot(uint32_t lockword);
+    BinSlot(Bin bin, uint32_t index);
+
+    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
+    Bin GetBin() const;
+    // The offset in bytes from the beginning of the bin. Aligned to object size.
+    uint32_t GetIndex() const;
+    // Pack into a single uint32_t, for storing into a lock word.
+    uint32_t Uint32Value() const { return lockword_; }
+    // Comparison operator for map support.
+    bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; }
+
+   private:
+    // Must be the same size as LockWord, any larger and we would truncate the data.
+    const uint32_t lockword_;
+  };
+
+  struct ImageInfo {
+    ImageInfo();
+    ImageInfo(ImageInfo&&) = default;
+
+    // Create the image sections into the out sections variable, returns the size of the image
+    // excluding the bitmap.
+    size_t CreateImageSections(ImageSection* out_sections) const;
+
+    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.
+
+    // Target begin of this image. Notes: It is not valid to write here, this is the address
+    // of the target image, not necessarily where image_ is mapped. The address is only valid
+    // after the layout has been computed (otherwise null).
+    uint8_t* image_begin_ = nullptr;
+
+    // Offset to the free space in image_, initially size of image header.
+    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
+    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
+    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.
+
+    // Image size is the *address space* covered by this image. As the live bitmap is aligned
+    // to the page size, the live bitmap will cover more address space than necessary. But live
+    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
+    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
+    // page-aligned).
+    size_t image_size_ = 0;
+
+    // Oat data.
+    // Offset of the oat file for this image from start of oat files. This is
+    // valid when the previous oat file has been written.
+    size_t oat_offset_ = 0;
+    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
+    const uint8_t* oat_file_begin_ = nullptr;
+    size_t oat_loaded_size_ = 0;
+    const uint8_t* oat_data_begin_ = nullptr;
+    size_t oat_size_ = 0;  // Size of the corresponding oat data.
+    // The oat header checksum, valid after UpdateOatFileHeader().
+    uint32_t oat_checksum_ = 0u;
+
+    // Image bitmap which lets us know where the objects inside of the image reside.
+    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
+
+    // The start offsets of the dex cache arrays.
+    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
+
+    // Offset from oat_data_begin_ to the stubs.
+    uint32_t oat_address_offsets_[kOatAddressCount] = {};
+
+    // Bin slot tracking for dirty object packing.
+    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
+    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
+    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.
+
+    // Cached size of the intern table for when we allocate memory.
+    size_t intern_table_bytes_ = 0;
+
+    // Number of image class table bytes.
+    size_t class_table_bytes_ = 0;
+
+    // Intern table associated with this image for serialization.
+    std::unique_ptr<InternTable> intern_table_;
+
+    // Class table associated with this image for serialization.
+    std::unique_ptr<ClassTable> class_table_;
+  };
+
+  // We use the lock word to store the offset of the object in the image.
+  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetImageOffset(mirror::Object* object, size_t offset)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsImageOffsetAssigned(mirror::Object* object) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
+  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  bool IsImageBinSlotAssigned(mirror::Object* object) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
+  }
+
+  mirror::Object* GetLocalAddress(mirror::Object* object) const
+      SHARED_REQUIRES(Locks::mutator_lock_) {
+    size_t offset = GetImageOffset(object);
+    size_t oat_index = GetOatIndex(object);
+    const ImageInfo& image_info = GetImageInfo(oat_index);
+    uint8_t* dst = image_info.image_->Begin() + offset;
+    return reinterpret_cast<mirror::Object*>(dst);
+  }
+
+  // Returns the address in the boot image if we are compiling the app image.
+  const uint8_t* GetOatAddress(OatAddress type) const;
+
+  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
+    // With Quick, code is within the OatFile, as they are all in one
+    // .o ELF object. The offset is interpreted as signed.
+    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
+    DCHECK(image_info.oat_data_begin_ != nullptr);
+    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
+  }
+
+  // Returns true if the class was in the original requested image classes list.
+  bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Debug aid that lists the requested image classes.
+  void DumpImageClasses();
+
+  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
+  void ComputeLazyFieldsForImageClasses()
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Remove unwanted classes from various roots.
+  void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Verify unwanted classes removed.
+  void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Lays out where the image objects will be at runtime.
+  void CalculateNewObjectOffsets()
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void ProcessWorkStack(WorkStack* work_stack)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void CreateHeader(size_t oat_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void UnbinObjectsIntoOffset(mirror::Object* obj)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Creates the contiguous image in memory and adjusts pointers.
+  void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
+  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
+  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void FixupClass(mirror::Class* orig, mirror::Class* copy)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void FixupObject(mirror::Object* orig, mirror::Object* copy)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  void FixupPointerArray(mirror::Object* dst,
+                         mirror::PointerArray* arr,
+                         mirror::Class* klass,
+                         Bin array_type)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Get quick code for non-resolution/imt_conflict/abstract method.
+  const uint8_t* GetQuickCode(ArtMethod* method,
+                              const ImageInfo& image_info,
+                              bool* quick_is_interpreted)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
+  size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
+
+  // Return true if a method is likely to be dirtied at runtime.
+  bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Assign the offset for an ArtMethod.
+  void AssignMethodOffset(ArtMethod* method,
+                          NativeObjectRelocationType type,
+                          size_t oat_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
+  // relocation.
+  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Return true if klass is loaded by the boot class loader but not in the boot image.
+  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Return true if klass depends on a boot class loader non image class. We want to prune these
+  // classes since we do not want any boot class loader classes in the image. This means that
+  // we also cannot have any classes which refer to these boot class loader non image classes.
+  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
+  // driver.
+  bool PruneAppImageClass(mirror::Class* klass)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // early_exit is true if we had a cyclic dependency anywhere down the chain.
+  bool PruneAppImageClassInternal(mirror::Class* klass,
+                                  bool* early_exit,
+                                  std::unordered_set<mirror::Class*>* visited)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  bool IsMultiImage() const {
+    return image_infos_.size() > 1;
+  }
+
+  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
+
+  uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Location of where the object will be when the image is loaded at runtime.
+  template <typename T>
+  T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Location of where the temporary copy of the object currently is.
+  template <typename T>
+  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Return true if obj is inside of the boot image space. This may only return true if we are
+  // compiling an app image.
+  bool IsInBootImage(const void* obj) const;
+
+  // Return true if ptr is within the boot oat file.
+  bool IsInBootOatFile(const void* ptr) const;
+
+  // Get the index of the oat file associated with the object.
+  size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // The oat index for shared data in multi-image and all data in single-image compilation.
+  size_t GetDefaultOatIndex() const {
+    return 0u;
+  }
+
+  ImageInfo& GetImageInfo(size_t oat_index) {
+    return image_infos_[oat_index];
+  }
+
+  const ImageInfo& GetImageInfo(size_t oat_index) const {
+    return image_infos_[oat_index];
+  }
+
+  // Find an already strong interned string in the other images or in the boot image. Used to
+  // remove duplicates in the multi image and app image case.
+  mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Return true if there already exists a native allocation for an object.
+  bool NativeRelocationAssigned(void* ptr) const;
+
+  const CompilerDriver& compiler_driver_;
+
+  // Beginning target image address for the first image.
+  uint8_t* global_image_begin_;
+
+  // Offset from image_begin_ to where the first object is in image_.
+  size_t image_objects_offset_begin_;
+
+  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
+  // to keep track. These include vtable arrays, iftable arrays, and dex caches.
+  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;
+
+  // Saved hash codes. We use these to restore lockwords which were temporarily used to have
+  // forwarding addresses as well as copying over hash codes.
+  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;
+
+  // Oat index map for objects.
+  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;
+
+  // Boolean flags.
+  const bool compile_pic_;
+  const bool compile_app_image_;
+
+  // Size of pointers on the target architecture.
+  size_t target_ptr_size_;
+
+  // Image data indexed by the oat file index.
+  dchecked_vector<ImageInfo> image_infos_;
+
+  // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
+  // have one entry per art field for convenience. ArtFields are placed right after the end of the
+  // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
+  struct NativeObjectRelocation {
+    size_t oat_index;
+    uintptr_t offset;
+    NativeObjectRelocationType type;
+
+    bool IsArtMethodRelocation() const {
+      return type == kNativeObjectRelocationTypeArtMethodClean ||
+          type == kNativeObjectRelocationTypeArtMethodDirty ||
+          type == kNativeObjectRelocationTypeRuntimeMethod;
+    }
+  };
+  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
+
+  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
+  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];
+
+  // Counters for measurements, used for logging only.
+  uint64_t dirty_methods_;
+  uint64_t clean_methods_;
+
+  // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
+  std::unordered_map<mirror::Class*, bool> prune_class_memo_;
+
+  // Class loaders with a class table to write out. There should only be one class loader because
+  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
+  // null is a valid entry.
+  std::unordered_set<mirror::ClassLoader*> class_loaders_;
+
+  // Which mode the image is stored as, see image.h
+  const ImageHeader::StorageMode image_storage_mode_;
+
+  // The file names of oat files.
+  const std::vector<const char*>& oat_filenames_;
+
+  // Map of dex files to the indexes of oat files that they were compiled into.
+  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
+
+  friend class ContainsBootClassLoaderNonImageClassVisitor;
+  friend class FixupClassVisitor;
+  friend class FixupRootVisitor;
+  friend class FixupVisitor;
+  class GetRootsVisitor;
+  friend class NativeLocationVisitor;
+  friend class NonImageClassesVisitor;
+  class VisitReferencesVisitor;
+  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_IMAGE_WRITER_H_
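
Taken together, the public API above implies a particular call order: layout first, then one UpdateOatFileLayout/UpdateOatFileHeader pair per oat file (each call makes the next file's oat_offset_ valid), then Write. A rough sketch of how a driver such as dex2oat might sequence it; the loop arrays (oat_loaded_size, oat_data_offset, oat_data_size, oat_headers) and file-name vectors are hypothetical stand-ins for values produced by the oat writer:

  // Illustrative only; not part of this patch.
  ImageWriter writer(driver, image_begin, /*compile_pic=*/true, /*compile_app_image=*/false,
                     ImageHeader::kStorageModeUncompressed, oat_filenames,
                     dex_file_oat_index_map);
  CHECK(writer.PrepareImageAddressSpace());
  for (size_t i = 0; i != oat_filenames.size(); ++i) {
    // Sizes/offsets would come from the oat writer for oat file i.
    writer.UpdateOatFileLayout(i, oat_loaded_size[i], oat_data_offset[i], oat_data_size[i]);
    writer.UpdateOatFileHeader(i, *oat_headers[i]);
  }
  writer.Write(kInvalidFd, image_filenames, oat_filenames);
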
+ */ + +#include "jit_compiler.h" + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "art_method-inl.h" +#include "base/stringpiece.h" +#include "base/time_utils.h" +#include "base/timing_logger.h" +#include "base/unix_file/fd_file.h" +#include "debug/elf_debug_writer.h" +#include "driver/compiler_driver.h" +#include "driver/compiler_options.h" +#include "jit/debugger_interface.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" +#include "oat_file-inl.h" +#include "oat_quick_method_header.h" +#include "object_lock.h" +#include "thread_list.h" + +namespace art { +namespace jit { + +JitCompiler* JitCompiler::Create() { + return new JitCompiler(); +} + +extern "C" void* jit_load(bool* generate_debug_info) { + VLOG(jit) << "loading jit compiler"; + auto* const jit_compiler = JitCompiler::Create(); + CHECK(jit_compiler != nullptr); + *generate_debug_info = jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo(); + VLOG(jit) << "Done loading jit compiler"; + return jit_compiler; +} + +extern "C" void jit_unload(void* handle) { + DCHECK(handle != nullptr); + delete reinterpret_cast(handle); +} + +extern "C" bool jit_compile_method( + void* handle, ArtMethod* method, Thread* self, bool osr) + SHARED_REQUIRES(Locks::mutator_lock_) { + auto* jit_compiler = reinterpret_cast(handle); + DCHECK(jit_compiler != nullptr); + return jit_compiler->CompileMethod(self, method, osr); +} + +extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count) + SHARED_REQUIRES(Locks::mutator_lock_) { + auto* jit_compiler = reinterpret_cast(handle); + DCHECK(jit_compiler != nullptr); + if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) { + const ArrayRef types_array(types, count); + std::vector elf_file = debug::WriteDebugElfFileForClasses( + kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array); + CreateJITCodeEntry(std::move(elf_file)); + } +} + +// Callers of this method assume it has NO_RETURN. +NO_RETURN static void Usage(const char* fmt, ...) { + va_list ap; + va_start(ap, fmt); + std::string error; + StringAppendV(&error, fmt, ap); + LOG(FATAL) << error; + va_end(ap); + exit(EXIT_FAILURE); +} + +JitCompiler::JitCompiler() { + compiler_options_.reset(new CompilerOptions( + CompilerOptions::kDefaultCompilerFilter, + CompilerOptions::kDefaultHugeMethodThreshold, + CompilerOptions::kDefaultLargeMethodThreshold, + CompilerOptions::kDefaultSmallMethodThreshold, + CompilerOptions::kDefaultTinyMethodThreshold, + CompilerOptions::kDefaultNumDexMethodsThreshold, + CompilerOptions::kDefaultInlineDepthLimit, + CompilerOptions::kDefaultInlineMaxCodeUnits, + /* no_inline_from */ nullptr, + /* include_patch_information */ false, + CompilerOptions::kDefaultTopKProfileThreshold, + Runtime::Current()->IsDebuggable(), + CompilerOptions::kDefaultGenerateDebugInfo, + /* implicit_null_checks */ true, + /* implicit_so_checks */ true, + /* implicit_suspend_checks */ false, + /* pic */ true, // TODO: Support non-PIC in optimizing. 
+
+// Callers of this method assume it has NO_RETURN.
+NO_RETURN static void Usage(const char* fmt, ...) {
+  va_list ap;
+  va_start(ap, fmt);
+  std::string error;
+  StringAppendV(&error, fmt, ap);
+  LOG(FATAL) << error;
+  va_end(ap);
+  exit(EXIT_FAILURE);
+}
+
+JitCompiler::JitCompiler() {
+  compiler_options_.reset(new CompilerOptions(
+      CompilerOptions::kDefaultCompilerFilter,
+      CompilerOptions::kDefaultHugeMethodThreshold,
+      CompilerOptions::kDefaultLargeMethodThreshold,
+      CompilerOptions::kDefaultSmallMethodThreshold,
+      CompilerOptions::kDefaultTinyMethodThreshold,
+      CompilerOptions::kDefaultNumDexMethodsThreshold,
+      CompilerOptions::kDefaultInlineDepthLimit,
+      CompilerOptions::kDefaultInlineMaxCodeUnits,
+      /* no_inline_from */ nullptr,
+      /* include_patch_information */ false,
+      CompilerOptions::kDefaultTopKProfileThreshold,
+      Runtime::Current()->IsDebuggable(),
+      CompilerOptions::kDefaultGenerateDebugInfo,
+      /* implicit_null_checks */ true,
+      /* implicit_so_checks */ true,
+      /* implicit_suspend_checks */ false,
+      /* pic */ true,  // TODO: Support non-PIC in optimizing.
+      /* verbose_methods */ nullptr,
+      /* init_failure_output */ nullptr,
+      /* abort_on_hard_verifier_failure */ false,
+      /* dump_cfg_file_name */ "",
+      /* dump_cfg_append */ false,
+      /* force_determinism */ false));
+  for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
+    compiler_options_->ParseCompilerOption(argument, Usage);
+  }
+  const InstructionSet instruction_set = kRuntimeISA;
+  for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+    VLOG(compiler) << "JIT compiler option " << option;
+    std::string error_msg;
+    if (option.starts_with("--instruction-set-variant=")) {
+      StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+      VLOG(compiler) << "JIT instruction set variant " << str;
+      instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+          instruction_set, str.as_string(), &error_msg));
+      if (instruction_set_features_ == nullptr) {
+        LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+      }
+    } else if (option.starts_with("--instruction-set-features=")) {
+      StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+      VLOG(compiler) << "JIT instruction set features " << str;
+      if (instruction_set_features_.get() == nullptr) {
+        instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+            instruction_set, "default", &error_msg));
+        if (instruction_set_features_ == nullptr) {
+          LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+        }
+      }
+      instruction_set_features_.reset(
+          instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+      if (instruction_set_features_ == nullptr) {
+        LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+      }
+    }
+  }
+  if (instruction_set_features_ == nullptr) {
+    instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+  }
+  cumulative_logger_.reset(new CumulativeLogger("jit times"));
+  method_inliner_map_.reset(new DexFileToMethodInlinerMap);
+  compiler_driver_.reset(new CompilerDriver(
+      compiler_options_.get(),
+      /* verification_results */ nullptr,
+      method_inliner_map_.get(),
+      Compiler::kOptimizing,
+      instruction_set,
+      instruction_set_features_.get(),
+      /* boot_image */ false,
+      /* app_image */ false,
+      /* image_classes */ nullptr,
+      /* compiled_classes */ nullptr,
+      /* compiled_methods */ nullptr,
+      /* thread_count */ 1,
+      /* dump_stats */ false,
+      /* dump_passes */ false,
+      cumulative_logger_.get(),
+      /* swap_fd */ -1,
+      /* profile_compilation_info */ nullptr));
+  // Disable dedupe so we can remove compiled methods.
+  compiler_driver_->SetDedupeEnabled(false);
+  compiler_driver_->SetSupportBootImageFixup(false);
+
+  size_t thread_count = compiler_driver_->GetThreadCount();
+  if (compiler_options_->GetGenerateDebugInfo()) {
+#ifdef __ANDROID__
+    const char* prefix = "/data/misc/trace";
+#else
+    const char* prefix = "/tmp";
+#endif
+    DCHECK_EQ(thread_count, 1u)
+        << "Generating debug info only works with one compiler thread";
+    std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map";
+    perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str()));
+    if (perf_file_ == nullptr) {
+      LOG(ERROR) << "Could not create perf file at " << perf_filename <<
+                    " Are you on a user build? Perf only works on userdebug/eng builds";
+    }
+  }
+
+  size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit();
+  DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max())
+      << "ProfilingInfo's inline counter can potentially overflow";
+}
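
The perf-<pid>.map file created above follows the standard Linux perf JIT map convention: one line per JITted method, "start-address size symbol-name", with both numbers in hex (CompileMethod below streams the size while std::hex is still in effect). A hypothetical line, with made-up address and size:

  7f5c2e9000 13c java.lang.String java.lang.String.concat(java.lang.String)
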
+
+JitCompiler::~JitCompiler() {
+  if (perf_file_ != nullptr) {
+    UNUSED(perf_file_->Flush());
+    UNUSED(perf_file_->Close());
+  }
+}
+
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+  DCHECK(!method->IsProxyMethod());
+  TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
+  StackHandleScope<2> hs(self);
+  self->AssertNoPendingException();
+  Runtime* runtime = Runtime::Current();
+
+  // Ensure the class is initialized.
+  Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
+  if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+    VLOG(jit) << "JIT failed to initialize " << PrettyMethod(method);
+    return false;
+  }
+
+  // Do the compilation.
+  bool success = false;
+  {
+    TimingLogger::ScopedTiming t2("Compiling", &logger);
+    JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
+    success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr);
+    if (success && (perf_file_ != nullptr)) {
+      const void* ptr = method->GetEntryPointFromQuickCompiledCode();
+      std::ostringstream stream;
+      stream << std::hex
+             << reinterpret_cast<uintptr_t>(ptr)
+             << " "
+             << code_cache->GetMemorySizeOfCodePointer(ptr)
+             << " "
+             << PrettyMethod(method)
+             << std::endl;
+      std::string str = stream.str();
+      bool res = perf_file_->WriteFully(str.c_str(), str.size());
+      CHECK(res);
+    }
+  }
+
+  // Trim maps to reduce memory usage.
+  // TODO: move this to an idle phase.
+  {
+    TimingLogger::ScopedTiming t2("TrimMaps", &logger);
+    runtime->GetJitArenaPool()->TrimMaps();
+  }
+
+  runtime->GetJit()->AddTimingLogger(logger);
+  return success;
+}
+
+}  // namespace jit
+}  // namespace art
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
new file mode 100644
index 000000000..533dccf21
--- /dev/null
+++ b/compiler/jit/jit_compiler.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JIT_JIT_COMPILER_H_
+#define ART_COMPILER_JIT_JIT_COMPILER_H_
+
+#include "base/mutex.h"
+#include "compiled_method.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+
+namespace art {
+
+class ArtMethod;
+class InstructionSetFeatures;
+
+namespace jit {
+
+class JitCompiler {
+ public:
+  static JitCompiler* Create();
+  virtual ~JitCompiler();
+
+  // Compilation entrypoint. Returns whether the compilation succeeded.
+  bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  CompilerOptions* GetCompilerOptions() const {
+    return compiler_options_.get();
+  }
+  CompilerDriver* GetCompilerDriver() const {
+    return compiler_driver_.get();
+  }
+
+ private:
+  std::unique_ptr<CompilerOptions> compiler_options_;
+  std::unique_ptr<CumulativeLogger> cumulative_logger_;
+  std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
+  std::unique_ptr<CompilerDriver> compiler_driver_;
+  std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+  std::unique_ptr<File> perf_file_;
+
+  JitCompiler();
+
+  // This is in the compiler since the runtime doesn't have access to the compiled method
+  // structures.
+  bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(JitCompiler);
+};
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_COMPILER_JIT_JIT_COMPILER_H_
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
new file mode 100644
index 000000000..05c85e027
--- /dev/null
+++ b/compiler/jni/jni_cfi_test.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
+#include "cfi_test.h"
+#include "gtest/gtest.h"
+#include "jni/quick/calling_convention.h"
+#include "utils/assembler.h"
+
+#include "jni/jni_cfi_test_expected.inc"
+
+namespace art {
+
+// Run the tests only on host.
+#ifndef __ANDROID__
+
+class JNICFITest : public CFITest {
+ public:
+  // Enable this flag to generate the expected outputs.
+  static constexpr bool kGenerateExpected = false;
+
+  void TestImpl(InstructionSet isa, const char* isa_str,
+                const std::vector<uint8_t>& expected_asm,
+                const std::vector<uint8_t>& expected_cfi) {
+    // Description of simple method.
+    const bool is_static = true;
+    const bool is_synchronized = false;
+    const char* shorty = "IIFII";
+
+    ArenaPool pool;
+    ArenaAllocator arena(&pool);
+
+    std::unique_ptr<JniCallingConvention> jni_conv(
+        JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
+    std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
+        ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa));
+    const int frame_size(jni_conv->FrameSize());
+    const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
+
+    // Assemble the method.
+    std::unique_ptr<Assembler> jni_asm(Assembler::Create(&arena, isa));
+    jni_asm->cfi().SetEnabled(true);
+    jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(),
+                        callee_save_regs, mr_conv->EntrySpills());
+    jni_asm->IncreaseFrameSize(32);
+    jni_asm->DecreaseFrameSize(32);
+    jni_asm->RemoveFrame(frame_size, callee_save_regs);
+    jni_asm->FinalizeCode();
+    std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
+    MemoryRegion code(&actual_asm[0], actual_asm.size());
+    jni_asm->FinalizeInstructions(code);
+    ASSERT_EQ(jni_asm->cfi().GetCurrentCFAOffset(), frame_size);
+    const std::vector<uint8_t>& actual_cfi = *(jni_asm->cfi().data());
+
+    if (kGenerateExpected) {
+      GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
+    } else {
+      EXPECT_EQ(expected_asm, actual_asm);
+      EXPECT_EQ(expected_cfi, actual_cfi);
+    }
+  }
+};
+
+#define TEST_ISA(isa) \
+  TEST_F(JNICFITest, isa) { \
+    std::vector<uint8_t> expected_asm(expected_asm_##isa, \
+        expected_asm_##isa + arraysize(expected_asm_##isa)); \
+    std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
+        expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+    TestImpl(isa, #isa, expected_asm, expected_cfi); \
+  }
+
+TEST_ISA(kThumb2)
+TEST_ISA(kArm64)
+TEST_ISA(kX86)
+TEST_ISA(kX86_64)
+TEST_ISA(kMips)
+TEST_ISA(kMips64)
+
+#endif  // __ANDROID__
+
+}  // namespace art
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
new file mode 100644
index 000000000..16b438693
--- /dev/null
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -0,0 +1,470 @@
+static constexpr uint8_t expected_asm_kThumb2[] = {
+    0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90,
+    0xCD, 0xF8, 0x84, 0x10, 0x8D, 0xED, 0x22, 0x0A, 0xCD, 0xF8, 0x8C, 0x20,
+    0xCD, 0xF8, 0x90, 0x30, 0x88, 0xB0, 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC,
+    0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x8D,
+};
+static constexpr uint8_t expected_cfi_kThumb2[] = {
+    0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A,
+    0x03, 0x8B, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x5C, 0x05, 0x50, 0x17, 0x05,
+    0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05,
+    0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05,
+    0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05,
+    0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01,
+    0x54, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C,
+    0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06,
+    0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06,
+    0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x44,
+    0x0B, 0x0E, 0x80, 0x01,
+};
+// 0x00000000: push {r5, r6, r7, r8, r10, r11, lr}
+// 0x00000004: .cfi_def_cfa_offset: 28
+// 0x00000004: .cfi_offset: r5 at cfa-28
+// 0x00000004: .cfi_offset: r6 at cfa-24
+// 0x00000004: .cfi_offset: r7 at cfa-20
+// 0x00000004: .cfi_offset: r8 at cfa-16
+// 0x00000004: .cfi_offset: r10 at cfa-12
+// 0x00000004: .cfi_offset: r11 at cfa-8
+// 0x00000004: .cfi_offset: r14 at cfa-4
+// 0x00000004: vpush.f32 {s16-s31}
+// 0x00000008: .cfi_def_cfa_offset: 92
+// 0x00000008: .cfi_offset_extended: r80 at cfa-92
+// 0x00000008: .cfi_offset_extended: r81 at cfa-88
+// 0x00000008: .cfi_offset_extended: r82 at cfa-84
+// 0x00000008: .cfi_offset_extended: r83 at cfa-80
+// 0x00000008: .cfi_offset_extended: r84 at cfa-76
+// 0x00000008: .cfi_offset_extended: r85 at cfa-72
+// 0x00000008: .cfi_offset_extended: r86 at cfa-68
+// 0x00000008: .cfi_offset_extended: r87 at cfa-64
+// 0x00000008: .cfi_offset_extended: r88 at cfa-60
+// 0x00000008: .cfi_offset_extended: r89 at cfa-56
+// 0x00000008: .cfi_offset_extended: r90 at cfa-52
+// 0x00000008: .cfi_offset_extended: r91 at cfa-48
+// 0x00000008: .cfi_offset_extended: r92 at cfa-44
+// 0x00000008: .cfi_offset_extended: r93 at cfa-40
+// 0x00000008: .cfi_offset_extended: r94 at cfa-36
+// 0x00000008: .cfi_offset_extended: r95 at cfa-32
+// 0x00000008: sub sp, sp, #36
+// 0x0000000a: .cfi_def_cfa_offset: 128
+// 0x0000000a: str r0, [sp, #0]
+// 0x0000000c: str.w r1, [sp, #132]
+// 0x00000010: vstr.f32 s0, [sp, #136]
+// 0x00000014: str.w r2, [sp, #140]
+// 0x00000018: str.w r3, [sp, #144]
+// 0x0000001c: sub sp, sp, #32
+// 0x0000001e: .cfi_def_cfa_offset: 160
+// 0x0000001e: add sp, sp, #32
+// 0x00000020: .cfi_def_cfa_offset: 128
+// 0x00000020: .cfi_remember_state
+// 0x00000020: add sp, sp, #36
+// 0x00000022: .cfi_def_cfa_offset: 92
+// 0x00000022: vpop.f32 {s16-s31}
+// 0x00000026: .cfi_def_cfa_offset: 28
+// 0x00000026: .cfi_restore_extended: r80
+// 0x00000026: .cfi_restore_extended: r81
+// 0x00000026: .cfi_restore_extended: r82
+// 0x00000026: .cfi_restore_extended: r83
+// 0x00000026: .cfi_restore_extended: r84
+// 0x00000026: .cfi_restore_extended: r85
+// 0x00000026: .cfi_restore_extended: r86
+// 0x00000026: .cfi_restore_extended: r87
+// 0x00000026: .cfi_restore_extended: r88
+// 0x00000026: .cfi_restore_extended: r89
+// 0x00000026: .cfi_restore_extended: r90
+// 0x00000026: .cfi_restore_extended: r91
+// 0x00000026: .cfi_restore_extended: r92
+// 0x00000026: .cfi_restore_extended: r93
+// 0x00000026: .cfi_restore_extended: r94
+// 0x00000026: .cfi_restore_extended: r95
+// 0x00000026: pop {r5, r6, r7, r8, r10, r11, pc}
+// 0x0000002a: .cfi_restore_state
+// 0x0000002a: .cfi_def_cfa_offset: 128
+
+static constexpr uint8_t expected_asm_kArm64[] = {
+    0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
+    0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
+    0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
+    0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xF9,
+    0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, 0xE2, 0xD3, 0x00, 0xB9,
+    0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
+    0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9,
+    0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9,
+    0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D,
+    0xEE, 0x3F, 0x45, 0x6D, 0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+};
+static constexpr uint8_t expected_cfi_kArm64[] = {
+    0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
+    0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A,
+    0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05,
+    0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22,
+    0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05,
+    0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44,
+    0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44,
+    0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06,
+    0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06,
+    0x4F, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+};
+// 0x00000000: sub sp, sp, #0xc0 (192)
+// 0x00000004: .cfi_def_cfa_offset: 192
+// 0x00000004: stp tr, x20, [sp, #96]
+// 0x00000008: .cfi_offset: r19 at cfa-96
+// 0x00000008: .cfi_offset: r20 at cfa-88
+// 0x00000008: stp x21, x22, [sp, #112]
+// 0x0000000c: .cfi_offset: r21 at cfa-80
+// 0x0000000c: .cfi_offset: r22 at cfa-72
+// 0x0000000c: stp x23, x24, [sp, #128]
+// 0x00000010: .cfi_offset: r23 at cfa-64
+// 0x00000010: .cfi_offset: r24 at cfa-56
+// 0x00000010: stp x25, x26, [sp, #144]
+// 0x00000014: .cfi_offset: r25 at cfa-48
+// 0x00000014: .cfi_offset: r26 at cfa-40
+// 0x00000014: stp x27, x28, [sp, #160]
+// 0x00000018: .cfi_offset: r27 at cfa-32
+// 0x00000018: .cfi_offset: r28 at cfa-24
+// 0x00000018: stp x29, lr, [sp, #176]
+// 0x0000001c: .cfi_offset: r29 at cfa-16
+// 0x0000001c: .cfi_offset: r30 at cfa-8
+// 0x0000001c: stp d8, d9, [sp, #32]
+// 0x00000020: .cfi_offset_extended: r72 at cfa-160
+// 0x00000020: .cfi_offset_extended: r73 at cfa-152
+// 0x00000020: stp d10, d11, [sp, #48]
+// 0x00000024: .cfi_offset_extended: r74 at cfa-144
+// 0x00000024: .cfi_offset_extended: r75 at cfa-136
+// 0x00000024: stp d12, d13, [sp, #64]
+// 0x00000028: .cfi_offset_extended: r76 at cfa-128
+// 0x00000028: .cfi_offset_extended: r77 at cfa-120
+// 0x00000028: stp d14, d15, [sp, #80]
+// 0x0000002c: .cfi_offset_extended: r78 at cfa-112
+// 0x0000002c: .cfi_offset_extended: r79 at cfa-104
+// 0x0000002c: str x0, [sp]
+// 0x00000030: str w1, [sp, #200]
+// 0x00000034: str s0, [sp, #204]
+// 0x00000038: str w2, [sp, #208]
+// 0x0000003c: str w3, [sp, #212]
+// 0x00000040: sub sp, sp, #0x20 (32)
+// 0x00000044: .cfi_def_cfa_offset: 224
+// 0x00000044: add sp, sp, #0x20 (32)
+// 0x00000048: .cfi_def_cfa_offset: 192
+// 0x00000048: .cfi_remember_state
+// 0x00000048: ldp tr, x20, [sp, #96]
+// 0x0000004c: .cfi_restore: r19
+// 0x0000004c: .cfi_restore: r20
+// 0x0000004c: ldp x21, x22, [sp, #112]
+// 0x00000050: .cfi_restore: r21
+// 0x00000050: .cfi_restore: r22
+// 0x00000050: ldp x23, x24, [sp, #128]
+// 0x00000054: .cfi_restore: r23
+// 0x00000054: .cfi_restore: r24
+// 0x00000054: ldp x25, x26, [sp, #144]
+// 0x00000058: .cfi_restore: r25
+// 0x00000058: .cfi_restore: r26
+// 0x00000058: ldp x27, x28, [sp, #160]
+// 0x0000005c: .cfi_restore: r27
+// 0x0000005c: .cfi_restore: r28
+// 0x0000005c: ldp x29, lr, [sp, #176]
+// 0x00000060: .cfi_restore: r29
+// 0x00000060: .cfi_restore: r30
+// 0x00000060: ldp d8, d9, [sp, #32]
+// 0x00000064: .cfi_restore_extended: r72
+// 0x00000064: .cfi_restore_extended: r73
+// 0x00000064: ldp d10, d11, [sp, #48]
+// 0x00000068: .cfi_restore_extended: r74
+// 0x00000068: .cfi_restore_extended: r75
+// 0x00000068: ldp d12, d13, [sp, #64]
+// 0x0000006c: .cfi_restore_extended: r76
+// 0x0000006c: .cfi_restore_extended: r77
+// 0x0000006c: ldp d14, d15, [sp, #80]
+// 0x00000070: .cfi_restore_extended: r78
+// 0x00000070: .cfi_restore_extended: r79
+// 0x00000070: add sp, sp, #0xc0 (192)
+// 0x00000074: .cfi_def_cfa_offset: 0
+// 0x00000074: ret
+// 0x00000078: .cfi_restore_state
+// 0x00000078: .cfi_def_cfa_offset: 192
+
+static constexpr uint8_t expected_asm_kX86[] = {
+    0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
+    0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24,
+    0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E,
+    0x5F, 0xC3,
+};
+static constexpr uint8_t expected_cfi_kX86[] = {
+    0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E,
+    0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50,
+    0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41,
+    0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30,
+};
0x41, 0x0B, 0x0E, 0x30, +}; +// 0x00000000: push edi +// 0x00000001: .cfi_def_cfa_offset: 8 +// 0x00000001: .cfi_offset: r7 at cfa-8 +// 0x00000001: push esi +// 0x00000002: .cfi_def_cfa_offset: 12 +// 0x00000002: .cfi_offset: r6 at cfa-12 +// 0x00000002: push ebp +// 0x00000003: .cfi_def_cfa_offset: 16 +// 0x00000003: .cfi_offset: r5 at cfa-16 +// 0x00000003: add esp, -28 +// 0x00000006: .cfi_def_cfa_offset: 44 +// 0x00000006: push eax +// 0x00000007: .cfi_def_cfa_offset: 48 +// 0x00000007: mov [esp + 52], ecx +// 0x0000000b: movss [esp + 56], xmm0 +// 0x00000011: mov [esp + 60], edx +// 0x00000015: mov [esp + 64], ebx +// 0x00000019: add esp, -32 +// 0x0000001c: .cfi_def_cfa_offset: 80 +// 0x0000001c: add esp, 32 +// 0x0000001f: .cfi_def_cfa_offset: 48 +// 0x0000001f: .cfi_remember_state +// 0x0000001f: add esp, 32 +// 0x00000022: .cfi_def_cfa_offset: 16 +// 0x00000022: pop ebp +// 0x00000023: .cfi_def_cfa_offset: 12 +// 0x00000023: .cfi_restore: r5 +// 0x00000023: pop esi +// 0x00000024: .cfi_def_cfa_offset: 8 +// 0x00000024: .cfi_restore: r6 +// 0x00000024: pop edi +// 0x00000025: .cfi_def_cfa_offset: 4 +// 0x00000025: .cfi_restore: r7 +// 0x00000025: ret +// 0x00000026: .cfi_restore_state +// 0x00000026: .cfi_def_cfa_offset: 48 + +static constexpr uint8_t expected_asm_kX86_64[] = { + 0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83, + 0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F, + 0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2, + 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4, + 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00, + 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, + 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, + 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, + 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, + 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, + 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3, +}; +static constexpr uint8_t expected_cfi_kX86_64[] = { + 0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E, + 0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86, + 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0, + 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E, + 0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, + 0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, + 0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, + 0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01, +}; +// 0x00000000: push r15 +// 0x00000002: .cfi_def_cfa_offset: 16 +// 0x00000002: .cfi_offset: r15 at cfa-16 +// 0x00000002: push r14 +// 0x00000004: .cfi_def_cfa_offset: 24 +// 0x00000004: .cfi_offset: r14 at cfa-24 +// 0x00000004: push r13 +// 0x00000006: .cfi_def_cfa_offset: 32 +// 0x00000006: .cfi_offset: r13 at cfa-32 +// 0x00000006: push r12 +// 0x00000008: .cfi_def_cfa_offset: 40 +// 0x00000008: .cfi_offset: r12 at cfa-40 +// 0x00000008: push rbp +// 0x00000009: .cfi_def_cfa_offset: 48 +// 0x00000009: .cfi_offset: r6 at cfa-48 +// 0x00000009: push rbx +// 0x0000000a: .cfi_def_cfa_offset: 56 +// 0x0000000a: .cfi_offset: r3 at cfa-56 +// 0x0000000a: subq rsp, 72 +// 0x0000000e: .cfi_def_cfa_offset: 128 +// 0x0000000e: movsd [rsp + 64], xmm15 +// 0x00000015: .cfi_offset: r32 at cfa-64 +// 0x00000015: 
movsd [rsp + 56], xmm14 +// 0x0000001c: .cfi_offset: r31 at cfa-72 +// 0x0000001c: movsd [rsp + 48], xmm13 +// 0x00000023: .cfi_offset: r30 at cfa-80 +// 0x00000023: movsd [rsp + 40], xmm12 +// 0x0000002a: .cfi_offset: r29 at cfa-88 +// 0x0000002a: movq [rsp], rdi +// 0x0000002e: mov [rsp + 136], esi +// 0x00000035: movss [rsp + 140], xmm0 +// 0x0000003e: mov [rsp + 144], edx +// 0x00000045: mov [rsp + 148], ecx +// 0x0000004c: addq rsp, -32 +// 0x00000050: .cfi_def_cfa_offset: 160 +// 0x00000050: addq rsp, 32 +// 0x00000054: .cfi_def_cfa_offset: 128 +// 0x00000054: .cfi_remember_state +// 0x00000054: movsd xmm12, [rsp + 40] +// 0x0000005b: .cfi_restore: r29 +// 0x0000005b: movsd xmm13, [rsp + 48] +// 0x00000062: .cfi_restore: r30 +// 0x00000062: movsd xmm14, [rsp + 56] +// 0x00000069: .cfi_restore: r31 +// 0x00000069: movsd xmm15, [rsp + 64] +// 0x00000070: .cfi_restore: r32 +// 0x00000070: addq rsp, 72 +// 0x00000074: .cfi_def_cfa_offset: 56 +// 0x00000074: pop rbx +// 0x00000075: .cfi_def_cfa_offset: 48 +// 0x00000075: .cfi_restore: r3 +// 0x00000075: pop rbp +// 0x00000076: .cfi_def_cfa_offset: 40 +// 0x00000076: .cfi_restore: r6 +// 0x00000076: pop r12 +// 0x00000078: .cfi_def_cfa_offset: 32 +// 0x00000078: .cfi_restore: r12 +// 0x00000078: pop r13 +// 0x0000007a: .cfi_def_cfa_offset: 24 +// 0x0000007a: .cfi_restore: r13 +// 0x0000007a: pop r14 +// 0x0000007c: .cfi_def_cfa_offset: 16 +// 0x0000007c: .cfi_restore: r14 +// 0x0000007c: pop r15 +// 0x0000007e: .cfi_def_cfa_offset: 8 +// 0x0000007e: .cfi_restore: r15 +// 0x0000007e: ret +// 0x0000007f: .cfi_restore_state +// 0x0000007f: .cfi_def_cfa_offset: 128 + +static constexpr uint8_t expected_asm_kMips[] = { + 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xBE, 0xAF, + 0x34, 0x00, 0xB7, 0xAF, 0x30, 0x00, 0xB6, 0xAF, 0x2C, 0x00, 0xB5, 0xAF, + 0x28, 0x00, 0xB4, 0xAF, 0x24, 0x00, 0xB3, 0xAF, 0x20, 0x00, 0xB2, 0xAF, + 0x00, 0x00, 0xA4, 0xAF, 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7, + 0x4C, 0x00, 0xA6, 0xAF, 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27, + 0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F, + 0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F, + 0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F, + 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00, +}; +static constexpr uint8_t expected_cfi_kMips[] = { + 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03, + 0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07, + 0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2, + 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE, + 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40, +}; +// 0x00000000: addiu r29, r29, -64 +// 0x00000004: .cfi_def_cfa_offset: 64 +// 0x00000004: sw r31, +60(r29) +// 0x00000008: .cfi_offset: r31 at cfa-4 +// 0x00000008: sw r30, +56(r29) +// 0x0000000c: .cfi_offset: r30 at cfa-8 +// 0x0000000c: sw r23, +52(r29) +// 0x00000010: .cfi_offset: r23 at cfa-12 +// 0x00000010: sw r22, +48(r29) +// 0x00000014: .cfi_offset: r22 at cfa-16 +// 0x00000014: sw r21, +44(r29) +// 0x00000018: .cfi_offset: r21 at cfa-20 +// 0x00000018: sw r20, +40(r29) +// 0x0000001c: .cfi_offset: r20 at cfa-24 +// 0x0000001c: sw r19, +36(r29) +// 0x00000020: .cfi_offset: r19 at cfa-28 +// 0x00000020: sw r18, +32(r29) +// 0x00000024: .cfi_offset: r18 at cfa-32 +// 0x00000024: sw r4, +0(r29) +// 0x00000028: sw r5, +68(r29) +// 0x0000002c: swc1 f12, +72(r29) +// 
0x00000030: sw r6, +76(r29) +// 0x00000034: sw r7, +80(r29) +// 0x00000038: addiu r29, r29, -32 +// 0x0000003c: .cfi_def_cfa_offset: 96 +// 0x0000003c: addiu r29, r29, 32 +// 0x00000040: .cfi_def_cfa_offset: 64 +// 0x00000040: .cfi_remember_state +// 0x00000040: lw r18, +32(r29) +// 0x00000044: .cfi_restore: r18 +// 0x00000044: lw r19, +36(r29) +// 0x00000048: .cfi_restore: r19 +// 0x00000048: lw r20, +40(r29) +// 0x0000004c: .cfi_restore: r20 +// 0x0000004c: lw r21, +44(r29) +// 0x00000050: .cfi_restore: r21 +// 0x00000050: lw r22, +48(r29) +// 0x00000054: .cfi_restore: r22 +// 0x00000054: lw r23, +52(r29) +// 0x00000058: .cfi_restore: r23 +// 0x00000058: lw r30, +56(r29) +// 0x0000005c: .cfi_restore: r30 +// 0x0000005c: lw r31, +60(r29) +// 0x00000060: .cfi_restore: r31 +// 0x00000060: addiu r29, r29, 64 +// 0x00000064: .cfi_def_cfa_offset: 0 +// 0x00000064: jr r31 +// 0x00000068: nop +// 0x0000006c: .cfi_restore_state +// 0x0000006c: .cfi_def_cfa_offset: 64 + +static constexpr uint8_t expected_asm_kMips64[] = { + 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF, + 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF, + 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF, + 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF, + 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF, + 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF, + 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF, + 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF, + 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67, + 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00, +}; +static constexpr uint8_t expected_cfi_kMips64[] = { + 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06, + 0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E, + 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E, + 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, + 0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, + 0x0B, 0x0E, 0x70, +}; +// 0x00000000: daddiu r29, r29, -112 +// 0x00000004: .cfi_def_cfa_offset: 112 +// 0x00000004: sd r31, +104(r29) +// 0x00000008: .cfi_offset: r31 at cfa-8 +// 0x00000008: sd r30, +96(r29) +// 0x0000000c: .cfi_offset: r30 at cfa-16 +// 0x0000000c: sd r28, +88(r29) +// 0x00000010: .cfi_offset: r28 at cfa-24 +// 0x00000010: sd r23, +80(r29) +// 0x00000014: .cfi_offset: r23 at cfa-32 +// 0x00000014: sd r22, +72(r29) +// 0x00000018: .cfi_offset: r22 at cfa-40 +// 0x00000018: sd r21, +64(r29) +// 0x0000001c: .cfi_offset: r21 at cfa-48 +// 0x0000001c: sd r20, +56(r29) +// 0x00000020: .cfi_offset: r20 at cfa-56 +// 0x00000020: sd r19, +48(r29) +// 0x00000024: .cfi_offset: r19 at cfa-64 +// 0x00000024: sd r18, +40(r29) +// 0x00000028: .cfi_offset: r18 at cfa-72 +// 0x00000028: sd r4, +0(r29) +// 0x0000002c: sw r5, +120(r29) +// 0x00000030: swc1 f14, +124(r29) +// 0x00000034: sw r7, +128(r29) +// 0x00000038: sw r8, +132(r29) +// 0x0000003c: daddiu r29, r29, -32 +// 0x00000040: .cfi_def_cfa_offset: 144 +// 0x00000040: daddiu r29, r29, 32 +// 0x00000044: .cfi_def_cfa_offset: 112 +// 0x00000044: .cfi_remember_state +// 0x00000044: ld r18, +40(r29) +// 0x00000048: .cfi_restore: r18 +// 0x00000048: ld r19, +48(r29) +// 0x0000004c: .cfi_restore: r19 +// 0x0000004c: ld r20, +56(r29) +// 0x00000050: .cfi_restore: r20 +// 0x00000050: ld r21, +64(r29) +// 
0x00000054: .cfi_restore: r21
+// 0x00000054: ld r22, +72(r29)
+// 0x00000058: .cfi_restore: r22
+// 0x00000058: ld r23, +80(r29)
+// 0x0000005c: .cfi_restore: r23
+// 0x0000005c: ld r28, +88(r29)
+// 0x00000060: .cfi_restore: r28
+// 0x00000060: ld r30, +96(r29)
+// 0x00000064: .cfi_restore: r30
+// 0x00000064: ld r31, +104(r29)
+// 0x00000068: .cfi_restore: r31
+// 0x00000068: daddiu r29, r29, 112
+// 0x0000006c: .cfi_def_cfa_offset: 0
+// 0x0000006c: jr r31
+// 0x00000070: nop
+// 0x00000074: .cfi_restore_state
+// 0x00000074: .cfi_def_cfa_offset: 112
+
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
new file mode 100644
index 000000000..c4c2399cc
--- /dev/null
+++ b/compiler/jni/jni_compiler_test.cc
@@ -0,0 +1,1775 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+
+#include <memory>
+
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "common_compiler_test.h"
+#include "dex_file.h"
+#include "gtest/gtest.h"
+#include "indirect_reference_table.h"
+#include "jni_internal.h"
+#include "mem_map.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/stack_trace_element.h"
+#include "nativeloader/native_loader.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar(JNIEnv*, jobject, jint count) {
+  return count + 1;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint count) {
+  return count + 1;
+}
+
+namespace art {
+
+class JniCompilerTest : public CommonCompilerTest {
+ protected:
+  void SetUp() OVERRIDE {
+    CommonCompilerTest::SetUp();
+    check_generic_jni_ = false;
+  }
+
+  void TearDown() OVERRIDE {
+    android::ResetNativeLoader();
+    CommonCompilerTest::TearDown();
+  }
+
+  void SetCheckGenericJni(bool generic) {
+    check_generic_jni_ = generic;
+  }
+
+  void CompileForTest(jobject class_loader, bool direct,
+                      const char* method_name, const char* method_sig) {
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<1> hs(soa.Self());
+    Handle<mirror::ClassLoader> loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader)));
+    // Compile the native method before starting the runtime
+    mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
+    const auto pointer_size = class_linker_->GetImagePointerSize();
+    ArtMethod* method = direct ? c->FindDirectMethod(method_name, method_sig, pointer_size) :
+        c->FindVirtualMethod(method_name, method_sig, pointer_size);
+    ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
+    if (check_generic_jni_) {
+      method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
+    } else {
+      const void* code = method->GetEntryPointFromQuickCompiledCode();
+      if (code == nullptr || class_linker_->IsQuickGenericJniStub(code)) {
+        CompileMethod(method);
+        ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
+            << method_name << " " << method_sig;
+      }
+    }
+  }
+
+  void SetUpForTest(bool direct, const char* method_name, const char* method_sig,
+                    void* native_fnptr) {
+    // Initialize class loader and compile method when runtime not started.
+    if (!runtime_->IsStarted()) {
+      {
+        ScopedObjectAccess soa(Thread::Current());
+        class_loader_ = LoadDex("MyClassNatives");
+      }
+      CompileForTest(class_loader_, direct, method_name, method_sig);
+      // Start runtime.
+      Thread::Current()->TransitionFromSuspendedToRunnable();
+      android::InitializeNativeLoader();
+      bool started = runtime_->Start();
+      CHECK(started);
+    }
+    // JNI operations after runtime start.
+    env_ = Thread::Current()->GetJniEnv();
+    library_search_path_ = env_->NewStringUTF("");
+    jklass_ = env_->FindClass("MyClassNatives");
+    ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;
+
+    if (direct) {
+      jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
+    } else {
+      jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
+    }
+    ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig;
+
+    if (native_fnptr != nullptr) {
+      JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
+      ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
+          << method_name << " " << method_sig;
+    } else {
+      env_->UnregisterNatives(jklass_);
+    }
+
+    jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
+    jobj_ = env_->NewObject(jklass_, constructor);
+    ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig;
+  }
+
+ public:
+  static jclass jklass_;
+  static jobject jobj_;
+  static jobject class_loader_;
+
+ protected:
+  // We have to list the methods here so we can share them between default and generic JNI.
+ void CompileAndRunNoArgMethodImpl(); + void CompileAndRunIntMethodThroughStubImpl(); + void CompileAndRunStaticIntMethodThroughStubImpl(); + void CompileAndRunIntMethodImpl(); + void CompileAndRunIntIntMethodImpl(); + void CompileAndRunLongLongMethodImpl(); + void CompileAndRunDoubleDoubleMethodImpl(); + void CompileAndRun_fooJJ_synchronizedImpl(); + void CompileAndRunIntObjectObjectMethodImpl(); + void CompileAndRunStaticIntIntMethodImpl(); + void CompileAndRunStaticDoubleDoubleMethodImpl(); + void RunStaticLogDoubleMethodImpl(); + void RunStaticLogFloatMethodImpl(); + void RunStaticReturnTrueImpl(); + void RunStaticReturnFalseImpl(); + void RunGenericStaticReturnIntImpl(); + void CompileAndRunStaticIntObjectObjectMethodImpl(); + void CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl(); + void ExceptionHandlingImpl(); + void NativeStackTraceElementImpl(); + void ReturnGlobalRefImpl(); + void LocalReferenceTableClearingTestImpl(); + void JavaLangSystemArrayCopyImpl(); + void CompareAndSwapIntImpl(); + void GetTextImpl(); + void GetSinkPropertiesNativeImpl(); + void UpcallReturnTypeChecking_InstanceImpl(); + void UpcallReturnTypeChecking_StaticImpl(); + void UpcallArgumentTypeChecking_InstanceImpl(); + void UpcallArgumentTypeChecking_StaticImpl(); + void CompileAndRunFloatFloatMethodImpl(); + void CheckParameterAlignImpl(); + void MaxParamNumberImpl(); + void WithoutImplementationImpl(); + void WithoutImplementationRefReturnImpl(); + void StackArgsIntsFirstImpl(); + void StackArgsFloatsFirstImpl(); + void StackArgsMixedImpl(); + void StackArgsSignExtendedMips64Impl(); + + JNIEnv* env_; + jstring library_search_path_; + jmethodID jmethod_; + bool check_generic_jni_; +}; + +jclass JniCompilerTest::jklass_; +jobject JniCompilerTest::jobj_; +jobject JniCompilerTest::class_loader_; + +#define JNI_TEST(TestName) \ + TEST_F(JniCompilerTest, TestName ## Default) { \ + TestName ## Impl(); \ + } \ + \ + TEST_F(JniCompilerTest, TestName ## Generic) { \ + TEST_DISABLED_FOR_MIPS(); \ + SetCheckGenericJni(true); \ + TestName ## Impl(); \ + } + +int gJava_MyClassNatives_foo_calls = 0; +void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + Locks::mutator_lock_->AssertNotHeld(Thread::Current()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_foo_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); +} + +void JniCompilerTest::CompileAndRunNoArgMethodImpl() { + SetUpForTest(false, "foo", "()V", reinterpret_cast(&Java_MyClassNatives_foo)); + + EXPECT_EQ(0, gJava_MyClassNatives_foo_calls); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(1, gJava_MyClassNatives_foo_calls); + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); + EXPECT_EQ(2, gJava_MyClassNatives_foo_calls); + + gJava_MyClassNatives_foo_calls = 0; +} + +JNI_TEST(CompileAndRunNoArgMethod) + +void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { + SetUpForTest(false, "bar", "(I)I", nullptr); + // calling through stub will link with &Java_MyClassNatives_bar + + std::string reason; + ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) + << reason; + + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); + EXPECT_EQ(25, result); +} + 
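+// Each JNI_TEST(Name) below expands, per the macro defined above, into a
+// Default/Generic pair of gtest cases. For example, JNI_TEST(CompileAndRunNoArgMethod)
+// becomes:
+//
+//   TEST_F(JniCompilerTest, CompileAndRunNoArgMethodDefault) {
+//     CompileAndRunNoArgMethodImpl();
+//   }
+//   TEST_F(JniCompilerTest, CompileAndRunNoArgMethodGeneric) {
+//     TEST_DISABLED_FOR_MIPS();
+//     SetCheckGenericJni(true);  // Route the call through the generic JNI stub.
+//     CompileAndRunNoArgMethodImpl();
+//   }
+//
+// so every Impl body is exercised once against its compiled JNI stub and once
+// under generic JNI.
+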
+JNI_TEST(CompileAndRunIntMethodThroughStub) + +void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { + SetUpForTest(true, "sbar", "(I)I", nullptr); + // calling through stub will link with &Java_MyClassNatives_sbar + + std::string reason; + ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) + << reason; + + jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); + EXPECT_EQ(43, result); +} + +JNI_TEST(CompileAndRunStaticIntMethodThroughStub) + +int gJava_MyClassNatives_fooI_calls = 0; +jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooI_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x; +} + +void JniCompilerTest::CompileAndRunIntMethodImpl() { + SetUpForTest(false, "fooI", "(I)I", + reinterpret_cast(&Java_MyClassNatives_fooI)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooI_calls); + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 42); + EXPECT_EQ(42, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooI_calls); + result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D); + EXPECT_EQ(static_cast(0xCAFED00D), result); + EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls); + + gJava_MyClassNatives_fooI_calls = 0; +} + +JNI_TEST(CompileAndRunIntMethod) + +int gJava_MyClassNatives_fooII_calls = 0; +jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooII_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunIntIntMethodImpl() { + SetUpForTest(false, "fooII", "(II)I", + reinterpret_cast(&Java_MyClassNatives_fooII)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooII_calls); + jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 99, 10); + EXPECT_EQ(99 - 10, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooII_calls); + result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFEBABE, + 0xCAFED00D); + EXPECT_EQ(static_cast(0xCAFEBABE - 0xCAFED00D), result); + EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls); + + gJava_MyClassNatives_fooII_calls = 0; +} + +JNI_TEST(CompileAndRunIntIntMethod) + +int gJava_MyClassNatives_fooJJ_calls = 0; +jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooJJ_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunLongLongMethodImpl() { + SetUpForTest(false, "fooJJ", "(JJ)J", + reinterpret_cast(&Java_MyClassNatives_fooJJ)); + + EXPECT_EQ(0, 
gJava_MyClassNatives_fooJJ_calls); + jlong a = INT64_C(0x1234567890ABCDEF); + jlong b = INT64_C(0xFEDCBA0987654321); + jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_EQ(a - b, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_calls); + result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a); + EXPECT_EQ(b - a, result); + EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls); + + gJava_MyClassNatives_fooJJ_calls = 0; +} + +JNI_TEST(CompileAndRunLongLongMethod) + +int gJava_MyClassNatives_fooDD_calls = 0; +jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooDD_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() { + SetUpForTest(false, "fooDD", "(DD)D", + reinterpret_cast(&Java_MyClassNatives_fooDD)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls); + jdouble result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, + 99.0, 10.0); + EXPECT_DOUBLE_EQ(99.0 - 10.0, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooDD_calls); + jdouble a = 3.14159265358979323846; + jdouble b = 0.69314718055994530942; + result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_DOUBLE_EQ(a - b, result); + EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls); + + gJava_MyClassNatives_fooDD_calls = 0; +} + +int gJava_MyClassNatives_fooJJ_synchronized_calls = 0; +jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv* env, jobject thisObj, jlong x, jlong y) { + // 1 = thisObj + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooJJ_synchronized_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x | y; +} + +void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() { + SetUpForTest(false, "fooJJ_synchronized", "(JJ)J", + reinterpret_cast(&Java_MyClassNatives_fooJJ_synchronized)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls); + jlong a = 0x1000000020000000ULL; + jlong b = 0x00ff000000aa0000ULL; + jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_EQ(a | b, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls); + + gJava_MyClassNatives_fooJJ_synchronized_calls = 0; +} + +JNI_TEST(CompileAndRun_fooJJ_synchronized) + +int gJava_MyClassNatives_fooIOO_calls = 0; +jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y, + jobject z) { + // 3 = this + y + z + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + gJava_MyClassNatives_fooIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + size_t null_args = (y == nullptr ? 1 : 0) + (z == nullptr ? 
1 : 0); + EXPECT_TRUE(3U == Thread::Current()->NumStackReferences() || + (3U - null_args) == Thread::Current()->NumStackReferences()); + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return thisObj; + } +} + +void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() { + SetUpForTest(false, "fooIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + reinterpret_cast(&Java_MyClassNatives_fooIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls); + jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls); + + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, nullptr, jklass_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls); + + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls); + result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls); + + gJava_MyClassNatives_fooIOO_calls = 0; +} + +JNI_TEST(CompileAndRunIntObjectObjectMethod) + +int gJava_MyClassNatives_fooSII_calls = 0; +jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) { + // 1 = klass + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(klass != nullptr); + EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); + gJava_MyClassNatives_fooSII_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x + y; +} + +void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() { + SetUpForTest(true, "fooSII", "(II)I", + reinterpret_cast(&Java_MyClassNatives_fooSII)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSII_calls); + jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30); + EXPECT_EQ(50, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooSII_calls); + + gJava_MyClassNatives_fooSII_calls = 0; +} + +JNI_TEST(CompileAndRunStaticIntIntMethod) + +int gJava_MyClassNatives_fooSDD_calls = 0; +jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) { + // 1 = klass + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(klass != nullptr); + EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); + gJava_MyClassNatives_fooSDD_calls++; + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return x - y; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() { + SetUpForTest(true, "fooSDD", 
"(DD)D", + reinterpret_cast(&Java_MyClassNatives_fooSDD)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls); + jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 99.0, 10.0); + EXPECT_DOUBLE_EQ(99.0 - 10.0, result); + EXPECT_EQ(1, gJava_MyClassNatives_fooSDD_calls); + jdouble a = 3.14159265358979323846; + jdouble b = 0.69314718055994530942; + result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b); + EXPECT_DOUBLE_EQ(a - b, result); + EXPECT_DOUBLE_EQ(2, gJava_MyClassNatives_fooSDD_calls); + + gJava_MyClassNatives_fooSDD_calls = 0; +} + +JNI_TEST(CompileAndRunStaticDoubleDoubleMethod) + +// The x86 generic JNI code had a bug where it assumed a floating +// point return value would be in xmm0. We use log, to somehow ensure +// the compiler will use the floating point stack. + +jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) { + return log(x); +} + +void JniCompilerTest::RunStaticLogDoubleMethodImpl() { + SetUpForTest(true, "logD", "(D)D", reinterpret_cast(&Java_MyClassNatives_logD)); + + jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0); + EXPECT_DOUBLE_EQ(log(2.0), result); +} + +JNI_TEST(RunStaticLogDoubleMethod) + +jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) { + return logf(x); +} + +void JniCompilerTest::RunStaticLogFloatMethodImpl() { + SetUpForTest(true, "logF", "(F)F", reinterpret_cast(&Java_MyClassNatives_logF)); + + jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0); + EXPECT_FLOAT_EQ(logf(2.0), result); +} + +JNI_TEST(RunStaticLogFloatMethod) + +jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) { + return JNI_TRUE; +} + +jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) { + return JNI_FALSE; +} + +jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) { + return 42; +} + +void JniCompilerTest::RunStaticReturnTrueImpl() { + SetUpForTest(true, "returnTrue", "()Z", reinterpret_cast(&Java_MyClassNatives_returnTrue)); + + jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); + EXPECT_TRUE(result); +} + +JNI_TEST(RunStaticReturnTrue) + +void JniCompilerTest::RunStaticReturnFalseImpl() { + SetUpForTest(true, "returnFalse", "()Z", + reinterpret_cast(&Java_MyClassNatives_returnFalse)); + + jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); + EXPECT_FALSE(result); +} + +JNI_TEST(RunStaticReturnFalse) + +void JniCompilerTest::RunGenericStaticReturnIntImpl() { + SetUpForTest(true, "returnInt", "()I", reinterpret_cast(&Java_MyClassNatives_returnInt)); + + jint result = env_->CallStaticIntMethod(jklass_, jmethod_); + EXPECT_EQ(42, result); +} + +JNI_TEST(RunGenericStaticReturnInt) + +int gJava_MyClassNatives_fooSIOO_calls = 0; +jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y, + jobject z) { + // 3 = klass + y + z + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(klass != nullptr); + EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); + gJava_MyClassNatives_fooSIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + size_t null_args = (y == nullptr ? 1 : 0) + (z == nullptr ? 
1 : 0); + EXPECT_TRUE(3U == Thread::Current()->NumStackReferences() || + (3U - null_args) == Thread::Current()->NumStackReferences()); + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return klass; + } +} + + +void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() { + SetUpForTest(true, "fooSIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + reinterpret_cast(&Java_MyClassNatives_fooSIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls); + jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls); + + gJava_MyClassNatives_fooSIOO_calls = 0; +} + +JNI_TEST(CompileAndRunStaticIntObjectObjectMethod) + +int gJava_MyClassNatives_fooSSIOO_calls = 0; +jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) { + // 3 = klass + y + z + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(klass != nullptr); + EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass)); + gJava_MyClassNatives_fooSSIOO_calls++; + ScopedObjectAccess soa(Thread::Current()); + size_t null_args = (y == nullptr ? 1 : 0) + (z == nullptr ? 
1 : 0); + EXPECT_TRUE(3U == Thread::Current()->NumStackReferences() || + (3U - null_args) == Thread::Current()->NumStackReferences()); + switch (x) { + case 1: + return y; + case 2: + return z; + default: + return klass; + } +} + +void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() { + SetUpForTest(true, "fooSSIOO", + "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", + reinterpret_cast(&Java_MyClassNatives_fooSSIOO)); + + EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls); + jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls); + + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jklass_, result)); + EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(jobj_, result)); + EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls); + result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); + EXPECT_TRUE(env_->IsSameObject(nullptr, result)); + EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls); + + gJava_MyClassNatives_fooSSIOO_calls = 0; +} + +JNI_TEST(CompileAndRunStaticSynchronizedIntObjectObjectMethod) + +void Java_MyClassNatives_throwException(JNIEnv* env, jobject) { + jclass c = env->FindClass("java/lang/RuntimeException"); + env->ThrowNew(c, "hello"); +} + +void JniCompilerTest::ExceptionHandlingImpl() { + { + ASSERT_FALSE(runtime_->IsStarted()); + ScopedObjectAccess soa(Thread::Current()); + class_loader_ = LoadDex("MyClassNatives"); + + // all compilation needs to happen before Runtime::Start + CompileForTest(class_loader_, false, "foo", "()V"); + CompileForTest(class_loader_, false, "throwException", "()V"); + CompileForTest(class_loader_, false, "foo", "()V"); + } + // Start runtime to avoid re-initialization in SetupForTest. 
+  Thread::Current()->TransitionFromSuspendedToRunnable();
+  bool started = runtime_->Start();
+  CHECK(started);
+
+  gJava_MyClassNatives_foo_calls = 0;
+
+  // Check a single call of a JNI method is ok
+  SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
+  EXPECT_FALSE(Thread::Current()->IsExceptionPending());
+
+  // Get class for exception we expect to be thrown
+  ScopedLocalRef<jclass> jlre(env_, env_->FindClass("java/lang/RuntimeException"));
+  SetUpForTest(false, "throwException", "()V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_throwException));
+  // Call Java_MyClassNatives_throwException (JNI method that throws exception)
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
+  EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
+  ScopedLocalRef<jthrowable> exception(env_, env_->ExceptionOccurred());
+  env_->ExceptionClear();
+  EXPECT_TRUE(env_->IsInstanceOf(exception.get(), jlre.get()));
+
+  // Check a single call of a JNI method is ok
+  SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
+  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
+  EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+
+  gJava_MyClassNatives_foo_calls = 0;
+}
+
+JNI_TEST(ExceptionHandling)
+
+jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
+  if (i <= 0) {
+    // We want to check raw Object* / Array* below
+    ScopedObjectAccess soa(env);
+
+    // Build stack trace
+    jobject internal = Thread::Current()->CreateInternalStackTrace<false>(soa);
+    jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
+    mirror::ObjectArray<mirror::StackTraceElement>* trace_array =
+        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(ste_array);
+    EXPECT_TRUE(trace_array != nullptr);
+    EXPECT_EQ(11, trace_array->GetLength());
+
+    // Check stack trace entries have expected values
+    for (int32_t j = 0; j < trace_array->GetLength(); ++j) {
+      EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber());
+      mirror::StackTraceElement* ste = trace_array->Get(j);
+      EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str());
+      EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str());
+      EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str());
+    }
+
+    // end recursion
+    return 0;
+  } else {
+    jclass jklass = env->FindClass("MyClassNatives");
+    EXPECT_TRUE(jklass != nullptr);
+    jmethodID jmethod = env->GetMethodID(jklass, "fooI", "(I)I");
+    EXPECT_TRUE(jmethod != nullptr);
+
+    // Recurse with i - 1
+    jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1);
+
+    // Return sum of all depths
+    return i + result;
+  }
+}
+
+void JniCompilerTest::NativeStackTraceElementImpl() {
+  SetUpForTest(false, "fooI", "(I)I",
+               reinterpret_cast<void*>(&Java_MyClassNatives_nativeUpCall));
+  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10);
+  EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result);
+}
+
+JNI_TEST(NativeStackTraceElement)
+
+jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
+  return env->NewGlobalRef(x);
+}
+
+void JniCompilerTest::ReturnGlobalRefImpl() {
+  SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
+               reinterpret_cast<void*>(&Java_MyClassNatives_fooO));
+  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_);
+  EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(result));
+  EXPECT_TRUE(env_->IsSameObject(result, jobj_));
+}
+
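+// Why JNILocalRefType above: JNI call functions such as
+// CallNonvirtualObjectMethod always hand their caller a fresh local reference,
+// so even though fooO manufactured a global reference internally, the value
+// the test observes is a local one (the global reference created inside fooO
+// is never deleted by this test).
+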
+JNI_TEST(ReturnGlobalRef)
+
+jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
+  // Add 10 local references
+  ScopedObjectAccess soa(env);
+  for (int i = 0; i < 10; i++) {
+    soa.AddLocalReference<jobject>(soa.Decode<mirror::Object*>(thisObj));
+  }
+  return x+1;
+}
+
+void JniCompilerTest::LocalReferenceTableClearingTestImpl() {
+  SetUpForTest(false, "fooI", "(I)I", reinterpret_cast<void*>(&local_ref_test));
+  // 1000 invocations of a method that adds 10 local references
+  for (int i = 0; i < 1000; i++) {
+    jint result = env_->CallIntMethod(jobj_, jmethod_, i);
+    EXPECT_TRUE(result == i + 1);
+  }
+}
+
+JNI_TEST(LocalReferenceTableClearingTest)
+
+void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject dst, jint dst_pos, jint length) {
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, src));
+  EXPECT_EQ(1234, src_pos);
+  EXPECT_EQ(5678, dst_pos);
+  EXPECT_EQ(9876, length);
+}
+
+void JniCompilerTest::JavaLangSystemArrayCopyImpl() {
+  SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
+               reinterpret_cast<void*>(&my_arraycopy));
+  env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
+}
+
+JNI_TEST(JavaLangSystemArrayCopy)
+
+jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint expected, jint newval) {
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj));
+  EXPECT_EQ(INT64_C(0x12345678ABCDEF88), offset);
+  EXPECT_EQ(static_cast<jint>(0xCAFEF00D), expected);
+  EXPECT_EQ(static_cast<jint>(0xEBADF00D), newval);
+  return JNI_TRUE;
+}
+
+void JniCompilerTest::CompareAndSwapIntImpl() {
+  SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
+               reinterpret_cast<void*>(&my_casi));
+  jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_, INT64_C(0x12345678ABCDEF88),
+                                            0xCAFEF00D, 0xEBADF00D);
+  EXPECT_EQ(result, JNI_TRUE);
+}
+
+JNI_TEST(CompareAndSwapInt)
+
+jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) {
+  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
+  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
+  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
+  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+  return 42;
+}
+
+void JniCompilerTest::GetTextImpl() {
+  SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
+               reinterpret_cast<void*>(&my_gettext));
+  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+                                          INT64_C(0x7FEDCBA987654321), jobj_);
+  EXPECT_EQ(result, 42);
+}
+
+JNI_TEST(GetText)
+
+int gJava_MyClassNatives_GetSinkProperties_calls = 0;
+jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstring s) {
+  // 1 = thisObj
+  Thread* self = Thread::Current();
+  EXPECT_EQ(kNative, self->GetState());
+  Locks::mutator_lock_->AssertNotHeld(self);
+  EXPECT_EQ(self->GetJniEnv(), env);
+  EXPECT_TRUE(thisObj != nullptr);
+  EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
+  EXPECT_EQ(s, nullptr);
+  gJava_MyClassNatives_GetSinkProperties_calls++;
+  ScopedObjectAccess soa(self);
+  EXPECT_EQ(2U, self->NumStackReferences());
+  EXPECT_TRUE(self->HoldsLock(soa.Decode<mirror::Object*>(thisObj)));
+  return nullptr;
+}
+
+void JniCompilerTest::GetSinkPropertiesNativeImpl() {
+  SetUpForTest(false, "getSinkPropertiesNative",
"(Ljava/lang/String;)[Ljava/lang/Object;", + reinterpret_cast(&Java_MyClassNatives_GetSinkProperties)); + + EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls); + jarray result = down_cast( + env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr)); + EXPECT_EQ(nullptr, result); + EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls); + + gJava_MyClassNatives_GetSinkProperties_calls = 0; +} + +JNI_TEST(GetSinkPropertiesNative) + +// This should return jclass, but we're imitating a bug pattern. +jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) { + return env->NewStringUTF("not a class!"); +} + +// This should return jclass, but we're imitating a bug pattern. +jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclass) { + return env->NewStringUTF("not a class!"); +} + +void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() { + SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;", + reinterpret_cast(&Java_MyClassNatives_instanceMethodThatShouldReturnClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // This native method is bad, and tries to return a jstring as a jclass. + env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()"); + + // Here, we just call the method incorrectly; we should catch that too. + env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()"); + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticObjectMethodV"); +} + +JNI_TEST(UpcallReturnTypeChecking_Instance) + +void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() { + SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;", + reinterpret_cast(&Java_MyClassNatives_staticMethodThatShouldReturnClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // This native method is bad, and tries to return a jstring as a jclass. + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()"); + + // Here, we just call the method incorrectly; we should catch that too. + env_->CallStaticObjectMethod(jklass_, jmethod_); + check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()"); + env_->CallObjectMethod(jobj_, jmethod_); + check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallObjectMethodV"); +} + +JNI_TEST(UpcallReturnTypeChecking_Static) + +// This should take jclass, but we're imitating a bug pattern. +void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) { +} + +// This should take jclass, but we're imitating a bug pattern. +void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) { +} + +void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() { + // This will lead to error messages in the log. 
+ ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + reinterpret_cast(&Java_MyClassNatives_instanceMethodThatShouldTakeClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // We deliberately pass a bad second argument here. + env_->CallVoidMethod(jobj_, jmethod_, 123, env_->NewStringUTF("not a class!")); + check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.instanceMethodThatShouldTakeClass(int, java.lang.Class)"); +} + +JNI_TEST(UpcallArgumentTypeChecking_Instance) + +void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", + reinterpret_cast(&Java_MyClassNatives_staticMethodThatShouldTakeClass)); + + CheckJniAbortCatcher check_jni_abort_catcher; + // We deliberately pass a bad second argument here. + env_->CallStaticVoidMethod(jklass_, jmethod_, 123, env_->NewStringUTF("not a class!")); + check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.staticMethodThatShouldTakeClass(int, java.lang.Class)"); +} + +JNI_TEST(UpcallArgumentTypeChecking_Static) + +jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1, jfloat f2) { + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + ScopedObjectAccess soa(Thread::Current()); + EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + return f1 - f2; // non-commutative operator +} + +void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() { + SetUpForTest(false, "checkFloats", "(FF)F", + reinterpret_cast(&Java_MyClassNatives_checkFloats)); + + jfloat result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, + 99.0F, 10.0F); + EXPECT_FLOAT_EQ(99.0F - 10.0F, result); + jfloat a = 3.14159F; + jfloat b = 0.69314F; + result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, a, b); + EXPECT_FLOAT_EQ(a - b, result); +} + +JNI_TEST(CompileAndRunFloatFloatMethod) + +void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED, + jobject thisObj ATTRIBUTE_UNUSED, + jint i1 ATTRIBUTE_UNUSED, + jlong l1 ATTRIBUTE_UNUSED) { +// EXPECT_EQ(kNative, Thread::Current()->GetState()); +// EXPECT_EQ(Thread::Current()->GetJniEnv(), env); +// EXPECT_TRUE(thisObj != nullptr); +// EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); +// ScopedObjectAccess soa(Thread::Current()); +// EXPECT_EQ(1U, Thread::Current()->NumStackReferences()); + EXPECT_EQ(i1, 1234); + EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0)); +} + +void JniCompilerTest::CheckParameterAlignImpl() { + SetUpForTest(false, "checkParameterAlign", "(IJ)V", + reinterpret_cast(&Java_MyClassNatives_checkParameterAlign)); + + env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_, 1234, INT64_C(0x12345678ABCDEF0)); +} + +JNI_TEST(CheckParameterAlign) + +void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject thisObj, + jobject o0, jobject o1, jobject o2, jobject o3, jobject o4, jobject o5, jobject o6, jobject o7, + jobject o8, jobject o9, jobject o10, jobject o11, jobject o12, jobject o13, jobject o14, jobject o15, + jobject o16, jobject o17, jobject o18, jobject o19, jobject o20, jobject o21, jobject o22, jobject o23, + jobject o24, jobject o25, jobject o26, jobject o27, jobject o28, 
jobject o29, jobject o30, jobject o31, + jobject o32, jobject o33, jobject o34, jobject o35, jobject o36, jobject o37, jobject o38, jobject o39, + jobject o40, jobject o41, jobject o42, jobject o43, jobject o44, jobject o45, jobject o46, jobject o47, + jobject o48, jobject o49, jobject o50, jobject o51, jobject o52, jobject o53, jobject o54, jobject o55, + jobject o56, jobject o57, jobject o58, jobject o59, jobject o60, jobject o61, jobject o62, jobject o63, + jobject o64, jobject o65, jobject o66, jobject o67, jobject o68, jobject o69, jobject o70, jobject o71, + jobject o72, jobject o73, jobject o74, jobject o75, jobject o76, jobject o77, jobject o78, jobject o79, + jobject o80, jobject o81, jobject o82, jobject o83, jobject o84, jobject o85, jobject o86, jobject o87, + jobject o88, jobject o89, jobject o90, jobject o91, jobject o92, jobject o93, jobject o94, jobject o95, + jobject o96, jobject o97, jobject o98, jobject o99, jobject o100, jobject o101, jobject o102, jobject o103, + jobject o104, jobject o105, jobject o106, jobject o107, jobject o108, jobject o109, jobject o110, jobject o111, + jobject o112, jobject o113, jobject o114, jobject o115, jobject o116, jobject o117, jobject o118, jobject o119, + jobject o120, jobject o121, jobject o122, jobject o123, jobject o124, jobject o125, jobject o126, jobject o127, + jobject o128, jobject o129, jobject o130, jobject o131, jobject o132, jobject o133, jobject o134, jobject o135, + jobject o136, jobject o137, jobject o138, jobject o139, jobject o140, jobject o141, jobject o142, jobject o143, + jobject o144, jobject o145, jobject o146, jobject o147, jobject o148, jobject o149, jobject o150, jobject o151, + jobject o152, jobject o153, jobject o154, jobject o155, jobject o156, jobject o157, jobject o158, jobject o159, + jobject o160, jobject o161, jobject o162, jobject o163, jobject o164, jobject o165, jobject o166, jobject o167, + jobject o168, jobject o169, jobject o170, jobject o171, jobject o172, jobject o173, jobject o174, jobject o175, + jobject o176, jobject o177, jobject o178, jobject o179, jobject o180, jobject o181, jobject o182, jobject o183, + jobject o184, jobject o185, jobject o186, jobject o187, jobject o188, jobject o189, jobject o190, jobject o191, + jobject o192, jobject o193, jobject o194, jobject o195, jobject o196, jobject o197, jobject o198, jobject o199, + jobject o200, jobject o201, jobject o202, jobject o203, jobject o204, jobject o205, jobject o206, jobject o207, + jobject o208, jobject o209, jobject o210, jobject o211, jobject o212, jobject o213, jobject o214, jobject o215, + jobject o216, jobject o217, jobject o218, jobject o219, jobject o220, jobject o221, jobject o222, jobject o223, + jobject o224, jobject o225, jobject o226, jobject o227, jobject o228, jobject o229, jobject o230, jobject o231, + jobject o232, jobject o233, jobject o234, jobject o235, jobject o236, jobject o237, jobject o238, jobject o239, + jobject o240, jobject o241, jobject o242, jobject o243, jobject o244, jobject o245, jobject o246, jobject o247, + jobject o248, jobject o249, jobject o250, jobject o251, jobject o252, jobject o253) { + EXPECT_EQ(kNative, Thread::Current()->GetState()); + EXPECT_EQ(Thread::Current()->GetJniEnv(), env); + EXPECT_TRUE(thisObj != nullptr); + EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_)); + ScopedObjectAccess soa(Thread::Current()); + EXPECT_GE(255U, Thread::Current()->NumStackReferences()); + + // two tests possible + if (o0 == nullptr) { + // 1) everything is null + EXPECT_TRUE(o0 == nullptr 
&& o1 == nullptr && o2 == nullptr && o3 == nullptr && o4 == nullptr + && o5 == nullptr && o6 == nullptr && o7 == nullptr && o8 == nullptr && o9 == nullptr + && o10 == nullptr && o11 == nullptr && o12 == nullptr && o13 == nullptr && o14 == nullptr + && o15 == nullptr && o16 == nullptr && o17 == nullptr && o18 == nullptr && o19 == nullptr + && o20 == nullptr && o21 == nullptr && o22 == nullptr && o23 == nullptr && o24 == nullptr + && o25 == nullptr && o26 == nullptr && o27 == nullptr && o28 == nullptr && o29 == nullptr + && o30 == nullptr && o31 == nullptr && o32 == nullptr && o33 == nullptr && o34 == nullptr + && o35 == nullptr && o36 == nullptr && o37 == nullptr && o38 == nullptr && o39 == nullptr + && o40 == nullptr && o41 == nullptr && o42 == nullptr && o43 == nullptr && o44 == nullptr + && o45 == nullptr && o46 == nullptr && o47 == nullptr && o48 == nullptr && o49 == nullptr + && o50 == nullptr && o51 == nullptr && o52 == nullptr && o53 == nullptr && o54 == nullptr + && o55 == nullptr && o56 == nullptr && o57 == nullptr && o58 == nullptr && o59 == nullptr + && o60 == nullptr && o61 == nullptr && o62 == nullptr && o63 == nullptr && o64 == nullptr + && o65 == nullptr && o66 == nullptr && o67 == nullptr && o68 == nullptr && o69 == nullptr + && o70 == nullptr && o71 == nullptr && o72 == nullptr && o73 == nullptr && o74 == nullptr + && o75 == nullptr && o76 == nullptr && o77 == nullptr && o78 == nullptr && o79 == nullptr + && o80 == nullptr && o81 == nullptr && o82 == nullptr && o83 == nullptr && o84 == nullptr + && o85 == nullptr && o86 == nullptr && o87 == nullptr && o88 == nullptr && o89 == nullptr + && o90 == nullptr && o91 == nullptr && o92 == nullptr && o93 == nullptr && o94 == nullptr + && o95 == nullptr && o96 == nullptr && o97 == nullptr && o98 == nullptr && o99 == nullptr + && o100 == nullptr && o101 == nullptr && o102 == nullptr && o103 == nullptr && o104 == nullptr + && o105 == nullptr && o106 == nullptr && o107 == nullptr && o108 == nullptr && o109 == nullptr + && o110 == nullptr && o111 == nullptr && o112 == nullptr && o113 == nullptr && o114 == nullptr + && o115 == nullptr && o116 == nullptr && o117 == nullptr && o118 == nullptr && o119 == nullptr + && o120 == nullptr && o121 == nullptr && o122 == nullptr && o123 == nullptr && o124 == nullptr + && o125 == nullptr && o126 == nullptr && o127 == nullptr && o128 == nullptr && o129 == nullptr + && o130 == nullptr && o131 == nullptr && o132 == nullptr && o133 == nullptr && o134 == nullptr + && o135 == nullptr && o136 == nullptr && o137 == nullptr && o138 == nullptr && o139 == nullptr + && o140 == nullptr && o141 == nullptr && o142 == nullptr && o143 == nullptr && o144 == nullptr + && o145 == nullptr && o146 == nullptr && o147 == nullptr && o148 == nullptr && o149 == nullptr + && o150 == nullptr && o151 == nullptr && o152 == nullptr && o153 == nullptr && o154 == nullptr + && o155 == nullptr && o156 == nullptr && o157 == nullptr && o158 == nullptr && o159 == nullptr + && o160 == nullptr && o161 == nullptr && o162 == nullptr && o163 == nullptr && o164 == nullptr + && o165 == nullptr && o166 == nullptr && o167 == nullptr && o168 == nullptr && o169 == nullptr + && o170 == nullptr && o171 == nullptr && o172 == nullptr && o173 == nullptr && o174 == nullptr + && o175 == nullptr && o176 == nullptr && o177 == nullptr && o178 == nullptr && o179 == nullptr + && o180 == nullptr && o181 == nullptr && o182 == nullptr && o183 == nullptr && o184 == nullptr + && o185 == nullptr && o186 == nullptr && o187 == nullptr && o188 == nullptr && o189 == 
nullptr
+        && o190 == nullptr && o191 == nullptr && o192 == nullptr && o193 == nullptr && o194 == nullptr
+        && o195 == nullptr && o196 == nullptr && o197 == nullptr && o198 == nullptr && o199 == nullptr
+        && o200 == nullptr && o201 == nullptr && o202 == nullptr && o203 == nullptr && o204 == nullptr
+        && o205 == nullptr && o206 == nullptr && o207 == nullptr && o208 == nullptr && o209 == nullptr
+        && o210 == nullptr && o211 == nullptr && o212 == nullptr && o213 == nullptr && o214 == nullptr
+        && o215 == nullptr && o216 == nullptr && o217 == nullptr && o218 == nullptr && o219 == nullptr
+        && o220 == nullptr && o221 == nullptr && o222 == nullptr && o223 == nullptr && o224 == nullptr
+        && o225 == nullptr && o226 == nullptr && o227 == nullptr && o228 == nullptr && o229 == nullptr
+        && o230 == nullptr && o231 == nullptr && o232 == nullptr && o233 == nullptr && o234 == nullptr
+        && o235 == nullptr && o236 == nullptr && o237 == nullptr && o238 == nullptr && o239 == nullptr
+        && o240 == nullptr && o241 == nullptr && o242 == nullptr && o243 == nullptr && o244 == nullptr
+        && o245 == nullptr && o246 == nullptr && o247 == nullptr && o248 == nullptr && o249 == nullptr
+        && o250 == nullptr && o251 == nullptr && o252 == nullptr && o253 == nullptr);
+  } else {
+    EXPECT_EQ(0, env->GetArrayLength(reinterpret_cast<jintArray>(o0)));
+    EXPECT_EQ(1, env->GetArrayLength(reinterpret_cast<jintArray>(o1)));
+    EXPECT_EQ(2, env->GetArrayLength(reinterpret_cast<jintArray>(o2)));
+    EXPECT_EQ(3, env->GetArrayLength(reinterpret_cast<jintArray>(o3)));
+    EXPECT_EQ(4, env->GetArrayLength(reinterpret_cast<jintArray>(o4)));
+    EXPECT_EQ(5, env->GetArrayLength(reinterpret_cast<jintArray>(o5)));
+    EXPECT_EQ(6, env->GetArrayLength(reinterpret_cast<jintArray>(o6)));
+    EXPECT_EQ(7, env->GetArrayLength(reinterpret_cast<jintArray>(o7)));
+    EXPECT_EQ(8, env->GetArrayLength(reinterpret_cast<jintArray>(o8)));
+    EXPECT_EQ(9, env->GetArrayLength(reinterpret_cast<jintArray>(o9)));
+    EXPECT_EQ(10, env->GetArrayLength(reinterpret_cast<jintArray>(o10)));
+    EXPECT_EQ(11, env->GetArrayLength(reinterpret_cast<jintArray>(o11)));
+    EXPECT_EQ(12, env->GetArrayLength(reinterpret_cast<jintArray>(o12)));
+    EXPECT_EQ(13, env->GetArrayLength(reinterpret_cast<jintArray>(o13)));
+    EXPECT_EQ(14, env->GetArrayLength(reinterpret_cast<jintArray>(o14)));
+    EXPECT_EQ(15, env->GetArrayLength(reinterpret_cast<jintArray>(o15)));
+    EXPECT_EQ(16, env->GetArrayLength(reinterpret_cast<jintArray>(o16)));
+    EXPECT_EQ(17, env->GetArrayLength(reinterpret_cast<jintArray>(o17)));
+    EXPECT_EQ(18, env->GetArrayLength(reinterpret_cast<jintArray>(o18)));
+    EXPECT_EQ(19, env->GetArrayLength(reinterpret_cast<jintArray>(o19)));
+    EXPECT_EQ(20, env->GetArrayLength(reinterpret_cast<jintArray>(o20)));
+    EXPECT_EQ(21, env->GetArrayLength(reinterpret_cast<jintArray>(o21)));
+    EXPECT_EQ(22, env->GetArrayLength(reinterpret_cast<jintArray>(o22)));
+    EXPECT_EQ(23, env->GetArrayLength(reinterpret_cast<jintArray>(o23)));
+    EXPECT_EQ(24, env->GetArrayLength(reinterpret_cast<jintArray>(o24)));
+    EXPECT_EQ(25, env->GetArrayLength(reinterpret_cast<jintArray>(o25)));
+    EXPECT_EQ(26, env->GetArrayLength(reinterpret_cast<jintArray>(o26)));
+    EXPECT_EQ(27, env->GetArrayLength(reinterpret_cast<jintArray>(o27)));
+    EXPECT_EQ(28, env->GetArrayLength(reinterpret_cast<jintArray>(o28)));
+    EXPECT_EQ(29, env->GetArrayLength(reinterpret_cast<jintArray>(o29)));
+    EXPECT_EQ(30, env->GetArrayLength(reinterpret_cast<jintArray>(o30)));
+    EXPECT_EQ(31, env->GetArrayLength(reinterpret_cast<jintArray>(o31)));
+    EXPECT_EQ(32, env->GetArrayLength(reinterpret_cast<jintArray>(o32)));
+    EXPECT_EQ(33, env->GetArrayLength(reinterpret_cast<jintArray>(o33)));
+    EXPECT_EQ(34, env->GetArrayLength(reinterpret_cast<jintArray>(o34)));
+    EXPECT_EQ(35, env->GetArrayLength(reinterpret_cast<jintArray>(o35)));
+    EXPECT_EQ(36, env->GetArrayLength(reinterpret_cast<jintArray>(o36)));
+    EXPECT_EQ(37, env->GetArrayLength(reinterpret_cast<jintArray>(o37)));
+    EXPECT_EQ(38, env->GetArrayLength(reinterpret_cast<jintArray>(o38)));
+    EXPECT_EQ(39, env->GetArrayLength(reinterpret_cast<jintArray>(o39)));
+    EXPECT_EQ(40, env->GetArrayLength(reinterpret_cast<jintArray>(o40)));
+    EXPECT_EQ(41, env->GetArrayLength(reinterpret_cast<jintArray>(o41)));
+    EXPECT_EQ(42, env->GetArrayLength(reinterpret_cast<jintArray>(o42)));
+    EXPECT_EQ(43, env->GetArrayLength(reinterpret_cast<jintArray>(o43)));
+    EXPECT_EQ(44, env->GetArrayLength(reinterpret_cast<jintArray>(o44)));
+    EXPECT_EQ(45, env->GetArrayLength(reinterpret_cast<jintArray>(o45)));
+    EXPECT_EQ(46, env->GetArrayLength(reinterpret_cast<jintArray>(o46)));
+    EXPECT_EQ(47, env->GetArrayLength(reinterpret_cast<jintArray>(o47)));
+    EXPECT_EQ(48, env->GetArrayLength(reinterpret_cast<jintArray>(o48)));
+    EXPECT_EQ(49, env->GetArrayLength(reinterpret_cast<jintArray>(o49)));
+    EXPECT_EQ(50, env->GetArrayLength(reinterpret_cast<jintArray>(o50)));
+    EXPECT_EQ(51, env->GetArrayLength(reinterpret_cast<jintArray>(o51)));
+    EXPECT_EQ(52, env->GetArrayLength(reinterpret_cast<jintArray>(o52)));
+    EXPECT_EQ(53, env->GetArrayLength(reinterpret_cast<jintArray>(o53)));
+    EXPECT_EQ(54, env->GetArrayLength(reinterpret_cast<jintArray>(o54)));
+    EXPECT_EQ(55, env->GetArrayLength(reinterpret_cast<jintArray>(o55)));
+    EXPECT_EQ(56, env->GetArrayLength(reinterpret_cast<jintArray>(o56)));
+    EXPECT_EQ(57, env->GetArrayLength(reinterpret_cast<jintArray>(o57)));
+    EXPECT_EQ(58, env->GetArrayLength(reinterpret_cast<jintArray>(o58)));
+    EXPECT_EQ(59, env->GetArrayLength(reinterpret_cast<jintArray>(o59)));
+    EXPECT_EQ(60, env->GetArrayLength(reinterpret_cast<jintArray>(o60)));
+    EXPECT_EQ(61, env->GetArrayLength(reinterpret_cast<jintArray>(o61)));
+    EXPECT_EQ(62, env->GetArrayLength(reinterpret_cast<jintArray>(o62)));
+    EXPECT_EQ(63, env->GetArrayLength(reinterpret_cast<jintArray>(o63)));
+    EXPECT_EQ(64, env->GetArrayLength(reinterpret_cast<jintArray>(o64)));
+    EXPECT_EQ(65, env->GetArrayLength(reinterpret_cast<jintArray>(o65)));
+    EXPECT_EQ(66, env->GetArrayLength(reinterpret_cast<jintArray>(o66)));
+    EXPECT_EQ(67, env->GetArrayLength(reinterpret_cast<jintArray>(o67)));
+    EXPECT_EQ(68, env->GetArrayLength(reinterpret_cast<jintArray>(o68)));
+    EXPECT_EQ(69, env->GetArrayLength(reinterpret_cast<jintArray>(o69)));
+    EXPECT_EQ(70, env->GetArrayLength(reinterpret_cast<jintArray>(o70)));
+    EXPECT_EQ(71, env->GetArrayLength(reinterpret_cast<jintArray>(o71)));
+    EXPECT_EQ(72, env->GetArrayLength(reinterpret_cast<jintArray>(o72)));
+    EXPECT_EQ(73, env->GetArrayLength(reinterpret_cast<jintArray>(o73)));
+    EXPECT_EQ(74, env->GetArrayLength(reinterpret_cast<jintArray>(o74)));
+    EXPECT_EQ(75, env->GetArrayLength(reinterpret_cast<jintArray>(o75)));
+    EXPECT_EQ(76, env->GetArrayLength(reinterpret_cast<jintArray>(o76)));
+    EXPECT_EQ(77, env->GetArrayLength(reinterpret_cast<jintArray>(o77)));
+    EXPECT_EQ(78, env->GetArrayLength(reinterpret_cast<jintArray>(o78)));
+    EXPECT_EQ(79, env->GetArrayLength(reinterpret_cast<jintArray>(o79)));
+    EXPECT_EQ(80, env->GetArrayLength(reinterpret_cast<jintArray>(o80)));
+    EXPECT_EQ(81, env->GetArrayLength(reinterpret_cast<jintArray>(o81)));
+    EXPECT_EQ(82, env->GetArrayLength(reinterpret_cast<jintArray>(o82)));
+    EXPECT_EQ(83, env->GetArrayLength(reinterpret_cast<jintArray>(o83)));
+    EXPECT_EQ(84, env->GetArrayLength(reinterpret_cast<jintArray>(o84)));
+    EXPECT_EQ(85, env->GetArrayLength(reinterpret_cast<jintArray>(o85)));
+    EXPECT_EQ(86, env->GetArrayLength(reinterpret_cast<jintArray>(o86)));
+    EXPECT_EQ(87, env->GetArrayLength(reinterpret_cast<jintArray>(o87)));
+    EXPECT_EQ(88, env->GetArrayLength(reinterpret_cast<jintArray>(o88)));
+    EXPECT_EQ(89, env->GetArrayLength(reinterpret_cast<jintArray>(o89)));
+    EXPECT_EQ(90, env->GetArrayLength(reinterpret_cast<jintArray>(o90)));
+    EXPECT_EQ(91, env->GetArrayLength(reinterpret_cast<jintArray>(o91)));
+    EXPECT_EQ(92, env->GetArrayLength(reinterpret_cast<jintArray>(o92)));
+    EXPECT_EQ(93, env->GetArrayLength(reinterpret_cast<jintArray>(o93)));
+    EXPECT_EQ(94, env->GetArrayLength(reinterpret_cast<jintArray>(o94)));
+    EXPECT_EQ(95, env->GetArrayLength(reinterpret_cast<jintArray>(o95)));
+    EXPECT_EQ(96, env->GetArrayLength(reinterpret_cast<jintArray>(o96)));
+    EXPECT_EQ(97, env->GetArrayLength(reinterpret_cast<jintArray>(o97)));
+    EXPECT_EQ(98, env->GetArrayLength(reinterpret_cast<jintArray>(o98)));
+    EXPECT_EQ(99, env->GetArrayLength(reinterpret_cast<jintArray>(o99)));
+    EXPECT_EQ(100, env->GetArrayLength(reinterpret_cast<jintArray>(o100)));
+    EXPECT_EQ(101, env->GetArrayLength(reinterpret_cast<jintArray>(o101)));
+    EXPECT_EQ(102, env->GetArrayLength(reinterpret_cast<jintArray>(o102)));
+    EXPECT_EQ(103, env->GetArrayLength(reinterpret_cast<jintArray>(o103)));
+    EXPECT_EQ(104, env->GetArrayLength(reinterpret_cast<jintArray>(o104)));
+    EXPECT_EQ(105, env->GetArrayLength(reinterpret_cast<jintArray>(o105)));
+    EXPECT_EQ(106, env->GetArrayLength(reinterpret_cast<jintArray>(o106)));
+    EXPECT_EQ(107, env->GetArrayLength(reinterpret_cast<jintArray>(o107)));
+    EXPECT_EQ(108, env->GetArrayLength(reinterpret_cast<jintArray>(o108)));
+    EXPECT_EQ(109, env->GetArrayLength(reinterpret_cast<jintArray>(o109)));
+    EXPECT_EQ(110, env->GetArrayLength(reinterpret_cast<jintArray>(o110)));
+    EXPECT_EQ(111, env->GetArrayLength(reinterpret_cast<jintArray>(o111)));
+    EXPECT_EQ(112, env->GetArrayLength(reinterpret_cast<jintArray>(o112)));
+    EXPECT_EQ(113, env->GetArrayLength(reinterpret_cast<jintArray>(o113)));
+    EXPECT_EQ(114, env->GetArrayLength(reinterpret_cast<jintArray>(o114)));
+    EXPECT_EQ(115, env->GetArrayLength(reinterpret_cast<jintArray>(o115)));
+    EXPECT_EQ(116, env->GetArrayLength(reinterpret_cast<jintArray>(o116)));
+    EXPECT_EQ(117, env->GetArrayLength(reinterpret_cast<jintArray>(o117)));
+    EXPECT_EQ(118, env->GetArrayLength(reinterpret_cast<jintArray>(o118)));
+    EXPECT_EQ(119, env->GetArrayLength(reinterpret_cast<jintArray>(o119)));
+    EXPECT_EQ(120, env->GetArrayLength(reinterpret_cast<jintArray>(o120)));
+    EXPECT_EQ(121, env->GetArrayLength(reinterpret_cast<jintArray>(o121)));
+    EXPECT_EQ(122, env->GetArrayLength(reinterpret_cast<jintArray>(o122)));
+    EXPECT_EQ(123, env->GetArrayLength(reinterpret_cast<jintArray>(o123)));
+    EXPECT_EQ(124, env->GetArrayLength(reinterpret_cast<jintArray>(o124)));
+    EXPECT_EQ(125, env->GetArrayLength(reinterpret_cast<jintArray>(o125)));
+    EXPECT_EQ(126, env->GetArrayLength(reinterpret_cast<jintArray>(o126)));
+    EXPECT_EQ(127, env->GetArrayLength(reinterpret_cast<jintArray>(o127)));
+    EXPECT_EQ(128, env->GetArrayLength(reinterpret_cast<jintArray>(o128)));
+    EXPECT_EQ(129, env->GetArrayLength(reinterpret_cast<jintArray>(o129)));
+    EXPECT_EQ(130, env->GetArrayLength(reinterpret_cast<jintArray>(o130)));
+    EXPECT_EQ(131, env->GetArrayLength(reinterpret_cast<jintArray>(o131)));
+    EXPECT_EQ(132, env->GetArrayLength(reinterpret_cast<jintArray>(o132)));
+    EXPECT_EQ(133, env->GetArrayLength(reinterpret_cast<jintArray>(o133)));
+    EXPECT_EQ(134, env->GetArrayLength(reinterpret_cast<jintArray>(o134)));
+    EXPECT_EQ(135, env->GetArrayLength(reinterpret_cast<jintArray>(o135)));
+    EXPECT_EQ(136, env->GetArrayLength(reinterpret_cast<jintArray>(o136)));
+    EXPECT_EQ(137, env->GetArrayLength(reinterpret_cast<jintArray>(o137)));
+    EXPECT_EQ(138, env->GetArrayLength(reinterpret_cast<jintArray>(o138)));
+    EXPECT_EQ(139, env->GetArrayLength(reinterpret_cast<jintArray>(o139)));
+    EXPECT_EQ(140, env->GetArrayLength(reinterpret_cast<jintArray>(o140)));
+    EXPECT_EQ(141, env->GetArrayLength(reinterpret_cast<jintArray>(o141)));
+    EXPECT_EQ(142, env->GetArrayLength(reinterpret_cast<jintArray>(o142)));
+    EXPECT_EQ(143, env->GetArrayLength(reinterpret_cast<jintArray>(o143)));
+    EXPECT_EQ(144, env->GetArrayLength(reinterpret_cast<jintArray>(o144)));
+    EXPECT_EQ(145, env->GetArrayLength(reinterpret_cast<jintArray>(o145)));
+    EXPECT_EQ(146, env->GetArrayLength(reinterpret_cast<jintArray>(o146)));
+    EXPECT_EQ(147, env->GetArrayLength(reinterpret_cast<jintArray>(o147)));
+    EXPECT_EQ(148, env->GetArrayLength(reinterpret_cast<jintArray>(o148)));
+    EXPECT_EQ(149, env->GetArrayLength(reinterpret_cast<jintArray>(o149)));
+    EXPECT_EQ(150, env->GetArrayLength(reinterpret_cast<jintArray>(o150)));
+    EXPECT_EQ(151, env->GetArrayLength(reinterpret_cast<jintArray>(o151)));
+    EXPECT_EQ(152, env->GetArrayLength(reinterpret_cast<jintArray>(o152)));
+    EXPECT_EQ(153, env->GetArrayLength(reinterpret_cast<jintArray>(o153)));
+    EXPECT_EQ(154, env->GetArrayLength(reinterpret_cast<jintArray>(o154)));
+    EXPECT_EQ(155, env->GetArrayLength(reinterpret_cast<jintArray>(o155)));
+    EXPECT_EQ(156, env->GetArrayLength(reinterpret_cast<jintArray>(o156)));
+    EXPECT_EQ(157, env->GetArrayLength(reinterpret_cast<jintArray>(o157)));
+    EXPECT_EQ(158, env->GetArrayLength(reinterpret_cast<jintArray>(o158)));
+    EXPECT_EQ(159, env->GetArrayLength(reinterpret_cast<jintArray>(o159)));
+    EXPECT_EQ(160, env->GetArrayLength(reinterpret_cast<jintArray>(o160)));
+    EXPECT_EQ(161, env->GetArrayLength(reinterpret_cast<jintArray>(o161)));
+    EXPECT_EQ(162, env->GetArrayLength(reinterpret_cast<jintArray>(o162)));
+    EXPECT_EQ(163, env->GetArrayLength(reinterpret_cast<jintArray>(o163)));
+    EXPECT_EQ(164, env->GetArrayLength(reinterpret_cast<jintArray>(o164)));
+    EXPECT_EQ(165, env->GetArrayLength(reinterpret_cast<jintArray>(o165)));
+    EXPECT_EQ(166, env->GetArrayLength(reinterpret_cast<jintArray>(o166)));
+    EXPECT_EQ(167, env->GetArrayLength(reinterpret_cast<jintArray>(o167)));
+    EXPECT_EQ(168, env->GetArrayLength(reinterpret_cast<jintArray>(o168)));
+    EXPECT_EQ(169, env->GetArrayLength(reinterpret_cast<jintArray>(o169)));
+    EXPECT_EQ(170, env->GetArrayLength(reinterpret_cast<jintArray>(o170)));
+    EXPECT_EQ(171, env->GetArrayLength(reinterpret_cast<jintArray>(o171)));
+    EXPECT_EQ(172, env->GetArrayLength(reinterpret_cast<jintArray>(o172)));
+    EXPECT_EQ(173, env->GetArrayLength(reinterpret_cast<jintArray>(o173)));
+    EXPECT_EQ(174, env->GetArrayLength(reinterpret_cast<jintArray>(o174)));
+    EXPECT_EQ(175, env->GetArrayLength(reinterpret_cast<jintArray>(o175)));
+    EXPECT_EQ(176, env->GetArrayLength(reinterpret_cast<jintArray>(o176)));
+    EXPECT_EQ(177, env->GetArrayLength(reinterpret_cast<jintArray>(o177)));
+    EXPECT_EQ(178, env->GetArrayLength(reinterpret_cast<jintArray>(o178)));
+    EXPECT_EQ(179, env->GetArrayLength(reinterpret_cast<jintArray>(o179)));
+    EXPECT_EQ(180, env->GetArrayLength(reinterpret_cast<jintArray>(o180)));
+    EXPECT_EQ(181, env->GetArrayLength(reinterpret_cast<jintArray>(o181)));
+    EXPECT_EQ(182, env->GetArrayLength(reinterpret_cast<jintArray>(o182)));
+    EXPECT_EQ(183, env->GetArrayLength(reinterpret_cast<jintArray>(o183)));
+    EXPECT_EQ(184, env->GetArrayLength(reinterpret_cast<jintArray>(o184)));
+    EXPECT_EQ(185, env->GetArrayLength(reinterpret_cast<jintArray>(o185)));
+    EXPECT_EQ(186, env->GetArrayLength(reinterpret_cast<jintArray>(o186)));
+    EXPECT_EQ(187, env->GetArrayLength(reinterpret_cast<jintArray>(o187)));
+    EXPECT_EQ(188, env->GetArrayLength(reinterpret_cast<jintArray>(o188)));
+    EXPECT_EQ(189, env->GetArrayLength(reinterpret_cast<jintArray>(o189)));
+    EXPECT_EQ(190, env->GetArrayLength(reinterpret_cast<jintArray>(o190)));
+    EXPECT_EQ(191, env->GetArrayLength(reinterpret_cast<jintArray>(o191)));
+    EXPECT_EQ(192, env->GetArrayLength(reinterpret_cast<jintArray>(o192)));
+    EXPECT_EQ(193, env->GetArrayLength(reinterpret_cast<jintArray>(o193)));
+    EXPECT_EQ(194, env->GetArrayLength(reinterpret_cast<jintArray>(o194)));
+    EXPECT_EQ(195, env->GetArrayLength(reinterpret_cast<jintArray>(o195)));
+    EXPECT_EQ(196, env->GetArrayLength(reinterpret_cast<jintArray>(o196)));
+    EXPECT_EQ(197, env->GetArrayLength(reinterpret_cast<jintArray>(o197)));
+    EXPECT_EQ(198, env->GetArrayLength(reinterpret_cast<jintArray>(o198)));
+    EXPECT_EQ(199, env->GetArrayLength(reinterpret_cast<jintArray>(o199)));
+    EXPECT_EQ(200, env->GetArrayLength(reinterpret_cast<jintArray>(o200)));
+    EXPECT_EQ(201, env->GetArrayLength(reinterpret_cast<jintArray>(o201)));
+    EXPECT_EQ(202, env->GetArrayLength(reinterpret_cast<jintArray>(o202)));
+    EXPECT_EQ(203, env->GetArrayLength(reinterpret_cast<jintArray>(o203)));
+    EXPECT_EQ(204, env->GetArrayLength(reinterpret_cast<jintArray>(o204)));
+    EXPECT_EQ(205, env->GetArrayLength(reinterpret_cast<jintArray>(o205)));
+    EXPECT_EQ(206, env->GetArrayLength(reinterpret_cast<jintArray>(o206)));
+    EXPECT_EQ(207, env->GetArrayLength(reinterpret_cast<jintArray>(o207)));
+    EXPECT_EQ(208, env->GetArrayLength(reinterpret_cast<jintArray>(o208)));
+    EXPECT_EQ(209, env->GetArrayLength(reinterpret_cast<jintArray>(o209)));
+    EXPECT_EQ(210, env->GetArrayLength(reinterpret_cast<jintArray>(o210)));
+    EXPECT_EQ(211, env->GetArrayLength(reinterpret_cast<jintArray>(o211)));
+    EXPECT_EQ(212, env->GetArrayLength(reinterpret_cast<jintArray>(o212)));
+    EXPECT_EQ(213, env->GetArrayLength(reinterpret_cast<jintArray>(o213)));
+    EXPECT_EQ(214, env->GetArrayLength(reinterpret_cast<jintArray>(o214)));
+    EXPECT_EQ(215, env->GetArrayLength(reinterpret_cast<jintArray>(o215)));
+    EXPECT_EQ(216, env->GetArrayLength(reinterpret_cast<jintArray>(o216)));
+    EXPECT_EQ(217, env->GetArrayLength(reinterpret_cast<jintArray>(o217)));
+    EXPECT_EQ(218, env->GetArrayLength(reinterpret_cast<jintArray>(o218)));
+    EXPECT_EQ(219, env->GetArrayLength(reinterpret_cast<jintArray>(o219)));
+    EXPECT_EQ(220, env->GetArrayLength(reinterpret_cast<jintArray>(o220)));
+    EXPECT_EQ(221, env->GetArrayLength(reinterpret_cast<jintArray>(o221)));
+    EXPECT_EQ(222, env->GetArrayLength(reinterpret_cast<jintArray>(o222)));
+    EXPECT_EQ(223, env->GetArrayLength(reinterpret_cast<jintArray>(o223)));
+    EXPECT_EQ(224, env->GetArrayLength(reinterpret_cast<jintArray>(o224)));
+    EXPECT_EQ(225, env->GetArrayLength(reinterpret_cast<jintArray>(o225)));
+    EXPECT_EQ(226, env->GetArrayLength(reinterpret_cast<jintArray>(o226)));
+    EXPECT_EQ(227, env->GetArrayLength(reinterpret_cast<jintArray>(o227)));
+    EXPECT_EQ(228, env->GetArrayLength(reinterpret_cast<jintArray>(o228)));
+    EXPECT_EQ(229, env->GetArrayLength(reinterpret_cast<jintArray>(o229)));
+    EXPECT_EQ(230, env->GetArrayLength(reinterpret_cast<jintArray>(o230)));
+    EXPECT_EQ(231, env->GetArrayLength(reinterpret_cast<jintArray>(o231)));
+    EXPECT_EQ(232, env->GetArrayLength(reinterpret_cast<jintArray>(o232)));
+    EXPECT_EQ(233, env->GetArrayLength(reinterpret_cast<jintArray>(o233)));
+    EXPECT_EQ(234, env->GetArrayLength(reinterpret_cast<jintArray>(o234)));
+    EXPECT_EQ(235, env->GetArrayLength(reinterpret_cast<jintArray>(o235)));
+    EXPECT_EQ(236, env->GetArrayLength(reinterpret_cast<jintArray>(o236)));
+    EXPECT_EQ(237, env->GetArrayLength(reinterpret_cast<jintArray>(o237)));
+    EXPECT_EQ(238, env->GetArrayLength(reinterpret_cast<jintArray>(o238)));
+    EXPECT_EQ(239, env->GetArrayLength(reinterpret_cast<jintArray>(o239)));
+    EXPECT_EQ(240, env->GetArrayLength(reinterpret_cast<jintArray>(o240)));
+    EXPECT_EQ(241, env->GetArrayLength(reinterpret_cast<jintArray>(o241)));
+    EXPECT_EQ(242, env->GetArrayLength(reinterpret_cast<jintArray>(o242)));
+    EXPECT_EQ(243, env->GetArrayLength(reinterpret_cast<jintArray>(o243)));
+    EXPECT_EQ(244, env->GetArrayLength(reinterpret_cast<jintArray>(o244)));
+    EXPECT_EQ(245, env->GetArrayLength(reinterpret_cast<jintArray>(o245)));
+    EXPECT_EQ(246, env->GetArrayLength(reinterpret_cast<jintArray>(o246)));
+    EXPECT_EQ(247, env->GetArrayLength(reinterpret_cast<jintArray>(o247)));
+    EXPECT_EQ(248, env->GetArrayLength(reinterpret_cast<jintArray>(o248)));
+    EXPECT_EQ(249, env->GetArrayLength(reinterpret_cast<jintArray>(o249)));
+    EXPECT_EQ(250, env->GetArrayLength(reinterpret_cast<jintArray>(o250)));
+    EXPECT_EQ(251, env->GetArrayLength(reinterpret_cast<jintArray>(o251)));
+    EXPECT_EQ(252, env->GetArrayLength(reinterpret_cast<jintArray>(o252)));
+    EXPECT_EQ(253, env->GetArrayLength(reinterpret_cast<jintArray>(o253)));
+  }
+}
+
+const char* longSig =
+    "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+    "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+    "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+    "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+    "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+    "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
+
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + 
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" + "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V"; + +void JniCompilerTest::MaxParamNumberImpl() { + SetUpForTest(false, "maxParamNumber", longSig, + reinterpret_cast(&Java_MyClassNatives_maxParamNumber)); + + jvalue args[254]; + + // First test: test with all arguments null. + for (int i = 0; i < 254; ++i) { + args[i].l = nullptr; + } + + env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); + + // Second test: test with int[] objects with increasing lengths + for (int i = 0; i < 254; ++i) { + jintArray tmp = env_->NewIntArray(i); + args[i].l = tmp; + EXPECT_NE(args[i].l, nullptr); + } + + env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); +} + +JNI_TEST(MaxParamNumber) + +void JniCompilerTest::WithoutImplementationImpl() { + // This will lead to error messages in the log. + ScopedLogSeverity sls(LogSeverity::FATAL); + + SetUpForTest(false, "withoutImplementation", "()V", nullptr); + + env_->CallVoidMethod(jobj_, jmethod_); + + EXPECT_TRUE(Thread::Current()->IsExceptionPending()); + EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); +} + +JNI_TEST(WithoutImplementation) + +void JniCompilerTest::WithoutImplementationRefReturnImpl() { + // This will lead to error messages in the log. 
+  ScopedLogSeverity sls(LogSeverity::FATAL);
+
+  SetUpForTest(false, "withoutImplementationRefReturn", "()Ljava/lang/Object;", nullptr);
+
+  env_->CallObjectMethod(jobj_, jmethod_);
+
+  EXPECT_TRUE(Thread::Current()->IsExceptionPending());
+  EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
+}
+
+JNI_TEST(WithoutImplementationRefReturn)
+
+void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
+                                            jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
+                                            jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
+                                            jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9,
+                                            jfloat f10) {
+  EXPECT_EQ(i1, 1);
+  EXPECT_EQ(i2, 2);
+  EXPECT_EQ(i3, 3);
+  EXPECT_EQ(i4, 4);
+  EXPECT_EQ(i5, 5);
+  EXPECT_EQ(i6, 6);
+  EXPECT_EQ(i7, 7);
+  EXPECT_EQ(i8, 8);
+  EXPECT_EQ(i9, 9);
+  EXPECT_EQ(i10, 10);
+
+  jint i11 = bit_cast<jint, jfloat>(f1);
+  EXPECT_EQ(i11, 11);
+  jint i12 = bit_cast<jint, jfloat>(f2);
+  EXPECT_EQ(i12, 12);
+  jint i13 = bit_cast<jint, jfloat>(f3);
+  EXPECT_EQ(i13, 13);
+  jint i14 = bit_cast<jint, jfloat>(f4);
+  EXPECT_EQ(i14, 14);
+  jint i15 = bit_cast<jint, jfloat>(f5);
+  EXPECT_EQ(i15, 15);
+  jint i16 = bit_cast<jint, jfloat>(f6);
+  EXPECT_EQ(i16, 16);
+  jint i17 = bit_cast<jint, jfloat>(f7);
+  EXPECT_EQ(i17, 17);
+  jint i18 = bit_cast<jint, jfloat>(f8);
+  EXPECT_EQ(i18, 18);
+  jint i19 = bit_cast<jint, jfloat>(f9);
+  EXPECT_EQ(i19, 19);
+  jint i20 = bit_cast<jint, jfloat>(f10);
+  EXPECT_EQ(i20, 20);
+}
+
+void JniCompilerTest::StackArgsIntsFirstImpl() {
+  SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsIntsFirst));
+
+  jint i1 = 1;
+  jint i2 = 2;
+  jint i3 = 3;
+  jint i4 = 4;
+  jint i5 = 5;
+  jint i6 = 6;
+  jint i7 = 7;
+  jint i8 = 8;
+  jint i9 = 9;
+  jint i10 = 10;
+
+  jfloat f1 = bit_cast<jfloat, jint>(11);
+  jfloat f2 = bit_cast<jfloat, jint>(12);
+  jfloat f3 = bit_cast<jfloat, jint>(13);
+  jfloat f4 = bit_cast<jfloat, jint>(14);
+  jfloat f5 = bit_cast<jfloat, jint>(15);
+  jfloat f6 = bit_cast<jfloat, jint>(16);
+  jfloat f7 = bit_cast<jfloat, jint>(17);
+  jfloat f8 = bit_cast<jfloat, jint>(18);
+  jfloat f9 = bit_cast<jfloat, jint>(19);
+  jfloat f10 = bit_cast<jfloat, jint>(20);
+
+  env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, f1, f2,
+                             f3, f4, f5, f6, f7, f8, f9, f10);
+}
+
+JNI_TEST(StackArgsIntsFirst)
+
+void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2,
+                                              jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
+                                              jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
+                                              jint i3, jint i4, jint i5, jint i6, jint i7, jint i8,
+                                              jint i9, jint i10) {
+  EXPECT_EQ(i1, 1);
+  EXPECT_EQ(i2, 2);
+  EXPECT_EQ(i3, 3);
+  EXPECT_EQ(i4, 4);
+  EXPECT_EQ(i5, 5);
+  EXPECT_EQ(i6, 6);
+  EXPECT_EQ(i7, 7);
+  EXPECT_EQ(i8, 8);
+  EXPECT_EQ(i9, 9);
+  EXPECT_EQ(i10, 10);
+
+  jint i11 = bit_cast<jint, jfloat>(f1);
+  EXPECT_EQ(i11, 11);
+  jint i12 = bit_cast<jint, jfloat>(f2);
+  EXPECT_EQ(i12, 12);
+  jint i13 = bit_cast<jint, jfloat>(f3);
+  EXPECT_EQ(i13, 13);
+  jint i14 = bit_cast<jint, jfloat>(f4);
+  EXPECT_EQ(i14, 14);
+  jint i15 = bit_cast<jint, jfloat>(f5);
+  EXPECT_EQ(i15, 15);
+  jint i16 = bit_cast<jint, jfloat>(f6);
+  EXPECT_EQ(i16, 16);
+  jint i17 = bit_cast<jint, jfloat>(f7);
+  EXPECT_EQ(i17, 17);
+  jint i18 = bit_cast<jint, jfloat>(f8);
+  EXPECT_EQ(i18, 18);
+  jint i19 = bit_cast<jint, jfloat>(f9);
+  EXPECT_EQ(i19, 19);
+  jint i20 = bit_cast<jint, jfloat>(f10);
+  EXPECT_EQ(i20, 20);
+}
+
+void JniCompilerTest::StackArgsFloatsFirstImpl() {
+  SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsFloatsFirst));
+
+  jint i1 = 1;
+  jint i2 = 2;
+  jint i3 = 3;
+  jint i4 = 4;
+  jint i5 = 5;
+  jint i6 = 6;
+  jint i7 = 7;
+  jint i8 = 8;
+  jint i9 = 9;
+  jint i10 = 10;
+
+  jfloat f1 = bit_cast<jfloat, jint>(11);
+  jfloat f2 = bit_cast<jfloat, jint>(12);
+  jfloat f3 = bit_cast<jfloat, jint>(13);
+  jfloat f4 = bit_cast<jfloat, jint>(14);
+  jfloat f5 = bit_cast<jfloat, jint>(15);
+  jfloat f6 = bit_cast<jfloat, jint>(16);
+  jfloat f7 = bit_cast<jfloat, jint>(17);
+  jfloat f8 = bit_cast<jfloat, jint>(18);
+  jfloat f9 = bit_cast<jfloat, jint>(19);
+  jfloat f10 = bit_cast<jfloat, jint>(20);
+
+  env_->CallStaticVoidMethod(jklass_, jmethod_, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, i1, i2, i3,
+                             i4, i5, i6, i7, i8, i9, i10);
+}
+
+JNI_TEST(StackArgsFloatsFirst)
+
+void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2,
+                                        jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
+                                        jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
+                                        jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) {
+  EXPECT_EQ(i1, 1);
+  EXPECT_EQ(i2, 2);
+  EXPECT_EQ(i3, 3);
+  EXPECT_EQ(i4, 4);
+  EXPECT_EQ(i5, 5);
+  EXPECT_EQ(i6, 6);
+  EXPECT_EQ(i7, 7);
+  EXPECT_EQ(i8, 8);
+  EXPECT_EQ(i9, 9);
+  EXPECT_EQ(i10, 10);
+
+  jint i11 = bit_cast<jint, jfloat>(f1);
+  EXPECT_EQ(i11, 11);
+  jint i12 = bit_cast<jint, jfloat>(f2);
+  EXPECT_EQ(i12, 12);
+  jint i13 = bit_cast<jint, jfloat>(f3);
+  EXPECT_EQ(i13, 13);
+  jint i14 = bit_cast<jint, jfloat>(f4);
+  EXPECT_EQ(i14, 14);
+  jint i15 = bit_cast<jint, jfloat>(f5);
+  EXPECT_EQ(i15, 15);
+  jint i16 = bit_cast<jint, jfloat>(f6);
+  EXPECT_EQ(i16, 16);
+  jint i17 = bit_cast<jint, jfloat>(f7);
+  EXPECT_EQ(i17, 17);
+  jint i18 = bit_cast<jint, jfloat>(f8);
+  EXPECT_EQ(i18, 18);
+  jint i19 = bit_cast<jint, jfloat>(f9);
+  EXPECT_EQ(i19, 19);
+  jint i20 = bit_cast<jint, jfloat>(f10);
+  EXPECT_EQ(i20, 20);
+}
+
+void JniCompilerTest::StackArgsMixedImpl() {
+  SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V",
+               reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsMixed));
+
+  jint i1 = 1;
+  jint i2 = 2;
+  jint i3 = 3;
+  jint i4 = 4;
+  jint i5 = 5;
+  jint i6 = 6;
+  jint i7 = 7;
+  jint i8 = 8;
+  jint i9 = 9;
+  jint i10 = 10;
+
+  jfloat f1 = bit_cast<jfloat, jint>(11);
+  jfloat f2 = bit_cast<jfloat, jint>(12);
+  jfloat f3 = bit_cast<jfloat, jint>(13);
+  jfloat f4 = bit_cast<jfloat, jint>(14);
+  jfloat f5 = bit_cast<jfloat, jint>(15);
+  jfloat f6 = bit_cast<jfloat, jint>(16);
+  jfloat f7 = bit_cast<jfloat, jint>(17);
+  jfloat f8 = bit_cast<jfloat, jint>(18);
+  jfloat f9 = bit_cast<jfloat, jint>(19);
+  jfloat f10 = bit_cast<jfloat, jint>(20);
+
+  env_->CallStaticVoidMethod(jklass_, jmethod_, i1, f1, i2, f2, i3, f3, i4, f4, i5, f5, i6, f6, i7,
+                             f7, i8, f8, i9, f9, i10, f10);
+}
+
+JNI_TEST(StackArgsMixed)
+
+void Java_MyClassNatives_stackArgsSignExtendedMips64(JNIEnv*, jclass, jint i1, jint i2, jint i3,
+                                                     jint i4, jint i5, jint i6, jint i7, jint i8) {
+  EXPECT_EQ(i1, 1);
+  EXPECT_EQ(i2, 2);
+  EXPECT_EQ(i3, 3);
+  EXPECT_EQ(i4, 4);
+  EXPECT_EQ(i5, 5);
+  EXPECT_EQ(i6, 6);
+  EXPECT_EQ(i7, 7);
+  EXPECT_EQ(i8, -8);
+
+#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+  // Mips64 ABI requires that arguments passed through stack be sign-extended 8B slots.
+  // First 8 arguments are passed through registers, check i7 and i8.
+  uint32_t stack1_high = *(&i7 + 1);
+  uint32_t stack2_high = *(&i8 + 1);
+
+  EXPECT_EQ(stack1_high, static_cast<uint32_t>(0));
+  EXPECT_EQ(stack2_high, static_cast<uint32_t>(0xffffffff));
+#else
+  LOG(INFO) << "Skipping stackArgsSignExtendedMips64 as there is nothing to be done on "
+            << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+ std::cout << "Skipping stackArgsSignExtendedMips64 as there is nothing to be done on " + << kRuntimeISA << std::endl; +#endif +} + +void JniCompilerTest::StackArgsSignExtendedMips64Impl() { + SetUpForTest(true, "stackArgsSignExtendedMips64", "(IIIIIIII)V", + reinterpret_cast(&Java_MyClassNatives_stackArgsSignExtendedMips64)); + jint i1 = 1; + jint i2 = 2; + jint i3 = 3; + jint i4 = 4; + jint i5 = 5; + jint i6 = 6; + jint i7 = 7; + jint i8 = -8; + + env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8); +} + +JNI_TEST(StackArgsSignExtendedMips64) + +} // namespace art diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc new file mode 100644 index 000000000..9d2732aa2 --- /dev/null +++ b/compiler/jni/quick/arm/calling_convention_arm.cc @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/logging.h" +#include "calling_convention_arm.h" +#include "handle_scope-inl.h" +#include "utils/arm/managed_register_arm.h" + +namespace art { +namespace arm { + +// Used by hard float. +static const Register kHFCoreArgumentRegisters[] = { + R0, R1, R2, R3 +}; + +static const SRegister kHFSArgumentRegisters[] = { + S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15 +}; + +static const SRegister kHFSCalleeSaveRegisters[] = { + S16, S17, S18, S19, S20, S21, S22, S23, S24, S25, S26, S27, S28, S29, S30, S31 +}; + +static const DRegister kHFDArgumentRegisters[] = { + D0, D1, D2, D3, D4, D5, D6, D7 +}; + +static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters), + "ks d argument registers mismatch"); + +// Calling convention + +ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() { + return ArmManagedRegister::FromCoreRegister(IP); // R12 +} + +ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() { + return ArmManagedRegister::FromCoreRegister(IP); // R12 +} + +ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() { + if (kArm32QuickCodeUseSoftFloat) { + switch (GetShorty()[0]) { + case 'V': + return ArmManagedRegister::NoRegister(); + case 'D': + case 'J': + return ArmManagedRegister::FromRegisterPair(R0_R1); + default: + return ArmManagedRegister::FromCoreRegister(R0); + } + } else { + switch (GetShorty()[0]) { + case 'V': + return ArmManagedRegister::NoRegister(); + case 'D': + return ArmManagedRegister::FromDRegister(D0); + case 'F': + return ArmManagedRegister::FromSRegister(S0); + case 'J': + return ArmManagedRegister::FromRegisterPair(R0_R1); + default: + return ArmManagedRegister::FromCoreRegister(R0); + } + } +} + +ManagedRegister ArmJniCallingConvention::ReturnRegister() { + switch (GetShorty()[0]) { + case 'V': + return ArmManagedRegister::NoRegister(); + case 'D': + case 'J': + return ArmManagedRegister::FromRegisterPair(R0_R1); + default: + return ArmManagedRegister::FromCoreRegister(R0); + } +} + 
+ManagedRegister ArmJniCallingConvention::IntReturnRegister() { + return ArmManagedRegister::FromCoreRegister(R0); +} + +// Managed runtime calling convention + +ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() { + return ArmManagedRegister::FromCoreRegister(R0); +} + +bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything moved to stack on entry. +} + +bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + return true; +} + +ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + return ManagedRegister::NoRegister(); +} + +FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + FrameOffset result = + FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method* + (itr_slots_ * kFramePointerSize)); // offset into in args + return result; +} + +const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on ARM to free them up for scratch use, we then assume + // all arguments are on the stack. + if (kArm32QuickCodeUseSoftFloat) { + if (entry_spills_.size() == 0) { + size_t num_spills = NumArgs() + NumLongOrDoubleArgs(); + if (num_spills > 0) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1)); + if (num_spills > 1) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2)); + if (num_spills > 2) { + entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3)); + } + } + } + } + } else { + if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { + uint32_t gpr_index = 1; // R0 ~ R3. Reserve r0 for ArtMethod*. + uint32_t fpr_index = 0; // S0 ~ S15. + uint32_t fpr_double_index = 0; // D0 ~ D7. + + ResetIterator(FrameOffset(0)); + while (HasNext()) { + if (IsCurrentParamAFloatOrDouble()) { + if (IsCurrentParamADouble()) { // Double. + // Double should not overlap with float. + fpr_double_index = (std::max(fpr_double_index * 2, RoundUp(fpr_index, 2))) / 2; + if (fpr_double_index < arraysize(kHFDArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromDRegister(kHFDArgumentRegisters[fpr_double_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 8); + } + } else { // Float. + // Float should not overlap with double. + if (fpr_index % 2 == 0) { + fpr_index = std::max(fpr_double_index * 2, fpr_index); + } + if (fpr_index < arraysize(kHFSArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromSRegister(kHFSArgumentRegisters[fpr_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + } else { + // FIXME: Pointer this returns as both reference and long. + if (IsCurrentParamALong() && !IsCurrentParamAReference()) { // Long. + if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) { + // Skip R1, and use R2_R3 if the long is the first parameter. + if (gpr_index == 1) { + gpr_index++; + } + } + + // If it spans register and memory, we must use the value in memory. 
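+          // For example (illustrative note): if only r3 remains free when a long
+          // arrives, the pair would straddle r3 and the stack, so the spill entry
+          // below records no register for it and the value is read from memory.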
+ if (gpr_index < arraysize(kHFCoreArgumentRegisters) - 1) { + entry_spills_.push_back( + ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++])); + } else if (gpr_index == arraysize(kHFCoreArgumentRegisters) - 1) { + gpr_index++; + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + // High part of long or 32-bit argument. + if (gpr_index < arraysize(kHFCoreArgumentRegisters)) { + entry_spills_.push_back( + ArmManagedRegister::FromCoreRegister(kHFCoreArgumentRegisters[gpr_index++])); + } else { + entry_spills_.push_back(ManagedRegister::NoRegister(), 4); + } + } + Next(); + } + } + } + return entry_spills_; +} +// JNI calling convention + +ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized, + const char* shorty) + : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) { + // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject + // or jclass for static methods and the JNIEnv. We start at the aligned register r2. + size_t padding = 0; + for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) { + if (IsParamALongOrDouble(cur_arg)) { + if ((cur_reg & 1) != 0) { + padding += 4; + cur_reg++; // additional bump to ensure alignment + } + cur_reg++; // additional bump to skip extra long word + } + cur_reg++; // bump the iterator for every argument + } + padding_ = padding; + + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R5)); + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R6)); + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R7)); + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R8)); + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R10)); + callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R11)); + + for (size_t i = 0; i < arraysize(kHFSCalleeSaveRegisters); ++i) { + callee_save_regs_.push_back(ArmManagedRegister::FromSRegister(kHFSCalleeSaveRegisters[i])); + } +} + +uint32_t ArmJniCallingConvention::CoreSpillMask() const { + // Compute spill mask to agree with callee saves initialized in the constructor + uint32_t result = 0; + result = 1 << R5 | 1 << R6 | 1 << R7 | 1 << R8 | 1 << R10 | 1 << R11 | 1 << LR; + return result; +} + +uint32_t ArmJniCallingConvention::FpSpillMask() const { + uint32_t result = 0; + for (size_t i = 0; i < arraysize(kHFSCalleeSaveRegisters); ++i) { + result |= (1 << kHFSCalleeSaveRegisters[i]); + } + return result; +} + +ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const { + return ArmManagedRegister::FromCoreRegister(R2); +} + +size_t ArmJniCallingConvention::FrameSize() { + // Method*, LR and callee save area size, local reference segment state + size_t frame_data_size = kArmPointerSize + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; + // References plus 2 words for HandleScope header + size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); + // Plus return value spill area size + return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment); +} + +size_t ArmJniCallingConvention::OutArgSize() { + return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_, + kStackAlignment); +} + +// JniCallingConvention ABI follows AAPCS where longs and doubles must occur +// in even register numbers and stack slots +void 
ArmJniCallingConvention::Next() { + JniCallingConvention::Next(); + size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni(); + if ((itr_args_ >= 2) && + (arg_pos < NumArgs()) && + IsParamALongOrDouble(arg_pos)) { + // itr_slots_ needs to be an even number, according to AAPCS. + if ((itr_slots_ & 0x1u) != 0) { + itr_slots_++; + } + } +} + +bool ArmJniCallingConvention::IsCurrentParamInRegister() { + return itr_slots_ < 4; +} + +bool ArmJniCallingConvention::IsCurrentParamOnStack() { + return !IsCurrentParamInRegister(); +} + +static const Register kJniArgumentRegisters[] = { + R0, R1, R2, R3 +}; +ManagedRegister ArmJniCallingConvention::CurrentParamRegister() { + CHECK_LT(itr_slots_, 4u); + int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni(); + if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) { + CHECK_EQ(itr_slots_, 2u); + return ArmManagedRegister::FromRegisterPair(R2_R3); + } else { + return + ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]); + } +} + +FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() { + CHECK_GE(itr_slots_, 4u); + size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize); + CHECK_LT(offset, OutArgSize()); + return FrameOffset(offset); +} + +size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() { + size_t static_args = IsStatic() ? 1 : 0; // count jclass + // regular argument parameters and this + size_t param_args = NumArgs() + NumLongOrDoubleArgs(); + // count JNIEnv* less arguments in registers + return static_args + param_args + 1 - 4; +} + +} // namespace arm +} // namespace art diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h new file mode 100644 index 000000000..35b50937e --- /dev/null +++ b/compiler/jni/quick/arm/calling_convention_arm.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace arm {
+
+constexpr size_t kFramePointerSize = 4;
+
+class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
+};
+
+class ArmJniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~ArmJniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  void Next() OVERRIDE;  // Override default behavior for AAPCS
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // AAPCS mandates return values are extended.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
new file mode 100644
index 000000000..9aef10e8f
--- /dev/null
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/logging.h" +#include "calling_convention_arm64.h" +#include "handle_scope-inl.h" +#include "utils/arm64/managed_register_arm64.h" + +namespace art { +namespace arm64 { + +static const XRegister kXArgumentRegisters[] = { + X0, X1, X2, X3, X4, X5, X6, X7 +}; + +static const WRegister kWArgumentRegisters[] = { + W0, W1, W2, W3, W4, W5, W6, W7 +}; + +static const DRegister kDArgumentRegisters[] = { + D0, D1, D2, D3, D4, D5, D6, D7 +}; + +static const SRegister kSArgumentRegisters[] = { + S0, S1, S2, S3, S4, S5, S6, S7 +}; + +static const DRegister kDCalleeSaveRegisters[] = { + D8, D9, D10, D11, D12, D13, D14, D15 +}; + +// Calling convention +ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() { + return Arm64ManagedRegister::FromXRegister(X20); // saved on entry restored on exit +} + +ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() { + return Arm64ManagedRegister::FromXRegister(X20); // saved on entry restored on exit +} + +static ManagedRegister ReturnRegisterForShorty(const char* shorty) { + if (shorty[0] == 'F') { + return Arm64ManagedRegister::FromSRegister(S0); + } else if (shorty[0] == 'D') { + return Arm64ManagedRegister::FromDRegister(D0); + } else if (shorty[0] == 'J') { + return Arm64ManagedRegister::FromXRegister(X0); + } else if (shorty[0] == 'V') { + return Arm64ManagedRegister::NoRegister(); + } else { + return Arm64ManagedRegister::FromWRegister(W0); + } +} + +ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Arm64JniCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Arm64JniCallingConvention::IntReturnRegister() { + return Arm64ManagedRegister::FromWRegister(W0); +} + +// Managed runtime calling convention + +ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() { + return Arm64ManagedRegister::FromXRegister(X0); +} + +bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything moved to stack on entry. +} + +bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + return true; +} + +ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + return ManagedRegister::NoRegister(); +} + +FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + FrameOffset result = + FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method ref + (itr_slots_ * sizeof(uint32_t))); // offset into in args + return result; +} + +const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on ARM64 to free them up for scratch use, we then assume + // all arguments are on the stack. + if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { + int gp_reg_index = 1; // we start from X1/W1, X0 holds ArtMethod*. + int fp_reg_index = 0; // D0/S0. + + // We need to choose the correct register (D/S or X/W) since the managed + // stack uses 32bit stack slots. + ResetIterator(FrameOffset(0)); + while (HasNext()) { + if (IsCurrentParamAFloatOrDouble()) { // FP regs. 
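+        // AAPCS64 passes the first eight FP arguments in v0-v7 (used here through
+        // their S/D views), so only indices 0-7 get a register spill entry.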
+        if (fp_reg_index < 8) {
+          if (!IsCurrentParamADouble()) {
+            entry_spills_.push_back(Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
+          } else {
+            entry_spills_.push_back(Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
+          }
+          fp_reg_index++;
+        } else {  // just increase the stack offset.
+          if (!IsCurrentParamADouble()) {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+          }
+        }
+      } else {  // GP regs.
+        if (gp_reg_index < 8) {
+          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
+            entry_spills_.push_back(Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
+          } else {
+            entry_spills_.push_back(Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
+          }
+          gp_reg_index++;
+        } else {  // just increase the stack offset.
+          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          }
+        }
+      }
+      Next();
+    }
+  }
+  return entry_spills_;
+}
+
+// JNI calling convention
+Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
+                                                     const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+  uint32_t core_spill_mask = CoreSpillMask();
+  DCHECK_EQ(XZR, kNumberOfXRegisters - 1);  // Exclude XZR from the loop (avoid 1 << 32).
+  for (int x_reg = 0; x_reg < kNumberOfXRegisters - 1; ++x_reg) {
+    if (((1 << x_reg) & core_spill_mask) != 0) {
+      callee_save_regs_.push_back(
+          Arm64ManagedRegister::FromXRegister(static_cast<XRegister>(x_reg)));
+    }
+  }
+
+  uint32_t fp_spill_mask = FpSpillMask();
+  for (int d_reg = 0; d_reg < kNumberOfDRegisters; ++d_reg) {
+    if (((1 << d_reg) & fp_spill_mask) != 0) {
+      callee_save_regs_.push_back(
+          Arm64ManagedRegister::FromDRegister(static_cast<DRegister>(d_reg)));
+    }
+  }
+}
+
+uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor.
+  // Note: The native jni function may call to some VM runtime functions which may suspend
+  // or trigger GC. And the jni method frame will become top quick frame in those cases.
+  // So we need to satisfy GC to save LR and callee-save registers which is similar to
+  // CalleeSaveMethod(RefOnly) frame.
+  // Jni function is the native function which the java code wants to call.
+  // Jni method is the method that compiled by jni compiler.
+  // Call chain: managed code(java) --> jni method --> jni function.
+  // Thread register(X19) is saved on stack.
+  return 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 |
+         1 << X25 | 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
+}
+
+uint32_t Arm64JniCallingConvention::FpSpillMask() const {
+  // Considering the case, java_method_1 --> jni method --> jni function --> java_method_2, we may
+  // break on java_method_2 and we still need to find out the values of DEX registers in
+  // java_method_1. So all callee-saves(in managed code) need to be saved.
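+  // Concretely this is D8-D15 (kDCalleeSaveRegisters above), the FP callee-saves
+  // required by AAPCS64.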
+  uint32_t result = 0;
+  for (size_t i = 0; i < arraysize(kDCalleeSaveRegisters); ++i) {
+    result |= (1 << kDCalleeSaveRegisters[i]);
+  }
+  return result;
+}
+
+ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();
+}
+
+size_t Arm64JniCallingConvention::FrameSize() {
+  // Method*, callee save area size, local reference segment state
+  size_t frame_data_size = kFramePointerSize +
+      CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+  // References plus 2 words for HandleScope header
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t Arm64JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
+  if (IsCurrentParamAFloatOrDouble()) {
+    return (itr_float_and_doubles_ < 8);
+  } else {
+    return ((itr_args_ - itr_float_and_doubles_) < 8);
+  }
+}
+
+bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
+  CHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    CHECK_LT(itr_float_and_doubles_, 8u);
+    if (IsCurrentParamADouble()) {
+      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
+    } else {
+      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
+    }
+  } else {
+    int gp_reg = itr_args_ - itr_float_and_doubles_;
+    CHECK_LT(static_cast<unsigned int>(gp_reg), 8u);
+    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
+      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
+    } else {
+      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
+    }
+  }
+}
+
+FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  size_t args_on_stack = itr_args_
+                  - std::min(8u, itr_float_and_doubles_)
+                  - std::min(8u, (itr_args_ - itr_float_and_doubles_));
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
+  // all arguments including JNI args
+  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+  size_t all_stack_args = all_args -
+      std::min(8u, static_cast<unsigned int>(NumFloatOrDoubleArgs())) -
+      std::min(8u, static_cast<unsigned int>((all_args - NumFloatOrDoubleArgs())));
+
+  return all_stack_args;
+}
+
+}  // namespace arm64
+}  // namespace art
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
new file mode 100644
index 000000000..37c92b203
--- /dev/null
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace arm64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
+};
+
+class Arm64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  Arm64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~Arm64JniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // aarch64 calling convention leaves upper bits undefined.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
+};
+
+}  // namespace arm64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
new file mode 100644
index 000000000..e21f55433
--- /dev/null
+++ b/compiler/jni/quick/calling_convention.cc
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention.h"
+
+#include "base/logging.h"
+
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "jni/quick/arm/calling_convention_arm.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "jni/quick/arm64/calling_convention_arm64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "jni/quick/mips/calling_convention_mips.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "jni/quick/mips64/calling_convention_mips64.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "jni/quick/x86/calling_convention_x86.h"
+#endif
+
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "jni/quick/x86_64/calling_convention_x86_64.h"
+#endif
+
+namespace art {
+
+// Managed runtime calling convention
+
+std::unique_ptr<ManagedRuntimeCallingConvention> ManagedRuntimeCallingConvention::Create(
+    ArenaAllocator* arena,
+    bool is_static,
+    bool is_synchronized,
+    const char* shorty,
+    InstructionSet instruction_set) {
+  switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+    case kArm:
+    case kThumb2:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+    case kArm64:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) arm64::Arm64ManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+    case kMips:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) mips::MipsManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+    case kMips64:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) mips64::Mips64ManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case kX86:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+    case kX86_64:
+      return std::unique_ptr<ManagedRuntimeCallingConvention>(
+          new (arena) x86_64::X86_64ManagedRuntimeCallingConvention(
+              is_static, is_synchronized, shorty));
+#endif
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      UNREACHABLE();
+  }
+}
+
+bool ManagedRuntimeCallingConvention::HasNext() {
+  return itr_args_ < NumArgs();
+}
+
+void ManagedRuntimeCallingConvention::Next() {
+  CHECK(HasNext());
+  if (IsCurrentArgExplicit() &&  // don't query parameter type of implicit args
+      IsParamALongOrDouble(itr_args_)) {
+    itr_longs_and_doubles_++;
+    itr_slots_++;
+  }
+  if (IsParamAFloatOrDouble(itr_args_)) {
+    itr_float_and_doubles_++;
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() {
+  // Static methods have no implicit arguments, others implicitly pass this
+  return IsStatic() || (itr_args_ != 0);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() {
+  return IsCurrentArgExplicit();  // any user parameter may be null
+}
+
+size_t ManagedRuntimeCallingConvention::CurrentParamSize() {
+  return ParamSize(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
+  return IsParamAReference(itr_args_);
+}
+
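The iterator bookkeeping above (itr_slots_, itr_refs_, itr_longs_and_doubles_) is easier to picture with a concrete shorty. A minimal sketch of how the counters advance, assuming an instance method with its implicit this reference; illustrative only, not ART code:

#include <cstdio>
#include <cstring>

int main() {
  // Instance method void f(int, long, Object): shorty "VIJL". Index 0 is the
  // return type; the implicit 'this' reference precedes the parameters.
  const char* shorty = "VIJL";
  unsigned slots = 0, refs = 0, longs_and_doubles = 0;
  refs++;   // implicit this
  slots++;  // this occupies one 32-bit slot
  for (size_t i = 1; i < std::strlen(shorty); ++i) {
    char c = shorty[i];
    if (c == 'J' || c == 'D') {
      longs_and_doubles++;
      slots++;  // wide arguments take a second slot, mirroring Next() above
    }
    if (c == 'L') {
      refs++;
    }
    slots++;
  }
  std::printf("slots=%u refs=%u wide=%u\n", slots, refs, longs_and_doubles);  // 5 2 1
  return 0;
}
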
+bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() {
+  return IsParamAFloatOrDouble(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamADouble() {
+  return IsParamADouble(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamALong() {
+  return IsParamALong(itr_args_);
+}
+
+// JNI calling convention
+
+std::unique_ptr<JniCallingConvention> JniCallingConvention::Create(ArenaAllocator* arena,
+                                                                   bool is_static,
+                                                                   bool is_synchronized,
+                                                                   const char* shorty,
+                                                                   InstructionSet instruction_set) {
+  switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+    case kArm:
+    case kThumb2:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) arm::ArmJniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+    case kArm64:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+    case kMips:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) mips::MipsJniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+    case kMips64:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case kX86:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) x86::X86JniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+    case kX86_64:
+      return std::unique_ptr<JniCallingConvention>(
+          new (arena) x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty));
+#endif
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      UNREACHABLE();
+  }
+}
+
+size_t JniCallingConvention::ReferenceCount() const {
+  return NumReferenceArgs() + (IsStatic() ? 1 : 0);
+}
+
+FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
+  size_t references_size = handle_scope_pointer_size_ * ReferenceCount();  // size excluding header
+  return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
+}
+
+FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
+  // Segment state is 4 bytes long
+  return FrameOffset(SavedLocalReferenceCookieOffset().Int32Value() + 4);
+}
+
+bool JniCallingConvention::HasNext() {
+  if (itr_args_ <= kObjectOrClass) {
+    return true;
+  } else {
+    unsigned int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return arg_pos < NumArgs();
+  }
+}
+
+void JniCallingConvention::Next() {
+  CHECK(HasNext());
+  if (itr_args_ > kObjectOrClass) {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    if (IsParamALongOrDouble(arg_pos)) {
+      itr_longs_and_doubles_++;
+      itr_slots_++;
+    }
+  }
+  if (IsCurrentParamAFloatOrDouble()) {
+    itr_float_and_doubles_++;
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool JniCallingConvention::IsCurrentParamAReference() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return true;   // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamAReference(arg_pos);
+    }
+  }
+}
+
+bool JniCallingConvention::IsCurrentParamJniEnv() {
+  return (itr_args_ == kJniEnv);
+}
+
+bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return false;  // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamAFloatOrDouble(arg_pos);
+    }
+  }
+}
+
+bool JniCallingConvention::IsCurrentParamADouble() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return false;  // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamADouble(arg_pos);
+    }
+  }
+}
+
+bool JniCallingConvention::IsCurrentParamALong() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return false;  // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamALong(arg_pos);
+    }
+  }
+}
+
+// Return position of handle scope entry holding reference at the current iterator
+// position
+FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
+  CHECK(IsCurrentParamAReference());
+  CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
+  int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
+  CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
+  return FrameOffset(result);
+}
+
+size_t JniCallingConvention::CurrentParamSize() {
+  if (itr_args_ <= kObjectOrClass) {
+    return frame_pointer_size_;  // JNIEnv or jobject/jclass
+  } else {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return ParamSize(arg_pos);
+  }
+}
+
+size_t JniCallingConvention::NumberOfExtraArgumentsForJni() {
+  // The first argument is the JNIEnv*.
+  // Static methods have an extra argument which is the jclass.
+  return IsStatic() ? 2 : 1;
+}
+
+}  // namespace art
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
new file mode 100644
index 000000000..2c4b15ca3
--- /dev/null
+++ b/compiler/jni/quick/calling_convention.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
+#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
+
+#include <vector>
+
+#include "base/arena_object.h"
+#include "handle_scope.h"
+#include "primitive.h"
+#include "thread.h"
+#include "utils/managed_register.h"
+
+namespace art {
+
+// Top-level abstraction for different calling conventions.
+class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConvention> {
+ public:
+  bool IsReturnAReference() const { return shorty_[0] == 'L'; }
+
+  Primitive::Type GetReturnType() const {
+    return Primitive::GetType(shorty_[0]);
+  }
+
+  size_t SizeOfReturnValue() const {
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+
+  // Register that holds result of this method invocation.
+  virtual ManagedRegister ReturnRegister() = 0;
+  // Register reserved for scratch usage during procedure calls.
+  virtual ManagedRegister InterproceduralScratchRegister() = 0;
+
+  // Offset of Method within the frame.
+  FrameOffset MethodStackOffset() {
+    return displacement_;
+  }
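
SizeOfReturnValue() above quietly widens sub-int results to a full 32-bit slot. A sketch of that rounding, keyed by the shorty return character; the 4-byte size for 'L' assumes ART's 32-bit compressed heap references:

#include <cstddef>
#include <cstdio>

size_t SizeOfReturnValue(char shorty_ret) {
  size_t size;
  switch (shorty_ret) {
    case 'V': size = 0; break;            // void
    case 'B': case 'Z': size = 1; break;  // byte, boolean
    case 'C': case 'S': size = 2; break;  // char, short
    case 'J': case 'D': size = 8; break;  // long, double
    default:  size = 4; break;            // int, float, reference
  }
  if (size >= 1 && size < 4) {
    size = 4;  // round sub-int results up to a full slot, as above
  }
  return size;
}

int main() {
  std::printf("%zu %zu %zu\n",
              SizeOfReturnValue('Z'), SizeOfReturnValue('J'), SizeOfReturnValue('V'));  // 4 8 0
  return 0;
}
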
+
+  // Iterator interface
+
+  // Place iterator at start of arguments. The displacement is applied to
+  // frame offset methods to account for frames which may be on the stack
+  // below the one being iterated over.
+  void ResetIterator(FrameOffset displacement) {
+    displacement_ = displacement;
+    itr_slots_ = 0;
+    itr_args_ = 0;
+    itr_refs_ = 0;
+    itr_longs_and_doubles_ = 0;
+    itr_float_and_doubles_ = 0;
+  }
+
+  virtual ~CallingConvention() {}
+
+ protected:
+  CallingConvention(bool is_static, bool is_synchronized, const char* shorty,
+                    size_t frame_pointer_size)
+      : itr_slots_(0), itr_refs_(0), itr_args_(0), itr_longs_and_doubles_(0),
+        itr_float_and_doubles_(0), displacement_(0),
+        frame_pointer_size_(frame_pointer_size),
+        handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
+        is_static_(is_static), is_synchronized_(is_synchronized),
+        shorty_(shorty) {
+    num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
+    num_ref_args_ = is_static ? 0 : 1;  // The implicit this pointer.
+    num_float_or_double_args_ = 0;
+    num_long_or_double_args_ = 0;
+    for (size_t i = 1; i < strlen(shorty); i++) {
+      char ch = shorty_[i];
+      switch (ch) {
+        case 'L':
+          num_ref_args_++;
+          break;
+        case 'J':
+          num_long_or_double_args_++;
+          break;
+        case 'D':
+          num_long_or_double_args_++;
+          num_float_or_double_args_++;
+          break;
+        case 'F':
+          num_float_or_double_args_++;
+          break;
+      }
+    }
+  }
+
+  bool IsStatic() const {
+    return is_static_;
+  }
+  bool IsSynchronized() const {
+    return is_synchronized_;
+  }
+  bool IsParamALongOrDouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    char ch = shorty_[param];
+    return (ch == 'J' || ch == 'D');
+  }
+  bool IsParamAFloatOrDouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    char ch = shorty_[param];
+    return (ch == 'F' || ch == 'D');
+  }
+  bool IsParamADouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    return shorty_[param] == 'D';
+  }
+  bool IsParamALong(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    return shorty_[param] == 'J';
+  }
+  bool IsParamAReference(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return true;  // this argument
+    }
+    return shorty_[param] == 'L';
+  }
+  size_t NumArgs() const {
+    return num_args_;
+  }
+  size_t NumLongOrDoubleArgs() const {
+    return num_long_or_double_args_;
+  }
+  size_t NumFloatOrDoubleArgs() const {
+    return num_float_or_double_args_;
+  }
+  size_t NumReferenceArgs() const {
+    return num_ref_args_;
+  }
+  size_t ParamSize(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return sizeof(mirror::HeapReference<mirror::Object>);  // this argument
+    }
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+  const char* GetShorty() const {
+    return shorty_.c_str();
+  }
+  // The slot number for current calling_convention argument.
+  // Note that each slot is 32-bit. When the current argument is bigger
+  // than 32 bits, return the first slot number for this argument.
+  unsigned int itr_slots_;
+  // The number of references iterated past.
+  unsigned int itr_refs_;
+  // The argument number along argument list for current argument.
+  unsigned int itr_args_;
+  // Number of longs and doubles seen along argument list.
+  unsigned int itr_longs_and_doubles_;
+  // Number of float and doubles seen along argument list.
+  unsigned int itr_float_and_doubles_;
+  // Space for frames below this on the stack.
+  FrameOffset displacement_;
+  // The size of a pointer.
+  const size_t frame_pointer_size_;
+  // The size of a reference entry within the handle scope.
+  const size_t handle_scope_pointer_size_;
+
+ private:
+  const bool is_static_;
+  const bool is_synchronized_;
+  std::string shorty_;
+  size_t num_args_;
+  size_t num_ref_args_;
+  size_t num_float_or_double_args_;
+  size_t num_long_or_double_args_;
+};
+
+// Abstraction for managed code's calling conventions
+// | { Incoming stack args } |
+// | { Prior Method* }       | <-- Prior SP
+// | { Return address }      |
+// | { Callee saves }        |
+// | { Spills ... }          |
+// | { Outgoing stack args } |
+// | { Method* }             | <-- SP
+class ManagedRuntimeCallingConvention : public CallingConvention {
+ public:
+  static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
+                                                                 bool is_static,
+                                                                 bool is_synchronized,
+                                                                 const char* shorty,
+                                                                 InstructionSet instruction_set);
+
+  // Register that holds the incoming method argument
+  virtual ManagedRegister MethodRegister() = 0;
+
+  // Iterator interface
+  bool HasNext();
+  void Next();
+  bool IsCurrentParamAReference();
+  bool IsCurrentParamAFloatOrDouble();
+  bool IsCurrentParamADouble();
+  bool IsCurrentParamALong();
+  bool IsCurrentArgExplicit();  // i.e. a non-implicit argument such as this
+  bool IsCurrentArgPossiblyNull();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  virtual ~ManagedRuntimeCallingConvention() {}
+
+  // Registers to spill to caller's out registers on entry.
+  virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
+
+ protected:
+  ManagedRuntimeCallingConvention(bool is_static,
+                                  bool is_synchronized,
+                                  const char* shorty,
+                                  size_t frame_pointer_size)
+      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
+};
+
+// Abstraction for JNI calling conventions
+// | { Incoming stack args }         | <-- Prior SP
+// | { Return address }              |
+// | { Callee saves }                |     ([1])
+// | { Return value spill }          |     (live on return slow paths)
+// | { Local Ref. Table State }      |
+// | { Stack Indirect Ref. Table     |
+// |   num. refs./link }             |     (here to prior SP is frame size)
+// | { Method* }                     | <-- Anchor SP written to thread
+// | { Outgoing stack args }         | <-- SP at point of call
+// | Native frame                    |
+//
+// [1] We must save all callee saves here to enable any exception throws to restore
+// callee saves for frames above this one.
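
Reading the per-architecture FrameSize() implementations against the layout above is easier with the arithmetic spelled out. A rough model, assuming a pointer-size handle scope link, a 32-bit reference count, and 32-bit stack references; the real code adds each architecture's own callee-save list and alignment:

#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t RoundUpTo(size_t value, size_t alignment) {
  return (value + alignment - 1) / alignment * alignment;
}

size_t ModelJniFrameSize(size_t pointer_size, size_t num_callee_saves,
                         size_t num_refs, size_t return_value_size,
                         size_t stack_alignment) {
  size_t method_ptr = pointer_size;                       // Method* anchor
  size_t callee_saves = num_callee_saves * pointer_size;  // spilled on entry
  size_t cookie = sizeof(uint32_t);                       // saved local-ref segment state
  size_t handle_scope = pointer_size + sizeof(uint32_t)   // link + number-of-references header
                        + num_refs * sizeof(uint32_t);    // 32-bit stack references
  return RoundUpTo(method_ptr + callee_saves + cookie + handle_scope + return_value_size,
                   stack_alignment);
}

int main() {
  // Roughly an arm64-like target: 8-byte pointers, 16-byte stack alignment,
  // two references in the handle scope, and a long return value.
  std::printf("%zu\n", ModelJniFrameSize(8, 20, 2, 8, 16));
  return 0;
}
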
+class JniCallingConvention : public CallingConvention {
+ public:
+  static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
+                                                      bool is_static,
+                                                      bool is_synchronized,
+                                                      const char* shorty,
+                                                      InstructionSet instruction_set);
+
+  // Size of frame excluding space for outgoing args (it's assumed Method* is
+  // always at the bottom of a frame, but this doesn't work for outgoing
+  // native args). Includes alignment.
+  virtual size_t FrameSize() = 0;
+  // Size of outgoing arguments, including alignment
+  virtual size_t OutArgSize() = 0;
+  // Number of references in stack indirect reference table
+  size_t ReferenceCount() const;
+  // Location where the segment state of the local indirect reference table is saved
+  FrameOffset SavedLocalReferenceCookieOffset() const;
+  // Location where the return value of a call can be squirreled if another
+  // call is made following the native call
+  FrameOffset ReturnValueSaveLocation() const;
+  // Register that holds result if it is integer.
+  virtual ManagedRegister IntReturnRegister() = 0;
+  // Whether the compiler needs to ensure zero-/sign-extension of a small result type
+  virtual bool RequiresSmallResultTypeExtension() const = 0;
+
+  // Callee save registers to spill prior to native code (which may clobber)
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
+
+  // Spill mask values
+  virtual uint32_t CoreSpillMask() const = 0;
+  virtual uint32_t FpSpillMask() const = 0;
+
+  // An extra scratch register live after the call
+  virtual ManagedRegister ReturnScratchRegister() const = 0;
+
+  // Iterator interface
+  bool HasNext();
+  virtual void Next();
+  bool IsCurrentParamAReference();
+  bool IsCurrentParamAFloatOrDouble();
+  bool IsCurrentParamADouble();
+  bool IsCurrentParamALong();
+  bool IsCurrentParamJniEnv();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  // Iterator interface extension for JNI
+  FrameOffset CurrentParamHandleScopeEntryOffset();
+
+  // Position of handle scope and interior fields
+  FrameOffset HandleScopeOffset() const {
+    return FrameOffset(this->displacement_.Int32Value() + frame_pointer_size_);
+    // above Method reference
+  }
+
+  FrameOffset HandleScopeLinkOffset() const {
+    return FrameOffset(HandleScopeOffset().Int32Value() +
+                       HandleScope::LinkOffset(frame_pointer_size_));
+  }
+
+  FrameOffset HandleScopeNumRefsOffset() const {
+    return FrameOffset(HandleScopeOffset().Int32Value() +
+                       HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
+  }
+
+  FrameOffset HandleReferencesOffset() const {
+    return FrameOffset(HandleScopeOffset().Int32Value() +
+                       HandleScope::ReferencesOffset(frame_pointer_size_));
+  }
+
+  virtual ~JniCallingConvention() {}
+
+ protected:
+  // Named iterator positions
+  enum IteratorPos {
+    kJniEnv = 0,
+    kObjectOrClass = 1
+  };
+
+  JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty,
+                       size_t frame_pointer_size)
+      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
+
+  // Number of stack slots for outgoing arguments, above which the handle scope is
+  // located
+  virtual size_t NumberOfOutgoingStackArgs() = 0;
+
+ protected:
+  size_t NumberOfExtraArgumentsForJni();
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
new file mode 100644
index 000000000..27714b805
--- /dev/null
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_compiler.h"
+
+#include <algorithm>
+#include <fstream>
+#include <memory>
+#include <vector>
+
+#include "art_method.h"
+#include "base/arena_allocator.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "calling_convention.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "dex_file-inl.h"
+#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "jni_env_ext.h"
+#include "utils/assembler.h"
+#include "utils/managed_register.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/arm64/managed_register_arm64.h"
+#include "utils/mips/managed_register_mips.h"
+#include "utils/mips64/managed_register_mips64.h"
+#include "utils/x86/managed_register_x86.h"
+#include "thread.h"
+
+#define __ jni_asm->
+
+namespace art {
+
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size);
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg);
+
+// Generate the JNI bridge for the given method, general contract:
+// - Arguments are in the managed runtime format, either on stack or in
+//   registers, a reference to the method object is supplied as part of this
+//   convention.
+//
+CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
+                                            uint32_t access_flags, uint32_t method_idx,
+                                            const DexFile& dex_file) {
+  const bool is_native = (access_flags & kAccNative) != 0;
+  CHECK(is_native);
+  const bool is_static = (access_flags & kAccStatic) != 0;
+  const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
+  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  InstructionSet instruction_set = driver->GetInstructionSet();
+  const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
+  const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
+
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+
+  // Calling conventions used to iterate over parameters to method
+  std::unique_ptr<JniCallingConvention> main_jni_conv(
+      JniCallingConvention::Create(&arena, is_static, is_synchronized, shorty, instruction_set));
+  bool reference_return = main_jni_conv->IsReturnAReference();
+
+  std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
+      ManagedRuntimeCallingConvention::Create(
+          &arena, is_static, is_synchronized, shorty, instruction_set));
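
Everything below is keyed off the method shorty, so it helps to recall how a JNI method descriptor collapses into one. A small sketch (illustrative only; ART has its own descriptor utilities):

#include <cstdio>
#include <string>

// Collapse a method descriptor like "(ILjava/lang/String;J)D" into a shorty
// ("DILJ"): return type first, each reference type reduced to a single 'L'.
std::string ToShorty(const std::string& descriptor) {
  std::string shorty;
  size_t close = descriptor.find(')');
  char ret = descriptor[close + 1];
  shorty += (ret == 'L' || ret == '[') ? 'L' : ret;  // return type first
  for (size_t i = 1; i < close;) {
    char c = descriptor[i];
    if (c == '[') {  // array: a reference, skip the element type
      shorty += 'L';
      while (descriptor[i] == '[') i++;
      i = (descriptor[i] == 'L') ? descriptor.find(';', i) + 1 : i + 1;
    } else if (c == 'L') {  // class: skip to the closing ';'
      shorty += 'L';
      i = descriptor.find(';', i) + 1;
    } else {  // primitive
      shorty += c;
      i++;
    }
  }
  return shorty;
}

int main() {
  // double f(int, String, long) -> "DILJ"
  std::printf("%s\n", ToShorty("(ILjava/lang/String;J)D").c_str());
  return 0;
}
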
+
+  // Calling conventions to call into JNI method "end" possibly passing a returned reference, the
+  // method and the current thread.
+  const char* jni_end_shorty;
+  if (reference_return && is_synchronized) {
+    jni_end_shorty = "ILL";
+  } else if (reference_return) {
+    jni_end_shorty = "IL";
+  } else if (is_synchronized) {
+    jni_end_shorty = "VL";
+  } else {
+    jni_end_shorty = "V";
+  }
+
+  std::unique_ptr<JniCallingConvention> end_jni_conv(JniCallingConvention::Create(
+      &arena, is_static, is_synchronized, jni_end_shorty, instruction_set));
+
+  // Assembler that holds generated instructions
+  std::unique_ptr<Assembler> jni_asm(
+      Assembler::Create(&arena, instruction_set, instruction_set_features));
+  jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo());
+
+  // Offsets into data structures
+  // TODO: if cross compiling these offsets are for the host not the target
+  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
+  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
+  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
+
+  // 1. Build the frame saving all callee saves
+  const size_t frame_size(main_jni_conv->FrameSize());
+  const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
+  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
+
+  // 2. Set up the HandleScope
+  mr_conv->ResetIterator(FrameOffset(frame_size));
+  main_jni_conv->ResetIterator(FrameOffset(0));
+  __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
+                           main_jni_conv->ReferenceCount(),
+                           mr_conv->InterproceduralScratchRegister());
+
+  if (is_64_bit_target) {
+    __ CopyRawPtrFromThread64(main_jni_conv->HandleScopeLinkOffset(),
+                              Thread::TopHandleScopeOffset<8>(),
+                              mr_conv->InterproceduralScratchRegister());
+    __ StoreStackOffsetToThread64(Thread::TopHandleScopeOffset<8>(),
+                                  main_jni_conv->HandleScopeOffset(),
+                                  mr_conv->InterproceduralScratchRegister());
+  } else {
+    __ CopyRawPtrFromThread32(main_jni_conv->HandleScopeLinkOffset(),
+                              Thread::TopHandleScopeOffset<4>(),
+                              mr_conv->InterproceduralScratchRegister());
+    __ StoreStackOffsetToThread32(Thread::TopHandleScopeOffset<4>(),
+                                  main_jni_conv->HandleScopeOffset(),
+                                  mr_conv->InterproceduralScratchRegister());
+  }
+
+  // 3. Place incoming reference arguments into handle scope
+  main_jni_conv->Next();  // Skip JNIEnv*
+  // 3.5. Create Class argument for static methods out of passed method
+  if (is_static) {
+    FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+    // Check handle scope offset is within frame
+    CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+    // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod.
+    // Note this LoadRef() does not include read barrier. It will be handled below.
+    __ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
+               mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false);
+    __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
+    __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister());
+    main_jni_conv->Next();  // in handle scope so move to next argument
+  }
+  while (mr_conv->HasNext()) {
+    CHECK(main_jni_conv->HasNext());
+    bool ref_param = main_jni_conv->IsCurrentParamAReference();
+    CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+    // References need placing in handle scope and the entry value passing
+    if (ref_param) {
+      // Compute handle scope entry, note null is placed in the handle scope but its boxed value
+      // must be null.
+      FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+      // Check handle scope offset is within frame and doesn't run into the saved segment state.
+      CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+      CHECK_NE(handle_scope_offset.Uint32Value(),
+               main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
+      bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+      bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+      CHECK(input_in_reg || input_on_stack);
+
+      if (input_in_reg) {
+        ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+        __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
+        __ StoreRef(handle_scope_offset, in_reg);
+      } else if (input_on_stack) {
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
+        __ CopyRef(handle_scope_offset, in_off,
+                   mr_conv->InterproceduralScratchRegister());
+      }
+    }
+    mr_conv->Next();
+    main_jni_conv->Next();
+  }
+
+  // 4. Write out the end of the quick frames.
+  if (is_64_bit_target) {
+    __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+  } else {
+    __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+  }
+
+  // 5. Move frame down to allow space for outgoing args.
+  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
+  size_t current_out_arg_size = main_out_arg_size;
+  __ IncreaseFrameSize(main_out_arg_size);
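
The repeated is_64_bit_target branches that follow all stem from one fact: Thread fields sit at pointer-size-dependent offsets, so each field and entry point carries a <4>/<8> offset pair. A toy version of the pattern; the types and field layout here are stand-ins, not ART's:

#include <cstddef>
#include <cstdint>
#include <cstdio>

template <size_t kPointerSize>
struct ThreadOffset {
  uint32_t value;
};

// Hypothetical layout: pretend three pointer-size fields precede the JNIEnv*
// slot in the Thread object. ART computes these from its real layout.
template <size_t kPointerSize>
constexpr ThreadOffset<kPointerSize> JniEnvOffset() {
  return ThreadOffset<kPointerSize>{static_cast<uint32_t>(3 * kPointerSize)};
}

int main() {
  bool is_64_bit_target = true;
  uint32_t offset = is_64_bit_target ? JniEnvOffset<8>().value : JniEnvOffset<4>().value;
  std::printf("JNIEnv* at thread offset %u\n", offset);  // 24 on a 64-bit target
  return 0;
}
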
+
+  // Call the read barrier for the declaring class loaded from the method for a static call.
+  // Note that we always have outgoing param space available for at least two params.
+  if (kUseReadBarrier && is_static) {
+    ThreadOffset<4> read_barrier32 = QUICK_ENTRYPOINT_OFFSET(4, pReadBarrierJni);
+    ThreadOffset<8> read_barrier64 = QUICK_ENTRYPOINT_OFFSET(8, pReadBarrierJni);
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv.
+    FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    // Pass the handle for the class as the first argument.
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateHandleScopeEntry(out_off, class_handle_scope_offset,
+                                mr_conv->InterproceduralScratchRegister(),
+                                false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset,
+                                ManagedRegister::NoRegister(), false);
+    }
+    main_jni_conv->Next();
+    // Pass the current thread as the second argument and call.
+    if (main_jni_conv->IsCurrentParamInRegister()) {
+      __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
+      if (is_64_bit_target) {
+        __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier64),
+                main_jni_conv->InterproceduralScratchRegister());
+      } else {
+        __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier32),
+                main_jni_conv->InterproceduralScratchRegister());
+      }
+    } else {
+      __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
+                          main_jni_conv->InterproceduralScratchRegister());
+      if (is_64_bit_target) {
+        __ CallFromThread64(read_barrier64, main_jni_conv->InterproceduralScratchRegister());
+      } else {
+        __ CallFromThread32(read_barrier32, main_jni_conv->InterproceduralScratchRegister());
+      }
+    }
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));  // Reset.
+  }
+
+  // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
+  //    can occur. The result is the saved JNI local state that is restored by the exit call. We
+  //    abuse the JNI calling convention here, which is guaranteed to support passing two pointer
+  //    arguments.
+  ThreadOffset<4> jni_start32 =
+      is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
+                      : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
+  ThreadOffset<8> jni_start64 =
+      is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
+                      : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  FrameOffset locked_object_handle_scope_offset(0);
+  if (is_synchronized) {
+    // Pass object for locking.
+    main_jni_conv->Next();  // Skip JNIEnv.
+    locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
+                                mr_conv->InterproceduralScratchRegister(), false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
+                                ManagedRegister::NoRegister(), false);
+    }
+    main_jni_conv->Next();
+  }
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
+    if (is_64_bit_target) {
+      __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
+              main_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32),
+              main_jni_conv->InterproceduralScratchRegister());
+    }
+  } else {
+    __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
+                        main_jni_conv->InterproceduralScratchRegister());
+    if (is_64_bit_target) {
+      __ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
+    }
+  }
+  if (is_synchronized) {  // Check for exceptions from monitor enter.
+    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
+  }
+  FrameOffset saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset();
+  __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4);
+
+  // 7. Iterate over arguments placing values from the managed calling convention into
+  //    the convention required for a native call (shuffling).
+  //    For references, place an index/pointer to the reference after checking whether it
+  //    is null (which must be encoded as null).
+  //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
+  //    give as many free registers for the shuffle as possible.
+  mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+  uint32_t args_count = 0;
+  while (mr_conv->HasNext()) {
+    args_count++;
+    mr_conv->Next();
+  }
+
+  // Do a backward pass over arguments, so that the generated code will be "mov
+  // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
+  // TODO: A reverse iterator to improve readability.
+  for (uint32_t i = 0; i < args_count; ++i) {
+    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*.
+    if (is_static) {
+      main_jni_conv->Next();  // Skip Class for now.
+    }
+    // Skip to the argument we're interested in.
+    for (uint32_t j = 0; j < args_count - i - 1; ++j) {
+      mr_conv->Next();
+      main_jni_conv->Next();
+    }
+    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size, main_out_arg_size);
+  }
+  if (is_static) {
+    // Create argument for Class
+    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*
+    FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
+                                mr_conv->InterproceduralScratchRegister(),
+                                false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
+                                ManagedRegister::NoRegister(), false);
+    }
+  }
+
+  // 8. Create 1st argument, the JNI environment ptr.
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  // Register that will hold local indirect reference table
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
+    DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
+    if (is_64_bit_target) {
+      __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+    } else {
+      __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+    }
+  } else {
+    FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
+    if (is_64_bit_target) {
+      __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
+                                main_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+                                main_jni_conv->InterproceduralScratchRegister());
+    }
+  }
+
+  // 9. Plant call to native code associated with method.
+  MemberOffset jni_entrypoint_offset = ArtMethod::EntryPointFromJniOffset(
+      InstructionSetPointerSize(instruction_set));
+  __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
+          mr_conv->InterproceduralScratchRegister());
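
The backward pass in step 7 above exists because shifting every argument toward higher slots (to make room for JNIEnv* and jclass) must never clobber a value before it has been moved. A tiny demonstration of the idea, with plain integers standing in for registers:

#include <cstdio>

int main() {
  int regs[5] = {10, 20, 30, 0, 0};  // three incoming args in "registers" 0..2
  int args = 3;
  int shift = 2;  // make room for two new leading args (JNIEnv*, jclass)
  for (int i = args - 1; i >= 0; --i) {
    regs[i + shift] = regs[i];  // mov r4,r2; mov r3,r1; mov r2,r0
  }
  regs[0] = 1;  // JNIEnv* stand-in
  regs[1] = 2;  // jclass stand-in
  for (int r : regs) {
    std::printf("%d ", r);  // 1 2 10 20 30; a forward pass would have lost 20 and 30
  }
  std::printf("\n");
  return 0;
}
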
+
+  // 10. Fix differences in result widths.
+  if (main_jni_conv->RequiresSmallResultTypeExtension()) {
+    if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
+        main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
+      __ SignExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
+               main_jni_conv->GetReturnType() == Primitive::kPrimChar) {
+      __ ZeroExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    }
+  }
+
+  // 11. Save return value
+  FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    if ((instruction_set == kMips || instruction_set == kMips64) &&
+        main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+        return_save_location.Uint32Value() % 8 != 0) {
+      // Ensure doubles are 8-byte aligned for MIPS
+      return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
+    }
+    CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
+    __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
+  }
+
+  // Increase frame size for out args if needed by the end_jni_conv.
+  const size_t end_out_arg_size = end_jni_conv->OutArgSize();
+  if (end_out_arg_size > current_out_arg_size) {
+    size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size;
+    current_out_arg_size = end_out_arg_size;
+    __ IncreaseFrameSize(out_arg_size_diff);
+    saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
+    locked_object_handle_scope_offset =
+        FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
+    return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
+  }
+  // 12. Call JniMethodEnd, pass saved local reference state, return value we want to keep and
+  //     thread.
+  end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
+  ThreadOffset<4> jni_end32(-1);
+  ThreadOffset<8> jni_end64(-1);
+  if (reference_return) {
+    // Pass result.
+    jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
+                                : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
+    jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
+                                : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
+    SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
+    end_jni_conv->Next();
+  } else {
+    jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
+                                : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
+    jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
+                                : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
+  }
+  // Pass saved local reference state.
+  if (end_jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+    __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4);
+  } else {
+    ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+    __ Load(out_reg, saved_cookie_offset, 4);
+  }
+  end_jni_conv->Next();
+  if (is_synchronized) {
+    // Pass object for unlocking.
+    if (end_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+      __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset,
+                                end_jni_conv->InterproceduralScratchRegister(),
+                                false);
+    } else {
+      ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+      __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset,
+                                ManagedRegister::NoRegister(), false);
+    }
+    end_jni_conv->Next();
+  }
+  if (end_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(end_jni_conv->CurrentParamRegister());
+    if (is_64_bit_target) {
+      __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
+              end_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end32),
+              end_jni_conv->InterproceduralScratchRegister());
+    }
+  } else {
+    __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
+                        end_jni_conv->InterproceduralScratchRegister());
+    if (is_64_bit_target) {
+      __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+    }
+  }
+
+  // 13. Reload return value
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
+  }
+
+  // 14. Move frame up now we're done with the out arg space.
+  __ DecreaseFrameSize(current_out_arg_size);
+
+  // 15. Process pending exceptions from JNI call or monitor exit.
+  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0);
+
+  // 16. Remove activation - need to restore callee save registers since the GC may have changed
+  //     them.
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
+  __ RemoveFrame(frame_size, callee_save_regs);
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
+
+  // 17. Finalize code generation
+  __ FinalizeCode();
+  size_t cs = __ CodeSize();
+  std::vector<uint8_t> managed_code(cs);
+  MemoryRegion code(&managed_code[0], managed_code.size());
+  __ FinalizeInstructions(code);
+
+  return CompiledMethod::SwapAllocCompiledMethod(driver,
+                                                 instruction_set,
+                                                 ArrayRef<const uint8_t>(managed_code),
+                                                 frame_size,
+                                                 main_jni_conv->CoreSpillMask(),
+                                                 main_jni_conv->FpSpillMask(),
+                                                 ArrayRef<const SrcMapElem>(),
+                                                 ArrayRef<const uint8_t>(),  // vmap_table.
+                                                 ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
+                                                 ArrayRef<const LinkerPatch>());
+}
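
CopyParameter() below is essentially a four-way dispatch on where the managed input and the JNI output live. A stripped-down map of the cases; simplified, since the real helper also boxes references through handle scope entries and handles longs that straddle register and stack:

#include <cstdio>

enum class Loc { kRegister, kStack };

const char* MoveKind(Loc in, Loc out) {
  if (in == Loc::kRegister && out == Loc::kRegister) return "reg -> reg (Move)";
  if (in == Loc::kStack    && out == Loc::kStack)    return "stack -> stack (Copy)";
  if (in == Loc::kStack    && out == Loc::kRegister) return "stack -> reg (Load)";
  return "reg -> stack (Store)";
}

int main() {
  std::printf("%s\n", MoveKind(Loc::kRegister, Loc::kStack));  // reg -> stack (Store)
  return 0;
}
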
+
+// Copy a single parameter from the managed to the JNI calling convention.
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size) {
+  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
+  FrameOffset handle_scope_offset(0);
+  bool null_allowed = false;
+  bool ref_param = jni_conv->IsCurrentParamAReference();
+  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+  // input may be in register, on stack or both - but not none!
+  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
+  if (output_in_reg) {  // output shouldn't straddle registers and stack
+    CHECK(!jni_conv->IsCurrentParamOnStack());
+  } else {
+    CHECK(jni_conv->IsCurrentParamOnStack());
+  }
+  // References need placing in handle scope and the entry address passing.
+  if (ref_param) {
+    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
+    // Compute handle scope offset. Note null is placed in the handle scope but the jobject
+    // passed to the native code must be null (not a pointer into the handle scope
+    // as with regular references).
+    handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
+    // Check handle scope offset is within frame.
+    CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
+  }
+  if (input_in_reg && output_in_reg) {
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    if (ref_param) {
+      __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed);
+    } else {
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling move
+        __ Move(out_reg, in_reg, mr_conv->CurrentParamSize());
+      } else {
+        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
+      }
+    }
+  } else if (!input_in_reg && !output_in_reg) {
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    if (ref_param) {
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
+                                mr_conv->InterproceduralScratchRegister(), null_allowed);
+    } else {
+      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+    }
+  } else if (!input_in_reg && output_in_reg) {
+    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    // Check that incoming stack arguments are above the current stack frame.
+    CHECK_GT(in_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      __ CreateHandleScopeEntry(out_reg, handle_scope_offset,
+                                ManagedRegister::NoRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Load(out_reg, in_off, param_size);
+    }
+  } else {
+    CHECK(input_in_reg && !output_in_reg);
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    // Check outgoing argument is within frame
+    CHECK_LT(out_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      // TODO: recycle value in in_reg rather than reload from handle scope
+      __ CreateHandleScopeEntry(out_off, handle_scope_offset,
+                                mr_conv->InterproceduralScratchRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling store
+        __ Store(out_off, in_reg, param_size);
+      } else {
+        // store where input straddles registers and stack
+        CHECK_EQ(param_size, 8u);
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+      }
+    }
+  }
+}
+
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg) {
+  if (jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset dest = jni_conv->CurrentParamStackOffset();
+    __ StoreRawPtr(dest, in_reg);
+  } else {
+    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
+      __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize());
+    }
+  }
+}
+
+CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
+                                         uint32_t method_idx, const DexFile& dex_file) {
+  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
+}
+
+}  // namespace art
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
new file mode 100644
index 000000000..46277f105
--- /dev/null
+++ b/compiler/jni/quick/jni_compiler.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
+#define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
+
+#include "dex_file.h"
+
+namespace art {
+
+class CompilerDriver;
+class CompiledMethod;
+
+CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
+                                         uint32_t method_idx, const DexFile& dex_file);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
new file mode 100644
index 000000000..2d31a9881
--- /dev/null
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/mips/managed_register_mips.h"
+
+namespace art {
+namespace mips {
+
+static const Register kCoreArgumentRegisters[] = { A0, A1, A2, A3 };
+static const FRegister kFArgumentRegisters[] = { F12, F14 };
+static const DRegister kDArgumentRegisters[] = { D6, D7 };
+
+// Calling convention
+ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+ManagedRegister MipsJniCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return MipsManagedRegister::FromFRegister(F0);
+  } else if (shorty[0] == 'D') {
+    return MipsManagedRegister::FromDRegister(D0);
+  } else if (shorty[0] == 'J') {
+    return MipsManagedRegister::FromRegisterPair(V0_V1);
+  } else if (shorty[0] == 'V') {
+    return MipsManagedRegister::NoRegister();
+  } else {
+    return MipsManagedRegister::FromCoreRegister(V0);
+  }
+}
+
+ManagedRegister MipsManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister MipsJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister MipsJniCallingConvention::IntReturnRegister() {
+  return MipsManagedRegister::FromCoreRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister MipsManagedRuntimeCallingConvention::MethodRegister() {
+  return MipsManagedRegister::FromCoreRegister(A0);
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +        // displacement
+                  kFramePointerSize +                 // Method*
+                  (itr_slots_ * kFramePointerSize));  // offset into in args
+  return result;
+}
+
+const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on MIPS to free them up for scratch use; we then assume
+  // all arguments are on the stack.
+  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+    uint32_t gpr_index = 1;  // Skip A0, it is used for ArtMethod*.
+    uint32_t fpr_index = 0;
+
+    for (ResetIterator(FrameOffset(0)); HasNext(); Next()) {
+      if (IsCurrentParamAFloatOrDouble()) {
+        if (IsCurrentParamADouble()) {
+          if (fpr_index < arraysize(kDArgumentRegisters)) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromDRegister(kDArgumentRegisters[fpr_index++]));
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+          }
+        } else {
+          if (fpr_index < arraysize(kFArgumentRegisters)) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromFRegister(kFArgumentRegisters[fpr_index++]));
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          }
+        }
+      } else {
+        if (IsCurrentParamALong() && !IsCurrentParamAReference()) {
+          if (gpr_index == 1) {
+            // Don't use a1-a2 as a register pair, move to a2-a3 instead.
+            gpr_index++;
+          }
+          if (gpr_index < arraysize(kCoreArgumentRegisters) - 1) {
+            entry_spills_.push_back(
+                MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gpr_index++]));
+          } else if (gpr_index == arraysize(kCoreArgumentRegisters) - 1) {
+            gpr_index++;
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          } else {
+            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+          }
+        }
+
+        if (gpr_index < arraysize(kCoreArgumentRegisters)) {
+          entry_spills_.push_back(
+              MipsManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gpr_index++]));
+        } else {
+          entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+// JNI calling convention
+
+MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
+                                                   const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+  // Compute padding to ensure longs and doubles are not split, as the o32 ABI requires them to
+  // start on an even register. Ignore the 'this' jobject or jclass for static methods and the
+  // JNIEnv. We start at the aligned register A2.
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
+
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S2));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S3));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S4));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S5));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S6));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S7));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(FP));
+}
+
+uint32_t MipsJniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 0;
+  result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << FP | 1 << RA;
+  return result;
+}
+
+ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
+  return MipsManagedRegister::FromCoreRegister(AT);
+}
+
+size_t MipsJniCallingConvention::FrameSize() {
+  // ArtMethod*, RA and callee save area size, local reference segment state
+  size_t frame_data_size = kMipsPointerSize +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  // References plus 2 words for HandleScope header
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t MipsJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_, kStackAlignment);
+}
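
The Next() override that follows enforces the o32 rule that 64-bit arguments must start on an even register/slot boundary. A sketch of the padding decision over a sample argument list, illustrative only:

#include <cstddef>
#include <cstdio>
#include <cstring>

int main() {
  // JNI outgoing args after JNIEnv* and this/jclass (already in A0/A1): int, long.
  const char* jni_args = "IJ";
  unsigned slot = 2;
  for (size_t i = 0; i < std::strlen(jni_args); ++i) {
    char c = jni_args[i];
    bool wide = (c == 'J' || c == 'D');
    if (wide && (slot & 1u) != 0) {
      std::printf("pad slot %u\n", slot);  // skip the odd slot, as in Next() below
      slot++;
    }
    std::printf("%c -> slot %u\n", c, slot);
    slot += wide ? 2u : 1u;
  }
  return 0;
}
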
+
+// JniCallingConvention ABI follows the MIPS o32 ABI, where longs and doubles must occur
+// in even register numbers and stack slots
+void MipsJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number, per the o32 alignment rule.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+bool MipsJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool MipsJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+static const Register kJniArgumentRegisters[] = {
+  A0, A1, A2, A3
+};
+ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return MipsManagedRegister::FromRegisterPair(A2_A3);
+  } else {
+    return
+      MipsManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv*
+  return static_args + param_args + 1;
+}
+}  // namespace mips
+}  // namespace art
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
new file mode 100644
index 000000000..dc4543241
--- /dev/null
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips {
+
+constexpr size_t kFramePointerSize = 4;
+
+class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~MipsManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
+};
+
+class MipsJniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~MipsJniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  void Next() OVERRIDE;  // Override default behavior for AAPCS
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // Mips does not need to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
+};
+
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
new file mode 100644
index 000000000..807d740b4
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "calling_convention_mips64.h" + +#include "base/logging.h" +#include "handle_scope-inl.h" +#include "utils/mips64/managed_register_mips64.h" + +namespace art { +namespace mips64 { + +static const GpuRegister kGpuArgumentRegisters[] = { + A0, A1, A2, A3, A4, A5, A6, A7 +}; + +static const FpuRegister kFpuArgumentRegisters[] = { + F12, F13, F14, F15, F16, F17, F18, F19 +}; + +// Calling convention +ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() { + return Mips64ManagedRegister::FromGpuRegister(T9); +} + +ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() { + return Mips64ManagedRegister::FromGpuRegister(T9); +} + +static ManagedRegister ReturnRegisterForShorty(const char* shorty) { + if (shorty[0] == 'F' || shorty[0] == 'D') { + return Mips64ManagedRegister::FromFpuRegister(F0); + } else if (shorty[0] == 'V') { + return Mips64ManagedRegister::NoRegister(); + } else { + return Mips64ManagedRegister::FromGpuRegister(V0); + } +} + +ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Mips64JniCallingConvention::ReturnRegister() { + return ReturnRegisterForShorty(GetShorty()); +} + +ManagedRegister Mips64JniCallingConvention::IntReturnRegister() { + return Mips64ManagedRegister::FromGpuRegister(V0); +} + +// Managed runtime calling convention + +ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() { + return Mips64ManagedRegister::FromGpuRegister(A0); +} + +bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { + return false; // Everything moved to stack on entry. +} + +bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { + return true; +} + +ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + return ManagedRegister::NoRegister(); +} + +FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { + CHECK(IsCurrentParamOnStack()); + FrameOffset result = + FrameOffset(displacement_.Int32Value() + // displacement + kFramePointerSize + // Method ref + (itr_slots_ * sizeof(uint32_t))); // offset into in args + return result; +} + +const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() { + // We spill the argument registers on MIPS64 to free them up for scratch use, + // we then assume all arguments are on the stack. + if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { + int reg_index = 1; // we start from A1, A0 holds ArtMethod*. + + // We need to choose the correct register size since the managed + // stack uses 32bit stack slots. + ResetIterator(FrameOffset(0)); + while (HasNext()) { + if (reg_index < 8) { + if (IsCurrentParamAFloatOrDouble()) { // FP regs. + FpuRegister arg = kFpuArgumentRegisters[reg_index]; + Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg); + entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4); + } else { // GP regs. 
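+        // Only true longs get an 8-byte spill slot below; ints and object
+        // references are held as 32-bit values in the managed ABI, so they
+        // spill to 4-byte slots even though the GPRs are 64 bits wide.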
+        GpuRegister arg = kGpuArgumentRegisters[reg_index];
+        Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
+        entry_spills_.push_back(reg,
+                                (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
+      }
+      // e.g. A1, A2, F3, A4, F5, F6, A7
+      reg_index++;
+    }
+
+    Next();
+  }
+}
+  return entry_spills_;
+}
+
+// JNI calling convention
+
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+                                                       const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
+  callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
+}
+
+uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 0;
+  result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
+  DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1);
+  return result;
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
+  return Mips64ManagedRegister::FromGpuRegister(AT);
+}
+
+size_t Mips64JniCallingConvention::FrameSize() {
+  // ArtMethod*, RA and callee save area size, local reference segment state
+  size_t frame_data_size = kFramePointerSize +
+      (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
+  // References plus 2 words for HandleScope header
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t Mips64JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
+  return itr_args_ < 8;
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
+  CHECK(IsCurrentParamInRegister());
+  if (IsCurrentParamAFloatOrDouble()) {
+    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
+  } else {
+    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
+  }
+}
+
+FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
+  // all arguments including JNI args
+  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+  // Nothing on the stack unless there are more than 8 arguments
+  return (all_args > 8) ? all_args - 8 : 0;
+}
+}  // namespace mips64
+}  // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
new file mode 100644
index 000000000..3d6aab739
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
+};
+
+class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~Mips64JniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // Mips64 does not need to extend small return types.
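+  // (The MIPS n64 ABI keeps 32-bit values sign-extended in 64-bit registers,
+  // so V0 already holds a correctly widened result.)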
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
+};
+
+}  // namespace mips64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
new file mode 100644
index 000000000..322caca41
--- /dev/null
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/x86/managed_register_x86.h"
+
+namespace art {
+namespace x86 {
+
+// Calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    if (jni) {
+      return X86ManagedRegister::FromX87Register(ST0);
+    } else {
+      return X86ManagedRegister::FromXmmRegister(XMM0);
+    }
+  } else if (shorty[0] == 'J') {
+    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  } else if (shorty[0] == 'V') {
+    return ManagedRegister::NoRegister();
+  } else {
+    return X86ManagedRegister::FromCpuRegister(EAX);
+  }
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86JniCallingConvention::IntReturnRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills.
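+  // (Those registers are ECX, EDX and EBX for GPR arguments and XMM0..XMM3
+  // for FP arguments; see CurrentParamRegister() below.)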
+  return true;
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  ManagedRegister res = ManagedRegister::NoRegister();
+  if (!IsCurrentParamAFloatOrDouble()) {
+    switch (gpr_arg_count_) {
+      case 0:
+        res = X86ManagedRegister::FromCpuRegister(ECX);
+        break;
+      case 1:
+        res = X86ManagedRegister::FromCpuRegister(EDX);
+        break;
+      case 2:
+        // Don't split a long between the last register and the stack.
+        if (IsCurrentParamALong()) {
+          return ManagedRegister::NoRegister();
+        }
+        res = X86ManagedRegister::FromCpuRegister(EBX);
+        break;
+    }
+  } else if (itr_float_and_doubles_ < 4) {
+    // First four float parameters are passed via XMM0..XMM3
+    res = X86ManagedRegister::FromXmmRegister(
+        static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
+  }
+  return res;
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
+  ManagedRegister res = ManagedRegister::NoRegister();
+  DCHECK(IsCurrentParamALong());
+  switch (gpr_arg_count_) {
+    case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
+    case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
+  }
+  return res;
+}
+
+FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +      // displacement
+                     kFramePointerSize +               // Method*
+                     (itr_slots_ * kFramePointerSize));  // offset into in args
+}
+
+const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on X86 to free them up for scratch use, we then assume
+  // all arguments are on the stack.
+  if (entry_spills_.size() == 0) {
+    ResetIterator(FrameOffset(0));
+    while (HasNext()) {
+      ManagedRegister in_reg = CurrentParamRegister();
+      bool is_long = IsCurrentParamALong();
+      if (!in_reg.IsNoRegister()) {
+        int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
+        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+        ManagedRegisterSpill spill(in_reg, size, spill_offset);
+        entry_spills_.push_back(spill);
+        if (is_long) {
+          // special case, as we need a second register here.
+          in_reg = CurrentParamHighLongRegister();
+          DCHECK(!in_reg.IsNoRegister());
+          // We have to spill the second half of the long.
+          ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
+          entry_spills_.push_back(spill2);
+        }
+
+        // Keep track of the number of GPRs allocated.
+        if (!IsCurrentParamAFloatOrDouble()) {
+          if (is_long) {
+            // Long was allocated in 2 registers.
+            gpr_arg_count_ += 2;
+          } else {
+            gpr_arg_count_++;
+          }
+        }
+      } else if (is_long) {
+        // We need to skip the unused last register, which is empty.
+        // If we are already out of registers, this is harmless.
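+        // e.g. for (int, int, long): the ints take ECX and EDX, and the long
+        // would have to split across EBX and the stack, so it is passed
+        // entirely on the stack and EBX stays unused for later arguments.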
+ gpr_arg_count_ += 2; + } + Next(); + } + } + return entry_spills_; +} + +// JNI calling convention + +X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized, + const char* shorty) + : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) { + callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EBP)); + callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(ESI)); + callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EDI)); +} + +uint32_t X86JniCallingConvention::CoreSpillMask() const { + return 1 << EBP | 1 << ESI | 1 << EDI | 1 << kNumberOfCpuRegisters; +} + +size_t X86JniCallingConvention::FrameSize() { + // Method*, return address and callee save area size, local reference segment state + size_t frame_data_size = kX86PointerSize + + (2 + CalleeSaveRegisters().size()) * kFramePointerSize; + // References plus 2 words for HandleScope header + size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount()); + // Plus return value spill area size + return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment); +} + +size_t X86JniCallingConvention::OutArgSize() { + return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment); +} + +bool X86JniCallingConvention::IsCurrentParamInRegister() { + return false; // Everything is passed by stack. +} + +bool X86JniCallingConvention::IsCurrentParamOnStack() { + return true; // Everything is passed by stack. +} + +ManagedRegister X86JniCallingConvention::CurrentParamRegister() { + LOG(FATAL) << "Should not reach here"; + return ManagedRegister::NoRegister(); +} + +FrameOffset X86JniCallingConvention::CurrentParamStackOffset() { + return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize)); +} + +size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() { + size_t static_args = IsStatic() ? 1 : 0; // count jclass + // regular argument parameters and this + size_t param_args = NumArgs() + NumLongOrDoubleArgs(); + // count JNIEnv* and return pc (pushed after Method*) + size_t total_args = static_args + param_args + 2; + return total_args; +} + +} // namespace x86 +} // namespace art diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h new file mode 100644 index 000000000..cdf0956c9 --- /dev/null +++ b/compiler/jni/quick/x86/calling_convention_x86.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+#define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86 {
+
+constexpr size_t kFramePointerSize = 4;
+
+class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+        gpr_arg_count_(0) {}
+  ~X86ManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+  int gpr_arg_count_;
+  ManagedRegister CurrentParamHighLongRegister();
+  ManagedRegisterEntrySpills entry_spills_;
+  DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
+};
+
+class X86JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~X86JniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE {
+    return 0;
+  }
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // x86 needs to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
new file mode 100644
index 000000000..b6b11ca51
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86_64.h"
+
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/x86_64/managed_register_x86_64.h"
+
+namespace art {
+namespace x86_64 {
+
+// Calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
+  return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    return X86_64ManagedRegister::FromXmmRegister(XMM0);
+  } else if (shorty[0] == 'J') {
+    return X86_64ManagedRegister::FromCpuRegister(RAX);
+  } else if (shorty[0] == 'V') {
+    return ManagedRegister::NoRegister();
+  } else {
+    return X86_64ManagedRegister::FromCpuRegister(RAX);
+  }
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
+  return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
+  return X86_64ManagedRegister::FromCpuRegister(RDI);
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return !IsCurrentParamOnStack();
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
+  return true;
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  ManagedRegister res = ManagedRegister::NoRegister();
+  if (!IsCurrentParamAFloatOrDouble()) {
+    switch (itr_args_ - itr_float_and_doubles_) {
+      case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+      case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+      case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+      case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+      case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+    }
+  } else if (itr_float_and_doubles_ < 8) {
+    // First eight float parameters are passed via XMM0..XMM7
+    res = X86_64ManagedRegister::FromXmmRegister(
+        static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
+  }
+  return res;
+}
+
+FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +   // displacement
+                     kX86_64PointerSize +           // Method ref
+                     itr_slots_ * sizeof(uint32_t));  // offset into in args
+}
+
+const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on X86 to free them up for scratch use, we then assume
+  // all arguments are on the stack.
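+  // The list is computed lazily on the first call and cached in entry_spills_;
+  // e.g. for (int, double, long) it records spills of RSI (4 bytes), XMM0
+  // (8 bytes) and RDX (8 bytes) into the managed in-argument slots.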
+  if (entry_spills_.size() == 0) {
+    ResetIterator(FrameOffset(0));
+    while (HasNext()) {
+      ManagedRegister in_reg = CurrentParamRegister();
+      if (!in_reg.IsNoRegister()) {
+        int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
+        int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+        ManagedRegisterSpill spill(in_reg, size, spill_offset);
+        entry_spills_.push_back(spill);
+      }
+      Next();
+    }
+  }
+  return entry_spills_;
+}
+
+// JNI calling convention
+
+X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
+                                                       const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBX));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBP));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R12));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R13));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R14));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R15));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM12));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM13));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM14));
+  callee_save_regs_.push_back(X86_64ManagedRegister::FromXmmRegister(XMM15));
+}
+
+uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
+  return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 |
+      1 << kNumberOfCpuRegisters;
+}
+
+uint32_t X86_64JniCallingConvention::FpSpillMask() const {
+  return 1 << XMM12 | 1 << XMM13 | 1 << XMM14 | 1 << XMM15;
+}
+
+size_t X86_64JniCallingConvention::FrameSize() {
+  // Method*, return address and callee save area size, local reference segment state
+  size_t frame_data_size = kX86_64PointerSize +
+      (2 + CalleeSaveRegisters().size()) * kFramePointerSize;
+  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
+  size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86_64JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
+  return !IsCurrentParamOnStack();
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
+  return CurrentParamRegister().IsNoRegister();
+}
+
+ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
+  ManagedRegister res = ManagedRegister::NoRegister();
+  if (!IsCurrentParamAFloatOrDouble()) {
+    switch (itr_args_ - itr_float_and_doubles_) {
+      case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
+      case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+      case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+      case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+      case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+      case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+    }
+  } else if (itr_float_and_doubles_ < 8) {
+    // First eight float parameters are passed via XMM0..XMM7
+    res = X86_64ManagedRegister::FromXmmRegister(
+        static_cast<FloatRegister>(XMM0 + itr_float_and_doubles_));
+  }
+  return res;
+}
+
+FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
+  size_t offset = itr_args_
+      - std::min(8U, itr_float_and_doubles_)               // Float arguments passed through Xmm0..Xmm7
+      - std::min(6U, itr_args_ - itr_float_and_doubles_);  // Integer arguments passed through GPR
+  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (offset * kFramePointerSize));
+}
+
+size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv* and return pc (pushed after Method*)
+  size_t total_args = static_args + param_args + 2;
+
+  // Float arguments passed through Xmm0..Xmm7
+  // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
+  size_t total_stack_args = total_args
+      - std::min(8U, static_cast<unsigned int>(NumFloatOrDoubleArgs()))
+      - std::min(6U, static_cast<unsigned int>(NumArgs() - NumFloatOrDoubleArgs()));
+
+  return total_stack_args;
+}
+
+}  // namespace x86_64
+}  // namespace art
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
new file mode 100644
index 000000000..6e47c9fae
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86_64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+  X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+  ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // Managed runtime calling convention
+  ManagedRegister MethodRegister() OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ private:
+  ManagedRegisterEntrySpills entry_spills_;
+  DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
+};
+
+class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+  X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  ~X86_64JniCallingConvention() OVERRIDE {}
+  // Calling convention
+  ManagedRegister ReturnRegister() OVERRIDE;
+  ManagedRegister IntReturnRegister() OVERRIDE;
+  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+  // JNI calling convention
+  size_t FrameSize() OVERRIDE;
+  size_t OutArgSize() OVERRIDE;
+  const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+    return callee_save_regs_;
+  }
+  ManagedRegister ReturnScratchRegister() const OVERRIDE;
+  uint32_t CoreSpillMask() const OVERRIDE;
+  uint32_t FpSpillMask() const OVERRIDE;
+  bool IsCurrentParamInRegister() OVERRIDE;
+  bool IsCurrentParamOnStack() OVERRIDE;
+  ManagedRegister CurrentParamRegister() OVERRIDE;
+  FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+  // x86-64 needs to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
+ protected:
+  size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
+};
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
new file mode 100644
index 000000000..d4dd978c5
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/arm/relative_patcher_arm_base.h"
+
+#include "compiled_method.h"
+#include "linker/output_stream.h"
+#include "oat.h"
+#include "oat_quick_method_header.h"
+
+namespace art {
+namespace linker {
+
+uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
+                                              const CompiledMethod* compiled_method,
+                                              MethodReference method_ref) {
+  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
+}
+
+uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
+  // NOTE: The final thunk can be reserved from InitCodeMethodVisitor::EndClass() while it
+  // may be written early by WriteCodeMethodVisitor::VisitMethod() for a deduplicated chunk
+  // of code. To avoid any alignment discrepancies for the final chunk, we always align the
+  // offset after reserving or writing any chunk.
+  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
+  bool needs_thunk = ReserveSpaceProcessPatches(aligned_offset,
+                                                MethodReference(nullptr, 0u),
+                                                aligned_offset);
+  if (needs_thunk) {
+    // All remaining patches will be handled by this thunk.
+    DCHECK(!unprocessed_patches_.empty());
+    DCHECK_LE(aligned_offset - unprocessed_patches_.front().second, max_positive_displacement_);
+    unprocessed_patches_.clear();
+
+    thunk_locations_.push_back(aligned_offset);
+    offset = CompiledMethod::AlignCode(aligned_offset + thunk_code_.size(), instruction_set_);
+  }
+  return offset;
+}
+
+uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
+  if (current_thunk_to_write_ == thunk_locations_.size()) {
+    return offset;
+  }
+  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
+  if (UNLIKELY(aligned_offset == thunk_locations_[current_thunk_to_write_])) {
+    ++current_thunk_to_write_;
+    uint32_t aligned_code_delta = aligned_offset - offset;
+    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
+      return 0u;
+    }
+    if (UNLIKELY(!WriteRelCallThunk(out, ArrayRef<const uint8_t>(thunk_code_)))) {
+      return 0u;
+    }
+    uint32_t thunk_end_offset = aligned_offset + thunk_code_.size();
+    // Align after writing chunk, see the ReserveSpace() above.
+    offset = CompiledMethod::AlignCode(thunk_end_offset, instruction_set_);
+    aligned_code_delta = offset - thunk_end_offset;
+    if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
+      return 0u;
+    }
+  }
+  return offset;
+}
+
+ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
+                                               InstructionSet instruction_set,
+                                               std::vector<uint8_t> thunk_code,
+                                               uint32_t max_positive_displacement,
+                                               uint32_t max_negative_displacement)
+    : provider_(provider), instruction_set_(instruction_set), thunk_code_(thunk_code),
+      max_positive_displacement_(max_positive_displacement),
+      max_negative_displacement_(max_negative_displacement),
+      thunk_locations_(), current_thunk_to_write_(0u), unprocessed_patches_() {
+}
+
+uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
+                                                      const CompiledMethod* compiled_method,
+                                                      MethodReference method_ref,
+                                                      uint32_t max_extra_space) {
+  uint32_t quick_code_size = compiled_method->GetQuickCode().size();
+  uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+  uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
+  // Adjust for extra space required by the subclass.
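+  // (A subclass may request extra space here for its own fix-up code; the
+  // base patcher itself only reserves space for call thunks.)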
+ next_aligned_offset = compiled_method->AlignCode(next_aligned_offset + max_extra_space); + // TODO: ignore unprocessed patches targeting this method if they can reach quick_code_offset. + // We need the MethodReference for that. + if (!unprocessed_patches_.empty() && + next_aligned_offset - unprocessed_patches_.front().second > max_positive_displacement_) { + bool needs_thunk = ReserveSpaceProcessPatches(quick_code_offset, + method_ref, + next_aligned_offset); + if (needs_thunk) { + // A single thunk will cover all pending patches. + unprocessed_patches_.clear(); + uint32_t thunk_location = compiled_method->AlignCode(offset); + thunk_locations_.push_back(thunk_location); + offset = CompiledMethod::AlignCode(thunk_location + thunk_code_.size(), instruction_set_); + } + } + for (const LinkerPatch& patch : compiled_method->GetPatches()) { + if (patch.GetType() == LinkerPatch::Type::kCallRelative) { + unprocessed_patches_.emplace_back(patch.TargetMethod(), + quick_code_offset + patch.LiteralOffset()); + } + } + return offset; +} + +uint32_t ArmBaseRelativePatcher::CalculateDisplacement(uint32_t patch_offset, + uint32_t target_offset) { + // Unsigned arithmetic with its well-defined overflow behavior is just fine here. + uint32_t displacement = target_offset - patch_offset; + // NOTE: With unsigned arithmetic we do mean to use && rather than || below. + if (displacement > max_positive_displacement_ && displacement < -max_negative_displacement_) { + // Unwritten thunks have higher offsets, check if it's within range. + DCHECK(current_thunk_to_write_ == thunk_locations_.size() || + thunk_locations_[current_thunk_to_write_] > patch_offset); + if (current_thunk_to_write_ != thunk_locations_.size() && + thunk_locations_[current_thunk_to_write_] - patch_offset < max_positive_displacement_) { + displacement = thunk_locations_[current_thunk_to_write_] - patch_offset; + } else { + // We must have a previous thunk then. + DCHECK_NE(current_thunk_to_write_, 0u); + DCHECK_LT(thunk_locations_[current_thunk_to_write_ - 1], patch_offset); + displacement = thunk_locations_[current_thunk_to_write_ - 1] - patch_offset; + DCHECK(displacement >= -max_negative_displacement_); + } + } + return displacement; +} + +bool ArmBaseRelativePatcher::ReserveSpaceProcessPatches(uint32_t quick_code_offset, + MethodReference method_ref, + uint32_t next_aligned_offset) { + // Process as many patches as possible, stop only on unresolved targets or calls too far back. + while (!unprocessed_patches_.empty()) { + MethodReference patch_ref = unprocessed_patches_.front().first; + uint32_t patch_offset = unprocessed_patches_.front().second; + DCHECK(thunk_locations_.empty() || thunk_locations_.back() <= patch_offset); + if (patch_ref.dex_file == method_ref.dex_file && + patch_ref.dex_method_index == method_ref.dex_method_index) { + DCHECK_GT(quick_code_offset, patch_offset); + if (quick_code_offset - patch_offset > max_positive_displacement_) { + return true; + } + } else { + auto result = provider_->FindMethodOffset(patch_ref); + if (!result.first) { + // If still unresolved, check if we have a thunk within range. + if (thunk_locations_.empty() || + patch_offset - thunk_locations_.back() > max_negative_displacement_) { + // No thunk in range, we need a thunk if the next aligned offset + // is out of range, or if we're at the end of all code. + return (next_aligned_offset - patch_offset > max_positive_displacement_) || + (quick_code_offset == next_aligned_offset); // End of code. 
+        }
+      } else {
+        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
+        if (target_offset >= patch_offset) {
+          DCHECK_LE(target_offset - patch_offset, max_positive_displacement_);
+        } else {
+          // When calling back, check if we have a thunk that's closer than the actual target.
+          if (!thunk_locations_.empty()) {
+            target_offset = std::max(target_offset, thunk_locations_.back());
+          }
+          if (patch_offset - target_offset > max_negative_displacement_) {
+            return true;
+          }
+        }
+      }
+    }
+    unprocessed_patches_.pop_front();
+  }
+  return false;
+}
+
+}  // namespace linker
+}  // namespace art
diff --git a/compiler/linker/arm/relative_patcher_arm_base.h b/compiler/linker/arm/relative_patcher_arm_base.h
new file mode 100644
index 000000000..25fd35e1d
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_arm_base.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_
+#define ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_
+
+#include <deque>
+#include <vector>
+
+#include "linker/relative_patcher.h"
+#include "method_reference.h"
+
+namespace art {
+namespace linker {
+
+class ArmBaseRelativePatcher : public RelativePatcher {
+ public:
+  uint32_t ReserveSpace(uint32_t offset,
+                        const CompiledMethod* compiled_method,
+                        MethodReference method_ref) OVERRIDE;
+  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
+  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;
+
+ protected:
+  ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
+                         InstructionSet instruction_set,
+                         std::vector<uint8_t> thunk_code,
+                         uint32_t max_positive_displacement,
+                         uint32_t max_negative_displacement);
+
+  uint32_t ReserveSpaceInternal(uint32_t offset,
+                                const CompiledMethod* compiled_method,
+                                MethodReference method_ref,
+                                uint32_t max_extra_space);
+  uint32_t CalculateDisplacement(uint32_t patch_offset, uint32_t target_offset);
+
+ private:
+  bool ReserveSpaceProcessPatches(uint32_t quick_code_offset, MethodReference method_ref,
+                                  uint32_t next_aligned_offset);
+
+  RelativePatcherTargetProvider* const provider_;
+  const InstructionSet instruction_set_;
+  const std::vector<uint8_t> thunk_code_;
+  const uint32_t max_positive_displacement_;
+  const uint32_t max_negative_displacement_;
+  std::vector<uint32_t> thunk_locations_;
+  size_t current_thunk_to_write_;
+
+  // ReserveSpace() tracks unprocessed patches.
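+  // Each entry pairs a call's target method with the offset of the patched
+  // call instruction, in the order the patches were reserved.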
+  typedef std::pair<MethodReference, uint32_t> UnprocessedPatch;
+  std::deque<UnprocessedPatch> unprocessed_patches_;
+
+  friend class Arm64RelativePatcherTest;
+  friend class Thumb2RelativePatcherTest;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmBaseRelativePatcher);
+};
+
+}  // namespace linker
+}  // namespace art
+
+#endif  // ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
new file mode 100644
index 000000000..fa49fc4e6
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/arm/relative_patcher_thumb2.h"
+
+#include "art_method.h"
+#include "compiled_method.h"
+#include "utils/arm/assembler_thumb2.h"
+
+namespace art {
+namespace linker {
+
+Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider)
+    : ArmBaseRelativePatcher(provider, kThumb2, CompileThunkCode(),
+                             kMaxPositiveDisplacement, kMaxNegativeDisplacement) {
+}
+
+void Thumb2RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+                                      uint32_t literal_offset,
+                                      uint32_t patch_offset,
+                                      uint32_t target_offset) {
+  DCHECK_LE(literal_offset + 4u, code->size());
+  DCHECK_EQ(literal_offset & 1u, 0u);
+  DCHECK_EQ(patch_offset & 1u, 0u);
+  DCHECK_EQ(target_offset & 1u, 1u);  // Thumb2 mode bit.
+  uint32_t displacement = CalculateDisplacement(patch_offset, target_offset & ~1u);
+  displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
+  DCHECK_EQ(displacement & 1u, 0u);
+  DCHECK((displacement >> 24) == 0u || (displacement >> 24) == 255u);  // 25-bit signed.
+  uint32_t signbit = (displacement >> 31) & 0x1;
+  uint32_t i1 = (displacement >> 23) & 0x1;
+  uint32_t i2 = (displacement >> 22) & 0x1;
+  uint32_t imm10 = (displacement >> 12) & 0x03ff;
+  uint32_t imm11 = (displacement >> 1) & 0x07ff;
+  uint32_t j1 = i1 ^ (signbit ^ 1);
+  uint32_t j2 = i2 ^ (signbit ^ 1);
+  uint32_t value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) | imm11;
+  value |= 0xf000d000;  // BL
+
+  // Check that we're just overwriting an existing BL.
+  DCHECK_EQ(GetInsn32(code, literal_offset) & 0xf800d000, 0xf000d000);
+  // Write the new BL.
+  SetInsn32(code, literal_offset, value);
+}
+
+void Thumb2RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
+                                                     const LinkerPatch& patch,
+                                                     uint32_t patch_offset,
+                                                     uint32_t target_offset) {
+  uint32_t literal_offset = patch.LiteralOffset();
+  uint32_t pc_literal_offset = patch.PcInsnOffset();
+  uint32_t pc_base = patch_offset + (pc_literal_offset - literal_offset) + 4u /* PC adjustment */;
+  uint32_t diff = target_offset - pc_base;
+
+  uint32_t insn = GetInsn32(code, literal_offset);
+  DCHECK_EQ(insn & 0xff7ff0ffu, 0xf2400000u);  // MOVW/MOVT, unpatched (imm16 == 0).
+  uint32_t diff16 = ((insn & 0x00800000u) != 0u) ? (diff >> 16) : (diff & 0xffffu);
+  uint32_t imm4 = (diff16 >> 12) & 0xfu;
+  uint32_t imm = (diff16 >> 11) & 0x1u;
+  uint32_t imm3 = (diff16 >> 8) & 0x7u;
+  uint32_t imm8 = diff16 & 0xffu;
+  insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
+  SetInsn32(code, literal_offset, insn);
+}
+
+std::vector<uint8_t> Thumb2RelativePatcher::CompileThunkCode() {
+  // The thunk just uses the entry point in the ArtMethod. This works even for calls
+  // to the generic JNI and interpreter trampolines.
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+  arm::Thumb2Assembler assembler(&arena);
+  assembler.LoadFromOffset(
+      arm::kLoadWord, arm::PC, arm::R0,
+      ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
+  assembler.bkpt(0);
+  assembler.FinalizeCode();
+  std::vector<uint8_t> thunk_code(assembler.CodeSize());
+  MemoryRegion code(thunk_code.data(), thunk_code.size());
+  assembler.FinalizeInstructions(code);
+  return thunk_code;
+}
+
+void Thumb2RelativePatcher::SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
+  DCHECK_LE(offset + 4u, code->size());
+  DCHECK_EQ(offset & 1u, 0u);
+  uint8_t* addr = &(*code)[offset];
+  addr[0] = (value >> 16) & 0xff;
+  addr[1] = (value >> 24) & 0xff;
+  addr[2] = (value >> 0) & 0xff;
+  addr[3] = (value >> 8) & 0xff;
+}
+
+uint32_t Thumb2RelativePatcher::GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset) {
+  DCHECK_LE(offset + 4u, code.size());
+  DCHECK_EQ(offset & 1u, 0u);
+  const uint8_t* addr = &code[offset];
+  return
+      (static_cast<uint32_t>(addr[0]) << 16) +
+      (static_cast<uint32_t>(addr[1]) << 24) +
+      (static_cast<uint32_t>(addr[2]) << 0) +
+      (static_cast<uint32_t>(addr[3]) << 8);
+}
+
+template <typename Vector>
+uint32_t Thumb2RelativePatcher::GetInsn32(Vector* code, uint32_t offset) {
+  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+  return GetInsn32(ArrayRef<const uint8_t>(*code), offset);
+}
+
+}  // namespace linker
+}  // namespace art
diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h
new file mode 100644
index 000000000..d85739c51
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_thumb2.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
+#define ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
+
+#include "linker/arm/relative_patcher_arm_base.h"
+
+namespace art {
+namespace linker {
+
+class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
+ public:
+  explicit Thumb2RelativePatcher(RelativePatcherTargetProvider* provider);
+
+  void PatchCall(std::vector<uint8_t>* code,
+                 uint32_t literal_offset,
+                 uint32_t patch_offset,
+                 uint32_t target_offset) OVERRIDE;
+  void PatchPcRelativeReference(std::vector<uint8_t>* code,
+                                const LinkerPatch& patch,
+                                uint32_t patch_offset,
+                                uint32_t target_offset) OVERRIDE;
+
+ private:
+  static std::vector<uint8_t> CompileThunkCode();
+
+  void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
+  static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset);
+
+  template <typename Vector>
+  static uint32_t GetInsn32(Vector* code, uint32_t offset);
+
+  // PC displacement from patch location; Thumb2 PC is always at instruction address + 4.
+  static constexpr int32_t kPcDisplacement = 4;
+
+  // Maximum positive and negative displacement measured from the patch location.
+  // (Signed 25 bit displacement with the last bit 0 has range [-2^24, 2^24-2] measured from
+  // the Thumb2 PC pointing right after the BL, i.e. 4 bytes later than the patch location.)
+  static constexpr uint32_t kMaxPositiveDisplacement = (1u << 24) - 2 + kPcDisplacement;
+  static constexpr uint32_t kMaxNegativeDisplacement = (1u << 24) - kPcDisplacement;
+
+  DISALLOW_COPY_AND_ASSIGN(Thumb2RelativePatcher);
+};
+
+}  // namespace linker
+}  // namespace art
+
+#endif  // ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
new file mode 100644
index 000000000..a8078e304
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/relative_patcher_test.h"
+#include "linker/arm/relative_patcher_thumb2.h"
+#include "oat_quick_method_header.h"
+
+namespace art {
+namespace linker {
+
+class Thumb2RelativePatcherTest : public RelativePatcherTest {
+ public:
+  Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }
+
+ protected:
+  static const uint8_t kCallRawCode[];
+  static const ArrayRef<const uint8_t> kCallCode;
+  static const uint8_t kNopRawCode[];
+  static const ArrayRef<const uint8_t> kNopCode;
+  static const uint8_t kUnpatchedPcRelativeRawCode[];
+  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
+  static const uint32_t kPcInsnOffset;
+
+  // Branches within range [-256, 256) can be created from these by adding the low 8 bits.
+  static constexpr uint32_t kBlPlus0 = 0xf000f800;
+  static constexpr uint32_t kBlMinus256 = 0xf7ffff00;
+
+  // Special BL values.
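+  // Decoded: kBlPlusMax has S=0, I1=I2=1 and all imm10/imm11 bits set,
+  // i.e. imm32 = +2^24 - 2; kBlMinusMax has S=1, I1=I2=0 and zero imm bits,
+  // i.e. imm32 = -2^24.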
+
+  DISALLOW_COPY_AND_ASSIGN(Thumb2RelativePatcher);
+};
+
+}  // namespace linker
+}  // namespace art
+
+#endif  // ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
diff --git a/compiler/linker/arm/relative_patcher_thumb2_test.cc b/compiler/linker/arm/relative_patcher_thumb2_test.cc
new file mode 100644
index 000000000..a8078e304
--- /dev/null
+++ b/compiler/linker/arm/relative_patcher_thumb2_test.cc
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/relative_patcher_test.h"
+#include "linker/arm/relative_patcher_thumb2.h"
+#include "oat_quick_method_header.h"
+
+namespace art {
+namespace linker {
+
+class Thumb2RelativePatcherTest : public RelativePatcherTest {
+ public:
+  Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }
+
+ protected:
+  static const uint8_t kCallRawCode[];
+  static const ArrayRef<const uint8_t> kCallCode;
+  static const uint8_t kNopRawCode[];
+  static const ArrayRef<const uint8_t> kNopCode;
+  static const uint8_t kUnpatchedPcRelativeRawCode[];
+  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
+  static const uint32_t kPcInsnOffset;
+
+  // Branches within range [-256, 256) can be created from these by adding the low 8 bits.
+  static constexpr uint32_t kBlPlus0 = 0xf000f800;
+  static constexpr uint32_t kBlMinus256 = 0xf7ffff00;
+
+  // Special BL values.
+  static constexpr uint32_t kBlPlusMax = 0xf3ffd7ff;
+  static constexpr uint32_t kBlMinusMax = 0xf400d000;
+
+  bool Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
+                             const ArrayRef<const LinkerPatch>& method1_patches,
+                             const ArrayRef<const uint8_t>& method3_code,
+                             const ArrayRef<const LinkerPatch>& method3_patches,
+                             uint32_t distance_without_thunks) {
+    CHECK_EQ(distance_without_thunks % kArmAlignment, 0u);
+    const uint32_t method1_offset =
+        CompiledCode::AlignCode(kTrampolineSize, kThumb2) + sizeof(OatQuickMethodHeader);
+    AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
+
+    // We want to put method3 at a very precise offset.
+    const uint32_t method3_offset = method1_offset + distance_without_thunks;
+    CHECK_ALIGNED(method3_offset - sizeof(OatQuickMethodHeader), kArmAlignment);
+
+    // Calculate the size of method2 so that we put method3 at the correct place.
+    const uint32_t method2_offset =
+        CompiledCode::AlignCode(method1_offset + method1_code.size(), kThumb2) +
+        sizeof(OatQuickMethodHeader);
+    const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset);
+    std::vector<uint8_t> method2_raw_code(method2_size);
+    ArrayRef<const uint8_t> method2_code(method2_raw_code);
+    AddCompiledMethod(MethodRef(2u), method2_code, ArrayRef<const LinkerPatch>());
+
+    AddCompiledMethod(MethodRef(3u), method3_code, method3_patches);
+
+    Link();
+
+    // Check assumptions.
+    CHECK_EQ(GetMethodOffset(1), method1_offset);
+    CHECK_EQ(GetMethodOffset(2), method2_offset);
+    auto result3 = method_offset_map_.FindMethodOffset(MethodRef(3));
+    CHECK(result3.first);
+    // There may be a thunk before method3.
+    if (result3.second == method3_offset + 1 /* thumb mode */) {
+      return false;  // No thunk.
+    } else {
+      uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kThumb2);
+      CHECK_EQ(result3.second, method3_offset + aligned_thunk_size + 1 /* thumb mode */);
+      return true;   // Thunk present.
+    }
+  }
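+
+  // Illustration (assuming the layout logic above): with a kArmAlignment-aligned
+  // distance_without_thunks of, say, 16 * MB, method3's code is placed exactly
+  // 16 MiB after method1's, with method2 sized to pad the gap. If the linker had
+  // to insert a call thunk into the gap instead, method3 shifts by the aligned
+  // thunk size and this helper returns true.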
+
+  uint32_t GetMethodOffset(uint32_t method_idx) {
+    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
+    CHECK(result.first);
+    CHECK_NE(result.second & 1u, 0u);
+    return result.second - 1 /* thumb mode */;
+  }
+
+  uint32_t ThunkSize() {
+    return static_cast<Thumb2RelativePatcher*>(patcher_.get())->thunk_code_.size();
+  }
+
+  bool CheckThunk(uint32_t thunk_offset) {
+    Thumb2RelativePatcher* patcher = static_cast<Thumb2RelativePatcher*>(patcher_.get());
+    ArrayRef<const uint8_t> expected_code(patcher->thunk_code_);
+    if (output_.size() < thunk_offset + expected_code.size()) {
+      LOG(ERROR) << "output_.size() == " << output_.size() << " < "
+          << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size());
+      return false;
+    }
+    ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size());
+    if (linked_code == expected_code) {
+      return true;
+    }
+    // Log failure info.
+    DumpDiff(expected_code, linked_code);
+    return false;
+  }
+
+  std::vector<uint8_t> GenNopsAndBl(size_t num_nops, uint32_t bl) {
+    std::vector<uint8_t> result;
+    result.reserve(num_nops * 2u + 4u);
+    for (size_t i = 0; i != num_nops; ++i) {
+      result.push_back(0x00);
+      result.push_back(0xbf);
+    }
+    result.push_back(static_cast<uint8_t>(bl >> 16));
+    result.push_back(static_cast<uint8_t>(bl >> 24));
+    result.push_back(static_cast<uint8_t>(bl));
+    result.push_back(static_cast<uint8_t>(bl >> 8));
+    return result;
+  }
+
+  void TestDexCacheReference(uint32_t dex_cache_arrays_begin, uint32_t element_offset);
+  void TestStringReference(uint32_t string_offset);
+  void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
+};
+
+const uint8_t Thumb2RelativePatcherTest::kCallRawCode[] = {
+    0x00, 0xf0, 0x00, 0xf8
+};
+
+const ArrayRef<const uint8_t> Thumb2RelativePatcherTest::kCallCode(kCallRawCode);
+
+const uint8_t Thumb2RelativePatcherTest::kNopRawCode[] = {
+    0x00, 0xbf
+};
+
+const ArrayRef<const uint8_t> Thumb2RelativePatcherTest::kNopCode(kNopRawCode);
+
+const uint8_t Thumb2RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
+    0x40, 0xf2, 0x00, 0x00,  // MOVW r0, #0 (placeholder)
+    0xc0, 0xf2, 0x00, 0x00,  // MOVT r0, #0 (placeholder)
+    0x78, 0x44,              // ADD r0, pc
+};
+const ArrayRef<const uint8_t> Thumb2RelativePatcherTest::kUnpatchedPcRelativeCode(
+    kUnpatchedPcRelativeRawCode);
+const uint32_t Thumb2RelativePatcherTest::kPcInsnOffset = 8u;
+
+void Thumb2RelativePatcherTest::TestDexCacheReference(uint32_t dex_cache_arrays_begin,
+                                                      uint32_t element_offset) {
+  dex_cache_arrays_begin_ = dex_cache_arrays_begin;
+  LinkerPatch patches[] = {
+      LinkerPatch::DexCacheArrayPatch(0u, nullptr, kPcInsnOffset, element_offset),
+      LinkerPatch::DexCacheArrayPatch(4u, nullptr, kPcInsnOffset, element_offset),
+  };
+  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches),
+                       dex_cache_arrays_begin_ + element_offset);
+}
+
+void Thumb2RelativePatcherTest::TestStringReference(uint32_t string_offset) {
+  constexpr uint32_t kStringIndex = 1u;
+  string_index_to_offset_map_.Put(kStringIndex, string_offset);
+  LinkerPatch patches[] = {
+      LinkerPatch::RelativeStringPatch(0u, nullptr, kPcInsnOffset, kStringIndex),
+      LinkerPatch::RelativeStringPatch(4u, nullptr, kPcInsnOffset, kStringIndex),
+  };
+  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
+}
+
+void Thumb2RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
+                                                     uint32_t target_offset) {
+  AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
+  Link();
+
+  uint32_t method1_offset = GetMethodOffset(1u);
+  uint32_t pc_base_offset = method1_offset + kPcInsnOffset + 4u /* PC adjustment */;
+  uint32_t diff = target_offset - pc_base_offset;
+  // Distribute the bits of the diff between the MOVW and MOVT:
+  uint32_t diffw = diff & 0xffffu;
+  uint32_t difft = diff >> 16;
+  uint32_t movw = 0xf2400000u |           // MOVW r0, #0 (placeholder),
+      ((diffw & 0xf000u) << (16 - 12)) |  // move imm4 from bits 12-15 to bits 16-19,
+      ((diffw & 0x0800u) << (26 - 11)) |  // move imm from bit 11 to bit 26,
+      ((diffw & 0x0700u) << (12 - 8)) |   // move imm3 from bits 8-10 to bits 12-14,
+      ((diffw & 0x00ffu));                // keep imm8 at bits 0-7.
+  uint32_t movt = 0xf2c00000u |           // MOVT r0, #0 (placeholder),
+      ((difft & 0xf000u) << (16 - 12)) |  // move imm4 from bits 12-15 to bits 16-19,
+      ((difft & 0x0800u) << (26 - 11)) |  // move imm from bit 11 to bit 26,
+      ((difft & 0x0700u) << (12 - 8)) |   // move imm3 from bits 8-10 to bits 12-14,
+      ((difft & 0x00ffu));                // keep imm8 at bits 0-7.
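+  // Worked example (illustrative): for diff = 0x12345678, diffw = 0x5678 and
+  // difft = 0x1234, so the encodings above become
+  //   movw = 0xf2400000 | 0x50000 | 0x6000 | 0x78 = 0xf2456078
+  //   movt = 0xf2c00000 | 0x10000 | 0x2000 | 0x34 = 0xf2c12034
+  // (the imm4/i/imm3/imm8 split mirrors Thumb2RelativePatcher's patching code).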
+  const uint8_t expected_code[] = {
+      static_cast<uint8_t>(movw >> 16), static_cast<uint8_t>(movw >> 24),
+      static_cast<uint8_t>(movw >> 0), static_cast<uint8_t>(movw >> 8),
+      static_cast<uint8_t>(movt >> 16), static_cast<uint8_t>(movt >> 24),
+      static_cast<uint8_t>(movt >> 0), static_cast<uint8_t>(movt >> 8),
+      0x78, 0x44,
+  };
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
+
+TEST_F(Thumb2RelativePatcherTest, CallSelf) {
+  LinkerPatch patches[] = {
+      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
+  };
+  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
+  Link();
+
+  static const uint8_t expected_code[] = {
+      0xff, 0xf7, 0xfe, 0xff
+  };
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
+
+TEST_F(Thumb2RelativePatcherTest, CallOther) {
+  LinkerPatch method1_patches[] = {
+      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
+  };
+  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
+  LinkerPatch method2_patches[] = {
+      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
+  };
+  AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
+  Link();
+
+  uint32_t method1_offset = GetMethodOffset(1u);
+  uint32_t method2_offset = GetMethodOffset(2u);
+  uint32_t diff_after = method2_offset - (method1_offset + 4u /* PC adjustment */);
+  ASSERT_EQ(diff_after & 1u, 0u);
+  ASSERT_LT(diff_after >> 1, 1u << 8);  // Simple encoding, (diff_after >> 1) fits into 8 bits.
+  static const uint8_t method1_expected_code[] = {
+      0x00, 0xf0, static_cast<uint8_t>(diff_after >> 1), 0xf8
+  };
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
+  uint32_t diff_before = method1_offset - (method2_offset + 4u /* PC adjustment */);
+  ASSERT_EQ(diff_before & 1u, 0u);
+  ASSERT_GE(diff_before, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0.
+  auto method2_expected_code = GenNopsAndBl(0u, kBlMinus256 | ((diff_before >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
+}
+
+TEST_F(Thumb2RelativePatcherTest, CallTrampoline) {
+  LinkerPatch patches[] = {
+      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
+  };
+  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
+  Link();
+
+  uint32_t method1_offset = GetMethodOffset(1u);
+  uint32_t diff = kTrampolineOffset - (method1_offset + 4u);
+  ASSERT_EQ(diff & 1u, 0u);
+  ASSERT_GE(diff, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned).
+  auto expected_code = GenNopsAndBl(0u, kBlMinus256 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
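+
+// Worked example (illustrative): in CallSelf above, the BL branches to its own
+// address, so diff = 0 - 4 (PC adjustment) = -4 and diff >> 1 = -2. The BL
+// encoding is then 0xf7fffffe, stored halfword-first as { 0xff, 0xf7, 0xfe, 0xff },
+// which is exactly the expected_code checked there.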
+
+TEST_F(Thumb2RelativePatcherTest, CallTrampolineTooFar) {
+  constexpr uint32_t missing_method_index = 1024u;
+  auto method3_raw_code = GenNopsAndBl(3u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method3 = 3u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method3_code(method3_raw_code);
+  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
+  LinkerPatch method3_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, missing_method_index),
+  };
+
+  constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            method3_code,
+                                            ArrayRef<const LinkerPatch>(method3_patches),
+                                            just_over_max_negative_disp - bl_offset_in_method3);
+  ASSERT_FALSE(thunk_in_gap);  // There should be a thunk but it should be after the method2.
+  ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first);
+
+  // Check linked code.
+  uint32_t method3_offset = GetMethodOffset(3u);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
+  ASSERT_EQ(diff & 1u, 0u);
+  ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
+  auto expected_code = GenNopsAndBl(3u, kBlPlus0 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef<const uint8_t>(expected_code)));
+  EXPECT_TRUE(CheckThunk(thunk_offset));
+}
+
+TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarAfter) {
+  auto method1_raw_code = GenNopsAndBl(3u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method1 = 3u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method1_code(method1_raw_code);
+  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
+  LinkerPatch method1_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, 3u),
+  };
+
+  constexpr uint32_t max_positive_disp = 16 * MB - 2u + 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(method1_code,
+                                            ArrayRef<const LinkerPatch>(method1_patches),
+                                            kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            bl_offset_in_method1 + max_positive_disp);
+  ASSERT_FALSE(thunk_in_gap);  // There should be no thunk.
+
+  // Check linked code.
+  auto expected_code = GenNopsAndBl(3u, kBlPlusMax);
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+}
+
+TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarBefore) {
+  auto method3_raw_code = GenNopsAndBl(2u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method3 = 2u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method3_code(method3_raw_code);
+  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
+  LinkerPatch method3_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, 1u),
+  };
+
+  constexpr uint32_t max_negative_disp = 16 * MB - 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            method3_code,
+                                            ArrayRef<const LinkerPatch>(method3_patches),
+                                            max_negative_disp - bl_offset_in_method3);
+  ASSERT_FALSE(thunk_in_gap);  // There should be no thunk.
+
+  // Check linked code.
+  auto expected_code = GenNopsAndBl(2u, kBlMinusMax);
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef<const uint8_t>(expected_code)));
+}
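+
+// Worked example (illustrative): in CallOtherAlmostTooFarAfter the BL sits at
+// method1_offset + 6 and the distance to method3 is exactly
+// kMaxPositiveDisplacement = 16 MiB + 2 measured from the patch location, so no
+// thunk is needed and the linked BL is the extreme encoding kBlPlusMax
+// (0xf3ffd7ff); CallOtherAlmostTooFarBefore mirrors this with kBlMinusMax.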
+
+TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) {
+  auto method1_raw_code = GenNopsAndBl(2u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method1 = 2u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method1_code(method1_raw_code);
+  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
+  LinkerPatch method1_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, 3u),
+  };
+
+  constexpr uint32_t just_over_max_positive_disp = 16 * MB + 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(method1_code,
+                                            ArrayRef<const LinkerPatch>(method1_patches),
+                                            kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            bl_offset_in_method1 + just_over_max_positive_disp);
+  ASSERT_TRUE(thunk_in_gap);
+
+  uint32_t method1_offset = GetMethodOffset(1u);
+  uint32_t method3_offset = GetMethodOffset(3u);
+  uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
+  ASSERT_TRUE(IsAligned<kArmAlignment>(method3_header_offset));
+  uint32_t thunk_offset = method3_header_offset - CompiledCode::AlignCode(ThunkSize(), kThumb2);
+  ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
+  uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
+  ASSERT_EQ(diff & 1u, 0u);
+  ASSERT_GE(diff, 16 * MB - (1u << 9));  // Simple encoding, unknown bits fit into the low 8 bits.
+  auto expected_code = GenNopsAndBl(2u, 0xf3ffd700 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+  CheckThunk(thunk_offset);
+}
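+
+// Worked example (illustrative): here the gap makes the BL-to-method3 distance
+// 16 MiB + 4 from the patch location, just past kMaxPositiveDisplacement
+// (16 MiB + 2), so a thunk is emitted in the gap right before method3's header
+// and the BL is retargeted at the thunk instead.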
+
+TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) {
+  auto method3_raw_code = GenNopsAndBl(3u, kBlPlus0);
+  constexpr uint32_t bl_offset_in_method3 = 3u * 2u;  // After NOPs.
+  ArrayRef<const uint8_t> method3_code(method3_raw_code);
+  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
+  LinkerPatch method3_patches[] = {
+      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, 1u),
+  };
+
+  constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */;
+  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
+                                            ArrayRef<const LinkerPatch>(),
+                                            method3_code,
+                                            ArrayRef<const LinkerPatch>(method3_patches),
+                                            just_over_max_negative_disp - bl_offset_in_method3);
+  ASSERT_FALSE(thunk_in_gap);  // There should be a thunk but it should be after the method2.
+
+  // Check linked code.
+  uint32_t method3_offset = GetMethodOffset(3u);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
+  uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
+  ASSERT_EQ(diff & 1u, 0u);
+  ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
+  auto expected_code = GenNopsAndBl(3u, kBlPlus0 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef<const uint8_t>(expected_code)));
+  EXPECT_TRUE(CheckThunk(thunk_offset));
+}
+
+TEST_F(Thumb2RelativePatcherTest, DexCacheReference1) {
+  TestDexCacheReference(0x00ff0000u, 0x00fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, DexCacheReference2) {
+  TestDexCacheReference(0x02ff0000u, 0x05fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, DexCacheReference3) {
+  TestDexCacheReference(0x08ff0000u, 0x08fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, DexCacheReference4) {
+  TestDexCacheReference(0xd0ff0000u, 0x60fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, StringReference1) {
+  TestStringReference(0x00ff00fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, StringReference2) {
+  TestStringReference(0x02ff05fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, StringReference3) {
+  TestStringReference(0x08ff08fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+TEST_F(Thumb2RelativePatcherTest, StringReference4) {
+  TestStringReference(0xd0ff60fcu);
+  ASSERT_LT(GetMethodOffset(1u), 0xfcu);
+}
+
+}  // namespace linker
+}  // namespace art
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
new file mode 100644
index 000000000..b4ecbd8c5
--- /dev/null
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "linker/arm64/relative_patcher_arm64.h"
+
+#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
+#include "compiled_method.h"
+#include "driver/compiler_driver.h"
+#include "linker/output_stream.h"
+#include "oat.h"
+#include "oat_quick_method_header.h"
+#include "utils/arm64/assembler_arm64.h"
+
+namespace art {
+namespace linker {
+
+namespace {
+
+inline bool IsAdrpPatch(const LinkerPatch& patch) {
+  LinkerPatch::Type type = patch.GetType();
+  return
+      (type == LinkerPatch::Type::kStringRelative || type == LinkerPatch::Type::kDexCacheArray) &&
+      patch.LiteralOffset() == patch.PcInsnOffset();
+}
+
+}  // anonymous namespace
+
+Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
+                                           const Arm64InstructionSetFeatures* features)
+    : ArmBaseRelativePatcher(provider, kArm64, CompileThunkCode(),
+                             kMaxPositiveDisplacement, kMaxNegativeDisplacement),
+      fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
+      reserved_adrp_thunks_(0u),
+      processed_adrp_thunks_(0u) {
+  if (fix_cortex_a53_843419_) {
+    adrp_thunk_locations_.reserve(16u);
+    current_method_thunks_.reserve(16u * kAdrpThunkSize);
+  }
+}
+
+uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
+                                            const CompiledMethod* compiled_method,
+                                            MethodReference method_ref) {
+  if (!fix_cortex_a53_843419_) {
+    DCHECK(adrp_thunk_locations_.empty());
+    return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
+  }
+
+  // Add thunks for previous method if any.
+  if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
+    size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
+    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+    reserved_adrp_thunks_ = adrp_thunk_locations_.size();
+  }
+
+  // Count the number of ADRP insns as the upper bound on the number of thunks needed
+  // and use it to reserve space for other linker patches.
+  size_t num_adrp = 0u;
+  DCHECK(compiled_method != nullptr);
+  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+    if (IsAdrpPatch(patch)) {
+      ++num_adrp;
+    }
+  }
+  offset = ReserveSpaceInternal(offset, compiled_method, method_ref, kAdrpThunkSize * num_adrp);
+  if (num_adrp == 0u) {
+    return offset;
+  }
+
+  // Now that we have the actual offset where the code will be placed, locate the ADRP insns
+  // that actually require the thunk.
+  uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
+  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
+  uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
+  DCHECK(compiled_method != nullptr);
+  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
+    if (IsAdrpPatch(patch)) {
+      uint32_t patch_offset = quick_code_offset + patch.LiteralOffset();
+      if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) {
+        adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset);
+        thunk_offset += kAdrpThunkSize;
+      }
+    }
+  }
+  return offset;
+}
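+
+// Illustration (assuming the bookkeeping above): a method with two ADRP patches
+// that land at code offsets ending in 0xff8 or 0xffc reserves 2 * kAdrpThunkSize
+// bytes after its code; each affected ADRP is later rewritten as a B to its
+// thunk, which performs the ADRP away from the erratum-triggering address and
+// branches back (see PatchPcRelativeReference() below).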
+
+uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
+  if (!fix_cortex_a53_843419_) {
+    DCHECK(adrp_thunk_locations_.empty());
+  } else {
+    // Add thunks for the last method if any.
+    if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
+      size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
+      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
+      reserved_adrp_thunks_ = adrp_thunk_locations_.size();
+    }
+  }
+  return ArmBaseRelativePatcher::ReserveSpaceEnd(offset);
+}
+
+uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
+  if (fix_cortex_a53_843419_) {
+    if (!current_method_thunks_.empty()) {
+      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
+      if (kIsDebugBuild) {
+        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
+        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
+        CHECK_LE(num_thunks, processed_adrp_thunks_);
+        for (size_t i = 0u; i != num_thunks; ++i) {
+          const auto& entry = adrp_thunk_locations_[processed_adrp_thunks_ - num_thunks + i];
+          CHECK_EQ(entry.second, aligned_offset + i * kAdrpThunkSize);
+        }
+      }
+      uint32_t aligned_code_delta = aligned_offset - offset;
+      if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
+        return 0u;
+      }
+      if (!WriteMiscThunk(out, ArrayRef<const uint8_t>(current_method_thunks_))) {
+        return 0u;
+      }
+      offset = aligned_offset + current_method_thunks_.size();
+      current_method_thunks_.clear();
+    }
+  }
+  return ArmBaseRelativePatcher::WriteThunks(out, offset);
+}
+
+void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
+                                     uint32_t literal_offset,
+                                     uint32_t patch_offset,
+                                     uint32_t target_offset) {
+  DCHECK_LE(literal_offset + 4u, code->size());
+  DCHECK_EQ(literal_offset & 3u, 0u);
+  DCHECK_EQ(patch_offset & 3u, 0u);
+  DCHECK_EQ(target_offset & 3u, 0u);
+  uint32_t displacement = CalculateDisplacement(patch_offset, target_offset & ~1u);
+  DCHECK_EQ(displacement & 3u, 0u);
+  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
+  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
+  insn |= 0x94000000;  // BL
+
+  // Check that we're just overwriting an existing BL.
+  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
+  // Write the new BL.
+  SetInsn(code, literal_offset, insn);
+}
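+
+// Worked example (illustrative): if the BL at patch_offset 0x1000 must reach
+// target_offset 0x5000, displacement = 0x4000, (0x4000 & 0x0fffffffu) >> 2 =
+// 0x1000, and the patched instruction is 0x94000000 | 0x1000 = 0x94001000,
+// i.e. a BL forward by 0x4000 bytes.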
+
+void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
+                                                    const LinkerPatch& patch,
+                                                    uint32_t patch_offset,
+                                                    uint32_t target_offset) {
+  DCHECK_EQ(patch_offset & 3u, 0u);
+  DCHECK_EQ(target_offset & 3u, 0u);
+  uint32_t literal_offset = patch.LiteralOffset();
+  uint32_t insn = GetInsn(code, literal_offset);
+  uint32_t pc_insn_offset = patch.PcInsnOffset();
+  uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
+  bool wide = (insn & 0x40000000) != 0;
+  uint32_t shift = wide ? 3u : 2u;
+  if (literal_offset == pc_insn_offset) {
+    // Check it's an ADRP with imm == 0 (unset).
+    DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
+        << literal_offset << ", " << pc_insn_offset << ", 0x" << std::hex << insn;
+    if (fix_cortex_a53_843419_ && processed_adrp_thunks_ != adrp_thunk_locations_.size() &&
+        adrp_thunk_locations_[processed_adrp_thunks_].first == patch_offset) {
+      DCHECK(NeedsErratum843419Thunk(ArrayRef<const uint8_t>(*code),
+                                     literal_offset, patch_offset));
+      uint32_t thunk_offset = adrp_thunk_locations_[processed_adrp_thunks_].second;
+      uint32_t adrp_disp = target_offset - (thunk_offset & ~0xfffu);
+      uint32_t adrp = PatchAdrp(insn, adrp_disp);
+
+      uint32_t out_disp = thunk_offset - patch_offset;
+      DCHECK_EQ(out_disp & 3u, 0u);
+      DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u);  // 28-bit signed.
+      insn = (out_disp & 0x0fffffffu) >> 2;
+      insn |= 0x14000000;  // B <thunk>
+
+      uint32_t back_disp = -out_disp;
+      DCHECK_EQ(back_disp & 3u, 0u);
+      DCHECK((back_disp >> 27) == 0u || (back_disp >> 27) == 31u);  // 28-bit signed.
+      uint32_t b_back = (back_disp & 0x0fffffffu) >> 2;
+      b_back |= 0x14000000;  // B <back>
+      size_t thunks_code_offset = current_method_thunks_.size();
+      current_method_thunks_.resize(thunks_code_offset + kAdrpThunkSize);
+      SetInsn(&current_method_thunks_, thunks_code_offset, adrp);
+      SetInsn(&current_method_thunks_, thunks_code_offset + 4u, b_back);
+      static_assert(kAdrpThunkSize == 2 * 4u, "thunk has 2 instructions");
+
+      processed_adrp_thunks_ += 1u;
+    } else {
+      insn = PatchAdrp(insn, disp);
+    }
+    // Write the new ADRP (or B to the erratum 843419 thunk).
+    SetInsn(code, literal_offset, insn);
+  } else {
+    if ((insn & 0xfffffc00) == 0x91000000) {
+      // ADD immediate, 64-bit with imm12 == 0 (unset).
+      DCHECK(patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
+      shift = 0u;  // No shift for ADD.
+    } else {
+      // LDR 32-bit or 64-bit with imm12 == 0 (unset).
+      DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray) << patch.GetType();
+      DCHECK_EQ(insn & 0xbffffc00, 0xb9400000) << std::hex << insn;
+    }
+    if (kIsDebugBuild) {
+      uint32_t adrp = GetInsn(code, pc_insn_offset);
+      if ((adrp & 0x9f000000u) != 0x90000000u) {
+        CHECK(fix_cortex_a53_843419_);
+        CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B <thunk>
+        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
+        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
+        CHECK_LE(num_thunks, processed_adrp_thunks_);
+        uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
+        for (size_t i = processed_adrp_thunks_ - num_thunks; ; ++i) {
+          CHECK_NE(i, processed_adrp_thunks_);
+          if (adrp_thunk_locations_[i].first == b_offset) {
+            size_t idx = num_thunks - (processed_adrp_thunks_ - i);
+            adrp = GetInsn(&current_method_thunks_, idx * kAdrpThunkSize);
+            break;
+          }
+        }
+      }
+      CHECK_EQ(adrp & 0x9f00001fu,                   // Check that pc_insn_offset points
+               0x90000000 | ((insn >> 5) & 0x1fu));  // to ADRP with matching register.
+    }
+    uint32_t imm12 = (disp & 0xfffu) >> shift;
+    insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
+    SetInsn(code, literal_offset, insn);
+  }
+}
+
+std::vector<uint8_t> Arm64RelativePatcher::CompileThunkCode() {
+  // The thunk just uses the entry point in the ArtMethod. This works even for calls
+  // to the generic JNI and interpreter trampolines.
+  ArenaPool pool;
+  ArenaAllocator arena(&pool);
+  arm64::Arm64Assembler assembler(&arena);
+  Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+      kArm64PointerSize).Int32Value());
+  assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
+  // Ensure we emit the literal pool.
+  assembler.FinalizeCode();
+  std::vector<uint8_t> thunk_code(assembler.CodeSize());
+  MemoryRegion code(thunk_code.data(), thunk_code.size());
+  assembler.FinalizeInstructions(code);
+  return thunk_code;
+}
+
+uint32_t Arm64RelativePatcher::PatchAdrp(uint32_t adrp, uint32_t disp) {
+  return (adrp & 0x9f00001fu) |  // Clear offset bits, keep ADRP with destination reg.
+      // Bottom 12 bits are ignored, the next 2 lowest bits are encoded in bits 29-30.
+      ((disp & 0x00003000u) << (29 - 12)) |
+      // The next 16 bits are encoded in bits 5-22.
+      ((disp & 0xffffc000u) >> (12 + 2 - 5)) |
+      // Since the target_offset is based on the beginning of the oat file and the
+      // image space precedes the oat file, the target_offset into image space will
+      // be negative yet passed as uint32_t. Therefore we limit the displacement
+      // to +-2GiB (rather than the maximum +-4GiB) and determine the sign bit from
+      // the highest bit of the displacement. This is encoded in bit 23.
+      ((disp & 0x80000000u) >> (31 - 23));
}
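+
+// Worked example (illustrative): for a page-aligned disp = 0x12345000,
+//   (disp & 0x00003000u) << 17 = 0x20000000  (immlo, bits 29-30)
+//   (disp & 0xffffc000u) >> 9  = 0x00091a20  (immhi, bits 5-22)
+//   (disp & 0x80000000u) >> 8  = 0           (sign, bit 23)
+// so patching an "ADRP x0, #0" placeholder (0x90000000) yields 0xb0091a20,
+// an ADRP reaching +0x12345000 under the +-2GiB limit described above.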
+
+bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
+                                                   uint32_t literal_offset,
+                                                   uint32_t patch_offset) {
+  DCHECK_EQ(patch_offset & 0x3u, 0u);
+  if ((patch_offset & 0xff8) == 0xff8) {  // ...ff8 or ...ffc
+    uint32_t adrp = GetInsn(code, literal_offset);
+    DCHECK_EQ(adrp & 0x9f000000, 0x90000000);
+    uint32_t next_offset = patch_offset + 4u;
+    uint32_t next_insn = GetInsn(code, literal_offset + 4u);
+
+    // Below we avoid patching sequences where the adrp is followed by a load which can easily
+    // be proved to be aligned.
+
+    // First check if the next insn is the LDR using the result of the ADRP.
+    // LDR <Wt>, [<Xn>, #pimm], where <Xn> == ADRP destination reg.
+    if ((next_insn & 0xffc00000) == 0xb9400000 &&
+        (((next_insn >> 5) ^ adrp) & 0x1f) == 0) {
+      return false;
+    }
+
+    // And since LinkerPatch::Type::kStringRelative is using the result of the ADRP
+    // for an ADD immediate, check for that as well. We generalize a bit to include
+    // ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination or stores
+    // the result to a different register.
+    if ((next_insn & 0x1f000000) == 0x11000000 &&
+        ((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) {
+      return false;
+    }
+
+    // LDR <Wt>,