android-platform-art-8.1.0+r23/.gitignore:

JIT_ART
**/__pycache__/**

android-platform-art-8.1.0+r23/Android.bp:

// TODO: These should be handled with transitive static library dependencies.
art_static_dependencies = [
    // Note: the order is important because of static linking resolution.
    "libziparchive",
    "libnativehelper",
    "libnativebridge",
    "libnativeloader",
    "libsigchain_dummy",
    "liblog",
    "libz",
    "libbacktrace",
    "libcutils",
    "libunwindbacktrace",
    "libutils",
    "libbase",
    "liblz4",
    "liblzma",
]

subdirs = [
    "benchmark",
    "build",
    "cmdline",
    "compiler",
    "dalvikvm",
    "dex2oat",
    "dexdump",
    "dexlayout",
    "dexlist",
    "dexoptanalyzer",
    "disassembler",
    "imgdiag",
    "oatdump",
    "patchoat",
    "profman",
    "runtime",
    "sigchainlib",
    "test",
    "tools/cpp-define-generator",
    "tools/dmtracedump",
]

android-platform-art-8.1.0+r23/Android.mk:

#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

LOCAL_PATH := $(call my-dir)
art_path := $(LOCAL_PATH)

########################################################################
# clean-oat rules
#

include $(art_path)/build/Android.common_path.mk
include $(art_path)/build/Android.oat.mk

# Following the example of build's dont_bother for clean targets.
art_dont_bother := false
ifneq (,$(filter clean-oat%,$(MAKECMDGOALS)))
  art_dont_bother := true
endif
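# A minimal standalone sketch (not from the original file) of how the
# MAKECMDGOALS guard above behaves; "sketch.mk" and "goal" are hypothetical
# names used only for illustration:
#
#   # sketch.mk
#   ifneq (,$(filter clean-oat%,$(MAKECMDGOALS)))
#     $(info clean goal requested - other rules would be skipped)
#   endif
#   clean-oat-host: ; @true
#   goal: ; @true
#
#   $ make -f sketch.mk clean-oat-host   # filter matches -> message printed
#   $ make -f sketch.mk goal             # filter is empty -> no message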
# Don't bother with tests unless there is a test-art*, build-art*, or related target.
art_test_bother := false
ifneq (,$(filter tests test-art% valgrind-test-art% build-art% checkbuild,$(MAKECMDGOALS)))
  art_test_bother := true
endif

.PHONY: clean-oat
clean-oat: clean-oat-host clean-oat-target

.PHONY: clean-oat-host
clean-oat-host:
	find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f
ifneq ($(TMPDIR),)
	rm -rf $(TMPDIR)/$(USER)/test-*/dalvik-cache/*
	rm -rf $(TMPDIR)/android-data/dalvik-cache/*
else
	rm -rf /tmp/$(USER)/test-*/dalvik-cache/*
	rm -rf /tmp/android-data/dalvik-cache/*
endif

.PHONY: clean-oat-target
clean-oat-target:
	adb root
	adb wait-for-device remount
	adb shell rm -rf $(ART_TARGET_NATIVETEST_DIR)
	adb shell rm -rf $(ART_TARGET_TEST_DIR)
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*/*
	adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$(DEX2OAT_TARGET_ARCH)
	adb shell rm -rf system/app/$(DEX2OAT_TARGET_ARCH)
ifdef TARGET_2ND_ARCH
	adb shell rm -rf $(DEXPREOPT_BOOT_JAR_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
	adb shell rm -rf system/app/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)
endif
	adb shell rm -rf data/run-test/test-*/dalvik-cache/*

ifneq ($(art_dont_bother),true)

########################################################################
# cpplint rules to style check art source files

include $(art_path)/build/Android.cpplint.mk

########################################################################
# product rules

include $(art_path)/oatdump/Android.mk
include $(art_path)/tools/Android.mk
include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
include $(art_path)/libart_fake/Android.mk

ART_HOST_DEPENDENCIES := \
    $(ART_HOST_EXECUTABLES) \
    $(ART_HOST_DEX_DEPENDENCIES) \
    $(ART_HOST_SHARED_LIBRARY_DEPENDENCIES)
ifeq ($(ART_BUILD_HOST_DEBUG),true)
ART_HOST_DEPENDENCIES += $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
endif

ART_TARGET_DEPENDENCIES := \
    $(ART_TARGET_EXECUTABLES) \
    $(ART_TARGET_DEX_DEPENDENCIES) \
    $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES)
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
ART_TARGET_DEPENDENCIES += $(ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
endif

########################################################################
# test rules

ifeq ($(art_test_bother),true)

# All the dependencies that must be built ahead of sync-ing them onto the target device.
TEST_ART_TARGET_SYNC_DEPS :=

include $(art_path)/build/Android.common_test.mk
include $(art_path)/build/Android.gtest.mk
include $(art_path)/test/Android.run-test.mk

TEST_ART_ADB_ROOT_AND_REMOUNT := \
    (adb root && \
     adb wait-for-device remount && \
     ((adb shell touch /system/testfile && \
       (adb shell rm /system/testfile || true)) || \
      (adb disable-verity && \
       adb reboot && \
       adb wait-for-device root && \
       adb wait-for-device remount)))
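# (Illustrative, not from the original file.) The macro above first probes
# whether /system is already writable by touching and then removing a scratch
# file, and only falls back to `adb disable-verity` plus a reboot when that
# probe fails. The same probe can be run by hand from a shell:
#
#   $ adb root && adb wait-for-device remount
#   $ adb shell touch /system/testfile && adb shell rm /system/testfile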
# Sync test files to the target, depends upon all things that must be pushed to the target.
.PHONY: test-art-target-sync
# Check if we need to sync. In case ART_TEST_ANDROID_ROOT is not empty,
# the code below uses 'adb push' instead of 'adb sync', which does not
# check if the files on the device have changed.
ifneq ($(ART_TEST_NO_SYNC),true)
ifeq ($(ART_TEST_ANDROID_ROOT),)
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
	adb sync system && adb sync data
else
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
	adb wait-for-device push $(ANDROID_PRODUCT_OUT)/system $(ART_TEST_ANDROID_ROOT)
# Push the contents of the `data` dir into `/data` on the device. If `/data`
# already exists on the device, it is not overwritten, but its contents are
# updated.
	adb push $(ANDROID_PRODUCT_OUT)/data /
endif
endif

# "mm test-art" to build and run all tests on host and device.
.PHONY: test-art
test-art: test-art-host test-art-target
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-gtest
test-art-gtest: test-art-host-gtest test-art-target-gtest
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-run-test
test-art-run-test: test-art-host-run-test test-art-target-run-test
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

########################################################################
# host test rules

VIXL_TEST_DEPENDENCY :=
# We can only run the vixl tests on 64-bit hosts (vixl testing issue) when it's a
# top-level build (to declare the vixl test rule).
ifneq ($(HOST_PREFER_32_BIT),true)
ifeq ($(ONE_SHOT_MAKEFILE),)
VIXL_TEST_DEPENDENCY := run-vixl-tests
endif
endif

.PHONY: test-art-host-vixl
test-art-host-vixl: $(VIXL_TEST_DEPENDENCY)

# "mm test-art-host" to build and run all host tests.
.PHONY: test-art-host
test-art-host: test-art-host-gtest test-art-host-run-test \
               test-art-host-vixl test-art-host-dexdump
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All host tests that run solely with the default compiler.
.PHONY: test-art-host-default
test-art-host-default: test-art-host-run-test-default
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All host tests that run solely with the optimizing compiler.
.PHONY: test-art-host-optimizing
test-art-host-optimizing: test-art-host-run-test-optimizing
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All host tests that run solely on the interpreter.
.PHONY: test-art-host-interpreter
test-art-host-interpreter: test-art-host-run-test-interpreter
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All host tests that run solely on the jit.
.PHONY: test-art-host-jit
test-art-host-jit: test-art-host-run-test-jit
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# Primary host architecture variants:
.PHONY: test-art-host$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(ART_PHONY_TEST_HOST_SUFFIX) \
    test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-default$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-optimizing$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-interpreter$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-jit$(ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# Secondary host architecture variants:
ifneq ($(HOST_PREFER_32_BIT),true)
.PHONY: test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-gtest$(2ND_ART_PHONY_TEST_HOST_SUFFIX) \
    test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
test-art-host-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX): test-art-host-run-test-jit$(2ND_ART_PHONY_TEST_HOST_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif

# Dexdump/list regression test.
.PHONY: test-art-host-dexdump
test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
	ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests

# Valgrind.
.PHONY: valgrind-test-art-host
valgrind-test-art-host: valgrind-test-art-host-gtest
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: valgrind-test-art-host32
valgrind-test-art-host32: valgrind-test-art-host-gtest32
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: valgrind-test-art-host64
valgrind-test-art-host64: valgrind-test-art-host-gtest64
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

########################################################################
# target test rules

# "mm test-art-target" to build and run all target tests.
.PHONY: test-art-target
test-art-target: test-art-target-gtest test-art-target-run-test
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All target tests that run solely with the default compiler.
.PHONY: test-art-target-default
test-art-target-default: test-art-target-run-test-default
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All target tests that run solely with the optimizing compiler.
.PHONY: test-art-target-optimizing
test-art-target-optimizing: test-art-target-run-test-optimizing
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All target tests that run solely on the interpreter.
.PHONY: test-art-target-interpreter
test-art-target-interpreter: test-art-target-run-test-interpreter
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# All target tests that run solely on the jit.
.PHONY: test-art-target-jit
test-art-target-jit: test-art-target-run-test-jit
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# Primary target architecture variants:
.PHONY: test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(ART_PHONY_TEST_TARGET_SUFFIX) \
    test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-default$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-jit$(ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

# Secondary target architecture variants:
ifdef TARGET_2ND_ARCH
.PHONY: test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-gtest$(2ND_ART_PHONY_TEST_TARGET_SUFFIX) \
    test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif

# Valgrind.
.PHONY: valgrind-test-art-target
valgrind-test-art-target: valgrind-test-art-target-gtest
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: valgrind-test-art-target32
valgrind-test-art-target32: valgrind-test-art-target-gtest32
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

.PHONY: valgrind-test-art-target64
valgrind-test-art-target64: valgrind-test-art-target-gtest64
	$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)

endif  # art_test_bother

#######################
# Fake packages for ART

# The art-runtime package depends on the core ART libraries and binaries. It exists so we can
# manipulate the set of things shipped, e.g., add debug versions and so on.
include $(CLEAR_VARS)
LOCAL_MODULE := art-runtime

# Base requirements.
LOCAL_REQUIRED_MODULES := \
    dalvikvm \
    dex2oat \
    dexoptanalyzer \
    libart \
    libart-compiler \
    libopenjdkjvm \
    libopenjdkjvmti \
    patchoat \
    profman \

# For nosy apps, we provide a fake library that avoids namespace issues and gives some warnings.
LOCAL_REQUIRED_MODULES += libart_fake

# Potentially add in debug variants:
#
# * We will never add them if PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD = false.
# * We will always add them if PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD = true.
# * Otherwise, we will add them by default to userdebug and eng builds.
art_target_include_debug_build := $(PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD)
ifneq (false,$(art_target_include_debug_build))
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
art_target_include_debug_build := true
endif
ifeq (true,$(art_target_include_debug_build))
LOCAL_REQUIRED_MODULES += \
    dex2oatd \
    dexoptanalyzerd \
    libartd \
    libartd-compiler \
    libopenjdkd \
    libopenjdkjvmd \
    libopenjdkjvmtid \
    patchoatd \
    profmand \

endif
endif
include $(BUILD_PHONY_PACKAGE)

# The art-tools package depends on helpers and tools that are useful for developers and on-device
# investigations.
include $(CLEAR_VARS)
LOCAL_MODULE := art-tools
LOCAL_REQUIRED_MODULES := \
    ahat \
    dexdiag \
    dexdump \
    dexlist \
    hprof-conv \
    oatdump \

include $(BUILD_PHONY_PACKAGE)
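# (Illustrative, not from the original file.) These phony packages are picked
# up by listing them in a product makefile; a hypothetical device.mk fragment
# would pull in the runtime and the developer tools roughly like this:
#
#   PRODUCT_PACKAGES += art-runtime
#   PRODUCT_PACKAGES += art-tools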
####################################################################################################
# Fake packages to ensure generation of libopenjdkd when one builds with mm/mmm/mmma.
#
# The library is required for starting a runtime in debug mode, but libartd does not depend on it
# (dependency cycle otherwise).
#
# Note: * As the package is phony (it exists only to create a dependency), the package name is
#         irrelevant.
#       * We make MULTILIB explicit to "both", just to state here that we want both libraries on
#         64-bit systems, even if it is the default.

# ART on the host.
ifeq ($(ART_BUILD_HOST_DEBUG),true)
include $(CLEAR_VARS)
LOCAL_MODULE := art-libartd-libopenjdkd-host-dependency
LOCAL_MULTILIB := both
LOCAL_REQUIRED_MODULES := libopenjdkd
LOCAL_IS_HOST_MODULE := true
include $(BUILD_PHONY_PACKAGE)
endif

# ART on the target.
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
include $(CLEAR_VARS)
LOCAL_MODULE := art-libartd-libopenjdkd-target-dependency
LOCAL_MULTILIB := both
LOCAL_REQUIRED_MODULES := libopenjdkd
include $(BUILD_PHONY_PACKAGE)
endif

########################################################################
# "m build-art" for quick minimal build
.PHONY: build-art
build-art: build-art-host build-art-target

.PHONY: build-art-host
build-art-host: $(HOST_OUT_EXECUTABLES)/art $(ART_HOST_DEPENDENCIES) $(HOST_CORE_IMG_OUTS)

.PHONY: build-art-target
build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TARGET_CORE_IMG_OUTS)

########################################################################
# Phony target for only building what go/lem requires on target.
.PHONY: build-art-target-golem
# Also include libartbenchmark; we always include it when running golem.
# libstdc++ is needed when building for ART_TARGET_LINUX.
ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so
build-art-target-golem: dex2oat dalvikvm patchoat linker libstdc++ \
                        $(TARGET_OUT_EXECUTABLES)/art \
                        $(TARGET_OUT)/etc/public.libraries.txt \
                        $(ART_TARGET_DEX_DEPENDENCIES) \
                        $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES) \
                        $(ART_TARGET_SHARED_LIBRARY_BENCHMARK) \
                        $(TARGET_CORE_IMG_OUT_BASE).art \
                        $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art
# Remove libartd.so from public.libraries.txt because golem builds won't have it.
	sed -i '/libartd.so/d' $(TARGET_OUT)/etc/public.libraries.txt

########################################################################
# Phony target for building what go/lem requires on host.
.PHONY: build-art-host-golem
# Also include libartbenchmark; we always include it when running golem.
ART_HOST_SHARED_LIBRARY_BENCHMARK := $(ART_HOST_OUT_SHARED_LIBRARIES)/libartbenchmark.so
build-art-host-golem: build-art-host \
                      $(ART_HOST_SHARED_LIBRARY_BENCHMARK)

########################################################################
# Rules for building all dependencies for tests.
.PHONY: build-art-host-tests
build-art-host-tests: build-art-host $(TEST_ART_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) $(ART_TEST_HOST_GTEST_DEPENDENCIES) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)

.PHONY: build-art-target-tests
build-art-target-tests: build-art-target $(TEST_ART_RUN_TEST_DEPENDENCIES) $(TEST_ART_TARGET_SYNC_DEPS) | $(TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES)

########################################################################
# Targets to switch back and forth from libdvm to libart.

.PHONY: use-art
use-art:
	adb root
	adb wait-for-device shell stop
	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
	adb shell start

.PHONY: use-artd
use-artd:
	adb root
	adb wait-for-device shell stop
	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
	adb shell start

.PHONY: use-dalvik
use-dalvik:
	adb root
	adb wait-for-device shell stop
	adb shell setprop persist.sys.dalvik.vm.lib.2 libdvm.so
	adb shell start

.PHONY: use-art-full
use-art-full:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter \"\"
	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
	adb shell setprop dalvik.vm.usejit false
	adb shell start

.PHONY: use-artd-full
use-artd-full:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter \"\"
	adb shell setprop dalvik.vm.image-dex2oat-filter \"\"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
	adb shell setprop dalvik.vm.usejit false
	adb shell start

.PHONY: use-art-jit
use-art-jit:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter "verify-at-runtime"
	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-at-runtime"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
	adb shell setprop dalvik.vm.usejit true
	adb shell start

.PHONY: use-art-interpret-only
use-art-interpret-only:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
	adb shell setprop dalvik.vm.usejit false
	adb shell start

.PHONY: use-artd-interpret-only
use-artd-interpret-only:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter "interpret-only"
	adb shell setprop dalvik.vm.image-dex2oat-filter "interpret-only"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libartd.so
	adb shell setprop dalvik.vm.usejit false
	adb shell start

.PHONY: use-art-verify-none
use-art-verify-none:
	adb root
	adb wait-for-device shell stop
	adb shell rm -rf $(ART_TARGET_DALVIK_CACHE_DIR)/*
	adb shell setprop dalvik.vm.dex2oat-filter "verify-none"
	adb shell setprop dalvik.vm.image-dex2oat-filter "verify-none"
	adb shell setprop persist.sys.dalvik.vm.lib.2 libart.so
	adb shell setprop dalvik.vm.usejit false
	adb shell start
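# (Illustrative usage, not from the original file.) Each of the targets above
# is invoked directly from a build shell; for example, to put a device into
# JIT-only mode and then restore full ahead-of-time compilation:
#
#   $ m use-art-jit    # dex2oat filter "verify-at-runtime", dalvik.vm.usejit true
#   $ m use-art-full   # empty dex2oat filters, dalvik.vm.usejit false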
########################################################################

endif  # !art_dont_bother

# Clear locally used variables.
art_dont_bother :=
art_test_bother :=
TEST_ART_TARGET_SYNC_DEPS :=

# Helper target that depends on boot image creation.
#
# Can be used, for example, to dump initialization failures:
#   m art-boot-image ART_BOOT_IMAGE_EXTRA_ARGS=--dump-init-failures=fails.txt
.PHONY: art-boot-image
art-boot-image: $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME)

.PHONY: art-job-images
art-job-images: \
    $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
    $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) \
    $(HOST_OUT_EXECUTABLES)/dex2oats \
    $(HOST_OUT_EXECUTABLES)/dex2oatds \
    $(HOST_OUT_EXECUTABLES)/profman

android-platform-art-8.1.0+r23/CleanSpec.mk:

# Copyright (C) 2007 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# If you don't need to do a full clean build but would like to touch
# a file or delete some intermediate files, add a clean step to the end
# of the list. These steps will only be run once, if they haven't been
# run before.
#
# E.g.:
#     $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
#     $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
#
# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
# files that are missing or have been moved.
#
# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
# Use $(OUT_DIR) to refer to the "out" directory.
#
# If you need to re-do something that's already mentioned, just copy
# the command and add it to the bottom of the list. E.g., if a change
# that you made last week required touching a file and a change you
# made today requires touching the same file, just copy the old
# touch step and add it to the end of the list.
#
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************

# For example:
#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)

# Switching to jemalloc requires deleting these files.
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libart_*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libartd_*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libart_*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libartd_*)

# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************

android-platform-art-8.1.0+r23/MODULE_LICENSE_APACHE2: (empty file)

android-platform-art-8.1.0+r23/NOTICE:

Copyright (c) 2005-2013, The Android Open Source Project

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

                             Apache License
                       Version 2.0, January 2004
                    http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS ------------------------------------------------------------------- For art/runtime/elf.h derived from external/llvm/include/llvm/Support/ELF.h ============================================================================== LLVM Release License ============================================================================== University of Illinois/NCSA Open Source License Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign. All rights reserved. Developed by: LLVM Team University of Illinois at Urbana-Champaign http://llvm.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. * Neither the names of the LLVM Team, University of Illinois at Urbana-Champaign, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. 
==============================================================================
Copyrights and Licenses for Third Party Software Distributed with LLVM:
==============================================================================
The LLVM software contains code written by third parties. Such software will
have its own individual LICENSE.TXT file in the directory in which it appears.
This file will describe the copyrights, license, and restrictions which apply
to that code.

The disclaimer of warranty in the University of Illinois Open Source License
applies to all code in the LLVM Distribution, and nothing in any of the other
licenses gives permission to use the names of the LLVM Team or the University
of Illinois to endorse or promote products derived from this Software.

The following pieces of software have additional or alternate copyrights,
licenses, and/or restrictions:

Program             Directory
-------             ---------
Autoconf            llvm/autoconf
                    llvm/projects/ModuleMaker/autoconf
Google Test         llvm/utils/unittest/googletest
OpenBSD regex       llvm/lib/Support/{reg*, COPYRIGHT.regex}
pyyaml tests        llvm/test/YAMLParser/{*.data, LICENSE.TXT}
ARM contributions   llvm/lib/Target/ARM/LICENSE.TXT
md5 contributions   llvm/lib/Support/MD5.cpp
                    llvm/include/llvm/Support/MD5.h

android-platform-art-8.1.0+r23/OWNERS:

ngeoffray@google.com
sehr@google.com
*

android-platform-art-8.1.0+r23/PREUPLOAD.cfg:

[Hook Scripts]
check_generated_files_up_to_date = tools/cpp-define-generator/presubmit-check-files-up-to-date
check_generated_tests_up_to_date = tools/test_presubmit.py
check_cpplint_on_changed_files = tools/cpplint_presubmit.py

android-platform-art-8.1.0+r23/benchmark/Android.bp:

//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

art_cc_library {
    name: "libartbenchmark",
    host_supported: true,
    defaults: ["art_defaults"],
    srcs: [
        "jni_loader.cc",
        "jobject-benchmark/jobject_benchmark.cc",
        "jni-perf/perf_jni.cc",
        "micro-native/micro_native.cc",
        "scoped-primitive-array/scoped_primitive_array.cc",
    ],
    shared_libs: [
        "libart",
        "libbacktrace",
        "libbase",
        "libnativehelper",
    ],
    clang: true,
    target: {
        android: {
            shared_libs: ["libdl"],
        },
        host: {
            host_ldlibs: ["-ldl", "-lpthread"],
        },
    },
    cflags: [
        "-Wno-frame-larger-than=",
    ],
}

art_cc_library {
    name: "libartbenchmark-micronative-host",
    host_supported: true,
    device_supported: false,
    defaults: ["art_debug_defaults", "art_defaults"],
    srcs: [
        "jni_loader.cc",
        "micro-native/micro_native.cc",
    ],
    shared_libs: [],
    static_libs: [],
    header_libs: ["jni_headers"],
    stl: "libc++_static",
    clang: true,
    target: {
        host: {
            host_ldlibs: ["-ldl", "-lpthread"],
        },
    },
    cflags: [
        "-Wno-frame-larger-than=",
    ],
}

android-platform-art-8.1.0+r23/benchmark/const-class/info.txt:

Benchmarks for repeating const-class instructions in a loop.

android-platform-art-8.1.0+r23/benchmark/const-class/src/ConstClassBenchmark.java:

/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

public class ConstClassBenchmark {
    // Define 1025 classes with consecutive type indexes in the dex file.
    // The tests below rely on the knowledge that ART uses the low 10 bits
    // of the type index as the hash into the DexCache types array.
    // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10).
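    // Illustrative sketch, not part of the original benchmark: it checks the
    // collision property stated above, assuming the DexCache slot for a type
    // index is simply (typeIndex % 1024). The method name is hypothetical.
    static void checkDexCacheSlotAssumptions() {
        int cacheSize = 1024;  // 2^10 slots, i.e. the low 10 bits of the index.
        int n = 42;            // an arbitrary type index
        // n and n + 1024 map to the same DexCache slot ...
        if (n % cacheSize != (n + 1024) % cacheSize) throw new AssertionError();
        // ... while n + 1 and n + 1023 land on different slots.
        if ((n + 1) % cacheSize == (n + 1023) % cacheSize) throw new AssertionError();
    }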
public static class TestClass_0000 {} public static class TestClass_0001 {} public static class TestClass_0002 {} public static class TestClass_0003 {} public static class TestClass_0004 {} public static class TestClass_0005 {} public static class TestClass_0006 {} public static class TestClass_0007 {} public static class TestClass_0008 {} public static class TestClass_0009 {} public static class TestClass_0010 {} public static class TestClass_0011 {} public static class TestClass_0012 {} public static class TestClass_0013 {} public static class TestClass_0014 {} public static class TestClass_0015 {} public static class TestClass_0016 {} public static class TestClass_0017 {} public static class TestClass_0018 {} public static class TestClass_0019 {} public static class TestClass_0020 {} public static class TestClass_0021 {} public static class TestClass_0022 {} public static class TestClass_0023 {} public static class TestClass_0024 {} public static class TestClass_0025 {} public static class TestClass_0026 {} public static class TestClass_0027 {} public static class TestClass_0028 {} public static class TestClass_0029 {} public static class TestClass_0030 {} public static class TestClass_0031 {} public static class TestClass_0032 {} public static class TestClass_0033 {} public static class TestClass_0034 {} public static class TestClass_0035 {} public static class TestClass_0036 {} public static class TestClass_0037 {} public static class TestClass_0038 {} public static class TestClass_0039 {} public static class TestClass_0040 {} public static class TestClass_0041 {} public static class TestClass_0042 {} public static class TestClass_0043 {} public static class TestClass_0044 {} public static class TestClass_0045 {} public static class TestClass_0046 {} public static class TestClass_0047 {} public static class TestClass_0048 {} public static class TestClass_0049 {} public static class TestClass_0050 {} public static class TestClass_0051 {} public static class TestClass_0052 {} public static class TestClass_0053 {} public static class TestClass_0054 {} public static class TestClass_0055 {} public static class TestClass_0056 {} public static class TestClass_0057 {} public static class TestClass_0058 {} public static class TestClass_0059 {} public static class TestClass_0060 {} public static class TestClass_0061 {} public static class TestClass_0062 {} public static class TestClass_0063 {} public static class TestClass_0064 {} public static class TestClass_0065 {} public static class TestClass_0066 {} public static class TestClass_0067 {} public static class TestClass_0068 {} public static class TestClass_0069 {} public static class TestClass_0070 {} public static class TestClass_0071 {} public static class TestClass_0072 {} public static class TestClass_0073 {} public static class TestClass_0074 {} public static class TestClass_0075 {} public static class TestClass_0076 {} public static class TestClass_0077 {} public static class TestClass_0078 {} public static class TestClass_0079 {} public static class TestClass_0080 {} public static class TestClass_0081 {} public static class TestClass_0082 {} public static class TestClass_0083 {} public static class TestClass_0084 {} public static class TestClass_0085 {} public static class TestClass_0086 {} public static class TestClass_0087 {} public static class TestClass_0088 {} public static class TestClass_0089 {} public static class TestClass_0090 {} public static class TestClass_0091 {} public static class TestClass_0092 {} public static class 
TestClass_0093 {} public static class TestClass_0094 {} public static class TestClass_0095 {} public static class TestClass_0096 {} public static class TestClass_0097 {} public static class TestClass_0098 {} public static class TestClass_0099 {} public static class TestClass_0100 {} public static class TestClass_0101 {} public static class TestClass_0102 {} public static class TestClass_0103 {} public static class TestClass_0104 {} public static class TestClass_0105 {} public static class TestClass_0106 {} public static class TestClass_0107 {} public static class TestClass_0108 {} public static class TestClass_0109 {} public static class TestClass_0110 {} public static class TestClass_0111 {} public static class TestClass_0112 {} public static class TestClass_0113 {} public static class TestClass_0114 {} public static class TestClass_0115 {} public static class TestClass_0116 {} public static class TestClass_0117 {} public static class TestClass_0118 {} public static class TestClass_0119 {} public static class TestClass_0120 {} public static class TestClass_0121 {} public static class TestClass_0122 {} public static class TestClass_0123 {} public static class TestClass_0124 {} public static class TestClass_0125 {} public static class TestClass_0126 {} public static class TestClass_0127 {} public static class TestClass_0128 {} public static class TestClass_0129 {} public static class TestClass_0130 {} public static class TestClass_0131 {} public static class TestClass_0132 {} public static class TestClass_0133 {} public static class TestClass_0134 {} public static class TestClass_0135 {} public static class TestClass_0136 {} public static class TestClass_0137 {} public static class TestClass_0138 {} public static class TestClass_0139 {} public static class TestClass_0140 {} public static class TestClass_0141 {} public static class TestClass_0142 {} public static class TestClass_0143 {} public static class TestClass_0144 {} public static class TestClass_0145 {} public static class TestClass_0146 {} public static class TestClass_0147 {} public static class TestClass_0148 {} public static class TestClass_0149 {} public static class TestClass_0150 {} public static class TestClass_0151 {} public static class TestClass_0152 {} public static class TestClass_0153 {} public static class TestClass_0154 {} public static class TestClass_0155 {} public static class TestClass_0156 {} public static class TestClass_0157 {} public static class TestClass_0158 {} public static class TestClass_0159 {} public static class TestClass_0160 {} public static class TestClass_0161 {} public static class TestClass_0162 {} public static class TestClass_0163 {} public static class TestClass_0164 {} public static class TestClass_0165 {} public static class TestClass_0166 {} public static class TestClass_0167 {} public static class TestClass_0168 {} public static class TestClass_0169 {} public static class TestClass_0170 {} public static class TestClass_0171 {} public static class TestClass_0172 {} public static class TestClass_0173 {} public static class TestClass_0174 {} public static class TestClass_0175 {} public static class TestClass_0176 {} public static class TestClass_0177 {} public static class TestClass_0178 {} public static class TestClass_0179 {} public static class TestClass_0180 {} public static class TestClass_0181 {} public static class TestClass_0182 {} public static class TestClass_0183 {} public static class TestClass_0184 {} public static class TestClass_0185 {} public static class TestClass_0186 {} 
public static class TestClass_0187 {} public static class TestClass_0188 {} public static class TestClass_0189 {} public static class TestClass_0190 {} public static class TestClass_0191 {} public static class TestClass_0192 {} public static class TestClass_0193 {} public static class TestClass_0194 {} public static class TestClass_0195 {} public static class TestClass_0196 {} public static class TestClass_0197 {} public static class TestClass_0198 {} public static class TestClass_0199 {} public static class TestClass_0200 {} public static class TestClass_0201 {} public static class TestClass_0202 {} public static class TestClass_0203 {} public static class TestClass_0204 {} public static class TestClass_0205 {} public static class TestClass_0206 {} public static class TestClass_0207 {} public static class TestClass_0208 {} public static class TestClass_0209 {} public static class TestClass_0210 {} public static class TestClass_0211 {} public static class TestClass_0212 {} public static class TestClass_0213 {} public static class TestClass_0214 {} public static class TestClass_0215 {} public static class TestClass_0216 {} public static class TestClass_0217 {} public static class TestClass_0218 {} public static class TestClass_0219 {} public static class TestClass_0220 {} public static class TestClass_0221 {} public static class TestClass_0222 {} public static class TestClass_0223 {} public static class TestClass_0224 {} public static class TestClass_0225 {} public static class TestClass_0226 {} public static class TestClass_0227 {} public static class TestClass_0228 {} public static class TestClass_0229 {} public static class TestClass_0230 {} public static class TestClass_0231 {} public static class TestClass_0232 {} public static class TestClass_0233 {} public static class TestClass_0234 {} public static class TestClass_0235 {} public static class TestClass_0236 {} public static class TestClass_0237 {} public static class TestClass_0238 {} public static class TestClass_0239 {} public static class TestClass_0240 {} public static class TestClass_0241 {} public static class TestClass_0242 {} public static class TestClass_0243 {} public static class TestClass_0244 {} public static class TestClass_0245 {} public static class TestClass_0246 {} public static class TestClass_0247 {} public static class TestClass_0248 {} public static class TestClass_0249 {} public static class TestClass_0250 {} public static class TestClass_0251 {} public static class TestClass_0252 {} public static class TestClass_0253 {} public static class TestClass_0254 {} public static class TestClass_0255 {} public static class TestClass_0256 {} public static class TestClass_0257 {} public static class TestClass_0258 {} public static class TestClass_0259 {} public static class TestClass_0260 {} public static class TestClass_0261 {} public static class TestClass_0262 {} public static class TestClass_0263 {} public static class TestClass_0264 {} public static class TestClass_0265 {} public static class TestClass_0266 {} public static class TestClass_0267 {} public static class TestClass_0268 {} public static class TestClass_0269 {} public static class TestClass_0270 {} public static class TestClass_0271 {} public static class TestClass_0272 {} public static class TestClass_0273 {} public static class TestClass_0274 {} public static class TestClass_0275 {} public static class TestClass_0276 {} public static class TestClass_0277 {} public static class TestClass_0278 {} public static class TestClass_0279 {} public static class 
TestClass_0280 {} public static class TestClass_0281 {} public static class TestClass_0282 {} public static class TestClass_0283 {} public static class TestClass_0284 {} public static class TestClass_0285 {} public static class TestClass_0286 {} public static class TestClass_0287 {} public static class TestClass_0288 {} public static class TestClass_0289 {} public static class TestClass_0290 {} public static class TestClass_0291 {} public static class TestClass_0292 {} public static class TestClass_0293 {} public static class TestClass_0294 {} public static class TestClass_0295 {} public static class TestClass_0296 {} public static class TestClass_0297 {} public static class TestClass_0298 {} public static class TestClass_0299 {} public static class TestClass_0300 {} public static class TestClass_0301 {} public static class TestClass_0302 {} public static class TestClass_0303 {} public static class TestClass_0304 {} public static class TestClass_0305 {} public static class TestClass_0306 {} public static class TestClass_0307 {} public static class TestClass_0308 {} public static class TestClass_0309 {} public static class TestClass_0310 {} public static class TestClass_0311 {} public static class TestClass_0312 {} public static class TestClass_0313 {} public static class TestClass_0314 {} public static class TestClass_0315 {} public static class TestClass_0316 {} public static class TestClass_0317 {} public static class TestClass_0318 {} public static class TestClass_0319 {} public static class TestClass_0320 {} public static class TestClass_0321 {} public static class TestClass_0322 {} public static class TestClass_0323 {} public static class TestClass_0324 {} public static class TestClass_0325 {} public static class TestClass_0326 {} public static class TestClass_0327 {} public static class TestClass_0328 {} public static class TestClass_0329 {} public static class TestClass_0330 {} public static class TestClass_0331 {} public static class TestClass_0332 {} public static class TestClass_0333 {} public static class TestClass_0334 {} public static class TestClass_0335 {} public static class TestClass_0336 {} public static class TestClass_0337 {} public static class TestClass_0338 {} public static class TestClass_0339 {} public static class TestClass_0340 {} public static class TestClass_0341 {} public static class TestClass_0342 {} public static class TestClass_0343 {} public static class TestClass_0344 {} public static class TestClass_0345 {} public static class TestClass_0346 {} public static class TestClass_0347 {} public static class TestClass_0348 {} public static class TestClass_0349 {} public static class TestClass_0350 {} public static class TestClass_0351 {} public static class TestClass_0352 {} public static class TestClass_0353 {} public static class TestClass_0354 {} public static class TestClass_0355 {} public static class TestClass_0356 {} public static class TestClass_0357 {} public static class TestClass_0358 {} public static class TestClass_0359 {} public static class TestClass_0360 {} public static class TestClass_0361 {} public static class TestClass_0362 {} public static class TestClass_0363 {} public static class TestClass_0364 {} public static class TestClass_0365 {} public static class TestClass_0366 {} public static class TestClass_0367 {} public static class TestClass_0368 {} public static class TestClass_0369 {} public static class TestClass_0370 {} public static class TestClass_0371 {} public static class TestClass_0372 {} public static class TestClass_0373 {} 
public static class TestClass_0374 {} public static class TestClass_0375 {} public static class TestClass_0376 {} public static class TestClass_0377 {} public static class TestClass_0378 {} public static class TestClass_0379 {} public static class TestClass_0380 {} public static class TestClass_0381 {} public static class TestClass_0382 {} public static class TestClass_0383 {} public static class TestClass_0384 {} public static class TestClass_0385 {} public static class TestClass_0386 {} public static class TestClass_0387 {} public static class TestClass_0388 {} public static class TestClass_0389 {} public static class TestClass_0390 {} public static class TestClass_0391 {} public static class TestClass_0392 {} public static class TestClass_0393 {} public static class TestClass_0394 {} public static class TestClass_0395 {} public static class TestClass_0396 {} public static class TestClass_0397 {} public static class TestClass_0398 {} public static class TestClass_0399 {} public static class TestClass_0400 {} public static class TestClass_0401 {} public static class TestClass_0402 {} public static class TestClass_0403 {} public static class TestClass_0404 {} public static class TestClass_0405 {} public static class TestClass_0406 {} public static class TestClass_0407 {} public static class TestClass_0408 {} public static class TestClass_0409 {} public static class TestClass_0410 {} public static class TestClass_0411 {} public static class TestClass_0412 {} public static class TestClass_0413 {} public static class TestClass_0414 {} public static class TestClass_0415 {} public static class TestClass_0416 {} public static class TestClass_0417 {} public static class TestClass_0418 {} public static class TestClass_0419 {} public static class TestClass_0420 {} public static class TestClass_0421 {} public static class TestClass_0422 {} public static class TestClass_0423 {} public static class TestClass_0424 {} public static class TestClass_0425 {} public static class TestClass_0426 {} public static class TestClass_0427 {} public static class TestClass_0428 {} public static class TestClass_0429 {} public static class TestClass_0430 {} public static class TestClass_0431 {} public static class TestClass_0432 {} public static class TestClass_0433 {} public static class TestClass_0434 {} public static class TestClass_0435 {} public static class TestClass_0436 {} public static class TestClass_0437 {} public static class TestClass_0438 {} public static class TestClass_0439 {} public static class TestClass_0440 {} public static class TestClass_0441 {} public static class TestClass_0442 {} public static class TestClass_0443 {} public static class TestClass_0444 {} public static class TestClass_0445 {} public static class TestClass_0446 {} public static class TestClass_0447 {} public static class TestClass_0448 {} public static class TestClass_0449 {} public static class TestClass_0450 {} public static class TestClass_0451 {} public static class TestClass_0452 {} public static class TestClass_0453 {} public static class TestClass_0454 {} public static class TestClass_0455 {} public static class TestClass_0456 {} public static class TestClass_0457 {} public static class TestClass_0458 {} public static class TestClass_0459 {} public static class TestClass_0460 {} public static class TestClass_0461 {} public static class TestClass_0462 {} public static class TestClass_0463 {} public static class TestClass_0464 {} public static class TestClass_0465 {} public static class TestClass_0466 {} public static class 
TestClass_0467 {} public static class TestClass_0468 {} public static class TestClass_0469 {} public static class TestClass_0470 {} public static class TestClass_0471 {} public static class TestClass_0472 {} public static class TestClass_0473 {} public static class TestClass_0474 {} public static class TestClass_0475 {} public static class TestClass_0476 {} public static class TestClass_0477 {} public static class TestClass_0478 {} public static class TestClass_0479 {} public static class TestClass_0480 {} public static class TestClass_0481 {} public static class TestClass_0482 {} public static class TestClass_0483 {} public static class TestClass_0484 {} public static class TestClass_0485 {} public static class TestClass_0486 {} public static class TestClass_0487 {} public static class TestClass_0488 {} public static class TestClass_0489 {} public static class TestClass_0490 {} public static class TestClass_0491 {} public static class TestClass_0492 {} public static class TestClass_0493 {} public static class TestClass_0494 {} public static class TestClass_0495 {} public static class TestClass_0496 {} public static class TestClass_0497 {} public static class TestClass_0498 {} public static class TestClass_0499 {} public static class TestClass_0500 {} public static class TestClass_0501 {} public static class TestClass_0502 {} public static class TestClass_0503 {} public static class TestClass_0504 {} public static class TestClass_0505 {} public static class TestClass_0506 {} public static class TestClass_0507 {} public static class TestClass_0508 {} public static class TestClass_0509 {} public static class TestClass_0510 {} public static class TestClass_0511 {} public static class TestClass_0512 {} public static class TestClass_0513 {} public static class TestClass_0514 {} public static class TestClass_0515 {} public static class TestClass_0516 {} public static class TestClass_0517 {} public static class TestClass_0518 {} public static class TestClass_0519 {} public static class TestClass_0520 {} public static class TestClass_0521 {} public static class TestClass_0522 {} public static class TestClass_0523 {} public static class TestClass_0524 {} public static class TestClass_0525 {} public static class TestClass_0526 {} public static class TestClass_0527 {} public static class TestClass_0528 {} public static class TestClass_0529 {} public static class TestClass_0530 {} public static class TestClass_0531 {} public static class TestClass_0532 {} public static class TestClass_0533 {} public static class TestClass_0534 {} public static class TestClass_0535 {} public static class TestClass_0536 {} public static class TestClass_0537 {} public static class TestClass_0538 {} public static class TestClass_0539 {} public static class TestClass_0540 {} public static class TestClass_0541 {} public static class TestClass_0542 {} public static class TestClass_0543 {} public static class TestClass_0544 {} public static class TestClass_0545 {} public static class TestClass_0546 {} public static class TestClass_0547 {} public static class TestClass_0548 {} public static class TestClass_0549 {} public static class TestClass_0550 {} public static class TestClass_0551 {} public static class TestClass_0552 {} public static class TestClass_0553 {} public static class TestClass_0554 {} public static class TestClass_0555 {} public static class TestClass_0556 {} public static class TestClass_0557 {} public static class TestClass_0558 {} public static class TestClass_0559 {} public static class TestClass_0560 {} 
public static class TestClass_0561 {} public static class TestClass_0562 {} public static class TestClass_0563 {} public static class TestClass_0564 {} public static class TestClass_0565 {} public static class TestClass_0566 {} public static class TestClass_0567 {} public static class TestClass_0568 {} public static class TestClass_0569 {} public static class TestClass_0570 {} public static class TestClass_0571 {} public static class TestClass_0572 {} public static class TestClass_0573 {} public static class TestClass_0574 {} public static class TestClass_0575 {} public static class TestClass_0576 {} public static class TestClass_0577 {} public static class TestClass_0578 {} public static class TestClass_0579 {} public static class TestClass_0580 {} public static class TestClass_0581 {} public static class TestClass_0582 {} public static class TestClass_0583 {} public static class TestClass_0584 {} public static class TestClass_0585 {} public static class TestClass_0586 {} public static class TestClass_0587 {} public static class TestClass_0588 {} public static class TestClass_0589 {} public static class TestClass_0590 {} public static class TestClass_0591 {} public static class TestClass_0592 {} public static class TestClass_0593 {} public static class TestClass_0594 {} public static class TestClass_0595 {} public static class TestClass_0596 {} public static class TestClass_0597 {} public static class TestClass_0598 {} public static class TestClass_0599 {} public static class TestClass_0600 {} public static class TestClass_0601 {} public static class TestClass_0602 {} public static class TestClass_0603 {} public static class TestClass_0604 {} public static class TestClass_0605 {} public static class TestClass_0606 {} public static class TestClass_0607 {} public static class TestClass_0608 {} public static class TestClass_0609 {} public static class TestClass_0610 {} public static class TestClass_0611 {} public static class TestClass_0612 {} public static class TestClass_0613 {} public static class TestClass_0614 {} public static class TestClass_0615 {} public static class TestClass_0616 {} public static class TestClass_0617 {} public static class TestClass_0618 {} public static class TestClass_0619 {} public static class TestClass_0620 {} public static class TestClass_0621 {} public static class TestClass_0622 {} public static class TestClass_0623 {} public static class TestClass_0624 {} public static class TestClass_0625 {} public static class TestClass_0626 {} public static class TestClass_0627 {} public static class TestClass_0628 {} public static class TestClass_0629 {} public static class TestClass_0630 {} public static class TestClass_0631 {} public static class TestClass_0632 {} public static class TestClass_0633 {} public static class TestClass_0634 {} public static class TestClass_0635 {} public static class TestClass_0636 {} public static class TestClass_0637 {} public static class TestClass_0638 {} public static class TestClass_0639 {} public static class TestClass_0640 {} public static class TestClass_0641 {} public static class TestClass_0642 {} public static class TestClass_0643 {} public static class TestClass_0644 {} public static class TestClass_0645 {} public static class TestClass_0646 {} public static class TestClass_0647 {} public static class TestClass_0648 {} public static class TestClass_0649 {} public static class TestClass_0650 {} public static class TestClass_0651 {} public static class TestClass_0652 {} public static class TestClass_0653 {} public static class 
TestClass_0654 {} public static class TestClass_0655 {} public static class TestClass_0656 {} public static class TestClass_0657 {} public static class TestClass_0658 {} public static class TestClass_0659 {} public static class TestClass_0660 {} public static class TestClass_0661 {} public static class TestClass_0662 {} public static class TestClass_0663 {} public static class TestClass_0664 {} public static class TestClass_0665 {} public static class TestClass_0666 {} public static class TestClass_0667 {} public static class TestClass_0668 {} public static class TestClass_0669 {} public static class TestClass_0670 {} public static class TestClass_0671 {} public static class TestClass_0672 {} public static class TestClass_0673 {} public static class TestClass_0674 {} public static class TestClass_0675 {} public static class TestClass_0676 {} public static class TestClass_0677 {} public static class TestClass_0678 {} public static class TestClass_0679 {} public static class TestClass_0680 {} public static class TestClass_0681 {} public static class TestClass_0682 {} public static class TestClass_0683 {} public static class TestClass_0684 {} public static class TestClass_0685 {} public static class TestClass_0686 {} public static class TestClass_0687 {} public static class TestClass_0688 {} public static class TestClass_0689 {} public static class TestClass_0690 {} public static class TestClass_0691 {} public static class TestClass_0692 {} public static class TestClass_0693 {} public static class TestClass_0694 {} public static class TestClass_0695 {} public static class TestClass_0696 {} public static class TestClass_0697 {} public static class TestClass_0698 {} public static class TestClass_0699 {} public static class TestClass_0700 {} public static class TestClass_0701 {} public static class TestClass_0702 {} public static class TestClass_0703 {} public static class TestClass_0704 {} public static class TestClass_0705 {} public static class TestClass_0706 {} public static class TestClass_0707 {} public static class TestClass_0708 {} public static class TestClass_0709 {} public static class TestClass_0710 {} public static class TestClass_0711 {} public static class TestClass_0712 {} public static class TestClass_0713 {} public static class TestClass_0714 {} public static class TestClass_0715 {} public static class TestClass_0716 {} public static class TestClass_0717 {} public static class TestClass_0718 {} public static class TestClass_0719 {} public static class TestClass_0720 {} public static class TestClass_0721 {} public static class TestClass_0722 {} public static class TestClass_0723 {} public static class TestClass_0724 {} public static class TestClass_0725 {} public static class TestClass_0726 {} public static class TestClass_0727 {} public static class TestClass_0728 {} public static class TestClass_0729 {} public static class TestClass_0730 {} public static class TestClass_0731 {} public static class TestClass_0732 {} public static class TestClass_0733 {} public static class TestClass_0734 {} public static class TestClass_0735 {} public static class TestClass_0736 {} public static class TestClass_0737 {} public static class TestClass_0738 {} public static class TestClass_0739 {} public static class TestClass_0740 {} public static class TestClass_0741 {} public static class TestClass_0742 {} public static class TestClass_0743 {} public static class TestClass_0744 {} public static class TestClass_0745 {} public static class TestClass_0746 {} public static class TestClass_0747 {} 
public static class TestClass_0748 {} public static class TestClass_0749 {} public static class TestClass_0750 {} public static class TestClass_0751 {} public static class TestClass_0752 {} public static class TestClass_0753 {} public static class TestClass_0754 {} public static class TestClass_0755 {} public static class TestClass_0756 {} public static class TestClass_0757 {} public static class TestClass_0758 {} public static class TestClass_0759 {} public static class TestClass_0760 {} public static class TestClass_0761 {} public static class TestClass_0762 {} public static class TestClass_0763 {} public static class TestClass_0764 {} public static class TestClass_0765 {} public static class TestClass_0766 {} public static class TestClass_0767 {} public static class TestClass_0768 {} public static class TestClass_0769 {} public static class TestClass_0770 {} public static class TestClass_0771 {} public static class TestClass_0772 {} public static class TestClass_0773 {} public static class TestClass_0774 {} public static class TestClass_0775 {} public static class TestClass_0776 {} public static class TestClass_0777 {} public static class TestClass_0778 {} public static class TestClass_0779 {} public static class TestClass_0780 {} public static class TestClass_0781 {} public static class TestClass_0782 {} public static class TestClass_0783 {} public static class TestClass_0784 {} public static class TestClass_0785 {} public static class TestClass_0786 {} public static class TestClass_0787 {} public static class TestClass_0788 {} public static class TestClass_0789 {} public static class TestClass_0790 {} public static class TestClass_0791 {} public static class TestClass_0792 {} public static class TestClass_0793 {} public static class TestClass_0794 {} public static class TestClass_0795 {} public static class TestClass_0796 {} public static class TestClass_0797 {} public static class TestClass_0798 {} public static class TestClass_0799 {} public static class TestClass_0800 {} public static class TestClass_0801 {} public static class TestClass_0802 {} public static class TestClass_0803 {} public static class TestClass_0804 {} public static class TestClass_0805 {} public static class TestClass_0806 {} public static class TestClass_0807 {} public static class TestClass_0808 {} public static class TestClass_0809 {} public static class TestClass_0810 {} public static class TestClass_0811 {} public static class TestClass_0812 {} public static class TestClass_0813 {} public static class TestClass_0814 {} public static class TestClass_0815 {} public static class TestClass_0816 {} public static class TestClass_0817 {} public static class TestClass_0818 {} public static class TestClass_0819 {} public static class TestClass_0820 {} public static class TestClass_0821 {} public static class TestClass_0822 {} public static class TestClass_0823 {} public static class TestClass_0824 {} public static class TestClass_0825 {} public static class TestClass_0826 {} public static class TestClass_0827 {} public static class TestClass_0828 {} public static class TestClass_0829 {} public static class TestClass_0830 {} public static class TestClass_0831 {} public static class TestClass_0832 {} public static class TestClass_0833 {} public static class TestClass_0834 {} public static class TestClass_0835 {} public static class TestClass_0836 {} public static class TestClass_0837 {} public static class TestClass_0838 {} public static class TestClass_0839 {} public static class TestClass_0840 {} public static class 
TestClass_0841 {} public static class TestClass_0842 {} public static class TestClass_0843 {} public static class TestClass_0844 {} public static class TestClass_0845 {} public static class TestClass_0846 {} public static class TestClass_0847 {} public static class TestClass_0848 {} public static class TestClass_0849 {} public static class TestClass_0850 {} public static class TestClass_0851 {} public static class TestClass_0852 {} public static class TestClass_0853 {} public static class TestClass_0854 {} public static class TestClass_0855 {} public static class TestClass_0856 {} public static class TestClass_0857 {} public static class TestClass_0858 {} public static class TestClass_0859 {} public static class TestClass_0860 {} public static class TestClass_0861 {} public static class TestClass_0862 {} public static class TestClass_0863 {} public static class TestClass_0864 {} public static class TestClass_0865 {} public static class TestClass_0866 {} public static class TestClass_0867 {} public static class TestClass_0868 {} public static class TestClass_0869 {} public static class TestClass_0870 {} public static class TestClass_0871 {} public static class TestClass_0872 {} public static class TestClass_0873 {} public static class TestClass_0874 {} public static class TestClass_0875 {} public static class TestClass_0876 {} public static class TestClass_0877 {} public static class TestClass_0878 {} public static class TestClass_0879 {} public static class TestClass_0880 {} public static class TestClass_0881 {} public static class TestClass_0882 {} public static class TestClass_0883 {} public static class TestClass_0884 {} public static class TestClass_0885 {} public static class TestClass_0886 {} public static class TestClass_0887 {} public static class TestClass_0888 {} public static class TestClass_0889 {} public static class TestClass_0890 {} public static class TestClass_0891 {} public static class TestClass_0892 {} public static class TestClass_0893 {} public static class TestClass_0894 {} public static class TestClass_0895 {} public static class TestClass_0896 {} public static class TestClass_0897 {} public static class TestClass_0898 {} public static class TestClass_0899 {} public static class TestClass_0900 {} public static class TestClass_0901 {} public static class TestClass_0902 {} public static class TestClass_0903 {} public static class TestClass_0904 {} public static class TestClass_0905 {} public static class TestClass_0906 {} public static class TestClass_0907 {} public static class TestClass_0908 {} public static class TestClass_0909 {} public static class TestClass_0910 {} public static class TestClass_0911 {} public static class TestClass_0912 {} public static class TestClass_0913 {} public static class TestClass_0914 {} public static class TestClass_0915 {} public static class TestClass_0916 {} public static class TestClass_0917 {} public static class TestClass_0918 {} public static class TestClass_0919 {} public static class TestClass_0920 {} public static class TestClass_0921 {} public static class TestClass_0922 {} public static class TestClass_0923 {} public static class TestClass_0924 {} public static class TestClass_0925 {} public static class TestClass_0926 {} public static class TestClass_0927 {} public static class TestClass_0928 {} public static class TestClass_0929 {} public static class TestClass_0930 {} public static class TestClass_0931 {} public static class TestClass_0932 {} public static class TestClass_0933 {} public static class TestClass_0934 {} 
public static class TestClass_0935 {} public static class TestClass_0936 {} public static class TestClass_0937 {} public static class TestClass_0938 {} public static class TestClass_0939 {} public static class TestClass_0940 {} public static class TestClass_0941 {} public static class TestClass_0942 {} public static class TestClass_0943 {} public static class TestClass_0944 {} public static class TestClass_0945 {} public static class TestClass_0946 {} public static class TestClass_0947 {} public static class TestClass_0948 {} public static class TestClass_0949 {} public static class TestClass_0950 {} public static class TestClass_0951 {} public static class TestClass_0952 {} public static class TestClass_0953 {} public static class TestClass_0954 {} public static class TestClass_0955 {} public static class TestClass_0956 {} public static class TestClass_0957 {} public static class TestClass_0958 {} public static class TestClass_0959 {} public static class TestClass_0960 {} public static class TestClass_0961 {} public static class TestClass_0962 {} public static class TestClass_0963 {} public static class TestClass_0964 {} public static class TestClass_0965 {} public static class TestClass_0966 {} public static class TestClass_0967 {} public static class TestClass_0968 {} public static class TestClass_0969 {} public static class TestClass_0970 {} public static class TestClass_0971 {} public static class TestClass_0972 {} public static class TestClass_0973 {} public static class TestClass_0974 {} public static class TestClass_0975 {} public static class TestClass_0976 {} public static class TestClass_0977 {} public static class TestClass_0978 {} public static class TestClass_0979 {} public static class TestClass_0980 {} public static class TestClass_0981 {} public static class TestClass_0982 {} public static class TestClass_0983 {} public static class TestClass_0984 {} public static class TestClass_0985 {} public static class TestClass_0986 {} public static class TestClass_0987 {} public static class TestClass_0988 {} public static class TestClass_0989 {} public static class TestClass_0990 {} public static class TestClass_0991 {} public static class TestClass_0992 {} public static class TestClass_0993 {} public static class TestClass_0994 {} public static class TestClass_0995 {} public static class TestClass_0996 {} public static class TestClass_0997 {} public static class TestClass_0998 {} public static class TestClass_0999 {} public static class TestClass_1000 {} public static class TestClass_1001 {} public static class TestClass_1002 {} public static class TestClass_1003 {} public static class TestClass_1004 {} public static class TestClass_1005 {} public static class TestClass_1006 {} public static class TestClass_1007 {} public static class TestClass_1008 {} public static class TestClass_1009 {} public static class TestClass_1010 {} public static class TestClass_1011 {} public static class TestClass_1012 {} public static class TestClass_1013 {} public static class TestClass_1014 {} public static class TestClass_1015 {} public static class TestClass_1016 {} public static class TestClass_1017 {} public static class TestClass_1018 {} public static class TestClass_1019 {} public static class TestClass_1020 {} public static class TestClass_1021 {} public static class TestClass_1022 {} public static class TestClass_1023 {} public static class TestClass_1024 {} public void timeConstClassWithConflict(int count) { Class class0001 = TestClass_0001.class; for (int i = 0; i < count; ++i) { 
$noinline$foo(class0001);  // Prevent LICM on the TestClass_xxxx.class below.
            // TestClass_0000 and TestClass_1024 have type indexes exactly 1024 apart,
            // so they hash to the same DexCache slot and conflict.
            $noinline$foo(TestClass_0000.class);
            $noinline$foo(TestClass_1024.class);
        }
    }

    public void timeConstClassWithoutConflict(int count) {
        Class class0000 = TestClass_0000.class;
        for (int i = 0; i < count; ++i) {
            $noinline$foo(class0000);  // Prevent LICM on the TestClass_xxxx.class below.
            // TestClass_0001 and TestClass_1023 differ in their low 10 bits,
            // so their DexCache slots do not conflict.
            $noinline$foo(TestClass_0001.class);
            $noinline$foo(TestClass_1023.class);
        }
    }

    static void $noinline$foo(Class s) {
        if (doThrow) {
            throw new Error();
        }
    }

    public static boolean doThrow = false;
}
android-platform-art-8.1.0+r23/benchmark/const-string/000077500000000000000000000000001336577252300225515ustar00rootroot00000000000000
android-platform-art-8.1.0+r23/benchmark/const-string/info.txt000066400000000000000000000000761336577252300242500ustar00rootroot00000000000000
Benchmarks for repeating const-string instructions in a loop.
android-platform-art-8.1.0+r23/benchmark/const-string/src/000077500000000000000000000000001336577252300233405ustar00rootroot00000000000000
android-platform-art-8.1.0+r23/benchmark/const-string/src/ConstStringBenchmark.java000066400000000000000000002030451336577252300302770ustar00rootroot00000000000000
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

public class ConstStringBenchmark {
    // Initialize 1025 strings with consecutive string indexes in the dex file.
    // The tests below rely on the knowledge that ART uses the low 10 bits
    // of the string index as the hash into the DexCache strings array.
    // Note: n == n + 1024 (mod 2^10), n + 1 != n + 1023 (mod 2^10).
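    // A minimal illustrative sketch of the low-10-bit hashing the note above
    // describes; `dexCacheHash` is a hypothetical helper named here for
    // illustration only, not an ART API:
    //
    //     static int dexCacheHash(int stringIndex) {
    //         return stringIndex & 0x3ff;  // keep the low 10 bits, i.e. stringIndex mod 2^10
    //     }
    //
    // With consecutive indexes, dexCacheHash(n) == dexCacheHash(n + 1024), so
    // string_0000 and string_1024 collide in the cache, while
    // dexCacheHash(n + 1) != dexCacheHash(n + 1023), so string_0001 and
    // string_1023 do not.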
public static final String string_0000 = "TestString_0000"; public static final String string_0001 = "TestString_0001"; public static final String string_0002 = "TestString_0002"; public static final String string_0003 = "TestString_0003"; public static final String string_0004 = "TestString_0004"; public static final String string_0005 = "TestString_0005"; public static final String string_0006 = "TestString_0006"; public static final String string_0007 = "TestString_0007"; public static final String string_0008 = "TestString_0008"; public static final String string_0009 = "TestString_0009"; public static final String string_0010 = "TestString_0010"; public static final String string_0011 = "TestString_0011"; public static final String string_0012 = "TestString_0012"; public static final String string_0013 = "TestString_0013"; public static final String string_0014 = "TestString_0014"; public static final String string_0015 = "TestString_0015"; public static final String string_0016 = "TestString_0016"; public static final String string_0017 = "TestString_0017"; public static final String string_0018 = "TestString_0018"; public static final String string_0019 = "TestString_0019"; public static final String string_0020 = "TestString_0020"; public static final String string_0021 = "TestString_0021"; public static final String string_0022 = "TestString_0022"; public static final String string_0023 = "TestString_0023"; public static final String string_0024 = "TestString_0024"; public static final String string_0025 = "TestString_0025"; public static final String string_0026 = "TestString_0026"; public static final String string_0027 = "TestString_0027"; public static final String string_0028 = "TestString_0028"; public static final String string_0029 = "TestString_0029"; public static final String string_0030 = "TestString_0030"; public static final String string_0031 = "TestString_0031"; public static final String string_0032 = "TestString_0032"; public static final String string_0033 = "TestString_0033"; public static final String string_0034 = "TestString_0034"; public static final String string_0035 = "TestString_0035"; public static final String string_0036 = "TestString_0036"; public static final String string_0037 = "TestString_0037"; public static final String string_0038 = "TestString_0038"; public static final String string_0039 = "TestString_0039"; public static final String string_0040 = "TestString_0040"; public static final String string_0041 = "TestString_0041"; public static final String string_0042 = "TestString_0042"; public static final String string_0043 = "TestString_0043"; public static final String string_0044 = "TestString_0044"; public static final String string_0045 = "TestString_0045"; public static final String string_0046 = "TestString_0046"; public static final String string_0047 = "TestString_0047"; public static final String string_0048 = "TestString_0048"; public static final String string_0049 = "TestString_0049"; public static final String string_0050 = "TestString_0050"; public static final String string_0051 = "TestString_0051"; public static final String string_0052 = "TestString_0052"; public static final String string_0053 = "TestString_0053"; public static final String string_0054 = "TestString_0054"; public static final String string_0055 = "TestString_0055"; public static final String string_0056 = "TestString_0056"; public static final String string_0057 = "TestString_0057"; public static final String string_0058 = "TestString_0058"; public static 
final String string_0059 = "TestString_0059"; public static final String string_0060 = "TestString_0060"; public static final String string_0061 = "TestString_0061"; public static final String string_0062 = "TestString_0062"; public static final String string_0063 = "TestString_0063"; public static final String string_0064 = "TestString_0064"; public static final String string_0065 = "TestString_0065"; public static final String string_0066 = "TestString_0066"; public static final String string_0067 = "TestString_0067"; public static final String string_0068 = "TestString_0068"; public static final String string_0069 = "TestString_0069"; public static final String string_0070 = "TestString_0070"; public static final String string_0071 = "TestString_0071"; public static final String string_0072 = "TestString_0072"; public static final String string_0073 = "TestString_0073"; public static final String string_0074 = "TestString_0074"; public static final String string_0075 = "TestString_0075"; public static final String string_0076 = "TestString_0076"; public static final String string_0077 = "TestString_0077"; public static final String string_0078 = "TestString_0078"; public static final String string_0079 = "TestString_0079"; public static final String string_0080 = "TestString_0080"; public static final String string_0081 = "TestString_0081"; public static final String string_0082 = "TestString_0082"; public static final String string_0083 = "TestString_0083"; public static final String string_0084 = "TestString_0084"; public static final String string_0085 = "TestString_0085"; public static final String string_0086 = "TestString_0086"; public static final String string_0087 = "TestString_0087"; public static final String string_0088 = "TestString_0088"; public static final String string_0089 = "TestString_0089"; public static final String string_0090 = "TestString_0090"; public static final String string_0091 = "TestString_0091"; public static final String string_0092 = "TestString_0092"; public static final String string_0093 = "TestString_0093"; public static final String string_0094 = "TestString_0094"; public static final String string_0095 = "TestString_0095"; public static final String string_0096 = "TestString_0096"; public static final String string_0097 = "TestString_0097"; public static final String string_0098 = "TestString_0098"; public static final String string_0099 = "TestString_0099"; public static final String string_0100 = "TestString_0100"; public static final String string_0101 = "TestString_0101"; public static final String string_0102 = "TestString_0102"; public static final String string_0103 = "TestString_0103"; public static final String string_0104 = "TestString_0104"; public static final String string_0105 = "TestString_0105"; public static final String string_0106 = "TestString_0106"; public static final String string_0107 = "TestString_0107"; public static final String string_0108 = "TestString_0108"; public static final String string_0109 = "TestString_0109"; public static final String string_0110 = "TestString_0110"; public static final String string_0111 = "TestString_0111"; public static final String string_0112 = "TestString_0112"; public static final String string_0113 = "TestString_0113"; public static final String string_0114 = "TestString_0114"; public static final String string_0115 = "TestString_0115"; public static final String string_0116 = "TestString_0116"; public static final String string_0117 = "TestString_0117"; public static final String 
string_0118 = "TestString_0118"; public static final String string_0119 = "TestString_0119"; public static final String string_0120 = "TestString_0120"; public static final String string_0121 = "TestString_0121"; public static final String string_0122 = "TestString_0122"; public static final String string_0123 = "TestString_0123"; public static final String string_0124 = "TestString_0124"; public static final String string_0125 = "TestString_0125"; public static final String string_0126 = "TestString_0126"; public static final String string_0127 = "TestString_0127"; public static final String string_0128 = "TestString_0128"; public static final String string_0129 = "TestString_0129"; public static final String string_0130 = "TestString_0130"; public static final String string_0131 = "TestString_0131"; public static final String string_0132 = "TestString_0132"; public static final String string_0133 = "TestString_0133"; public static final String string_0134 = "TestString_0134"; public static final String string_0135 = "TestString_0135"; public static final String string_0136 = "TestString_0136"; public static final String string_0137 = "TestString_0137"; public static final String string_0138 = "TestString_0138"; public static final String string_0139 = "TestString_0139"; public static final String string_0140 = "TestString_0140"; public static final String string_0141 = "TestString_0141"; public static final String string_0142 = "TestString_0142"; public static final String string_0143 = "TestString_0143"; public static final String string_0144 = "TestString_0144"; public static final String string_0145 = "TestString_0145"; public static final String string_0146 = "TestString_0146"; public static final String string_0147 = "TestString_0147"; public static final String string_0148 = "TestString_0148"; public static final String string_0149 = "TestString_0149"; public static final String string_0150 = "TestString_0150"; public static final String string_0151 = "TestString_0151"; public static final String string_0152 = "TestString_0152"; public static final String string_0153 = "TestString_0153"; public static final String string_0154 = "TestString_0154"; public static final String string_0155 = "TestString_0155"; public static final String string_0156 = "TestString_0156"; public static final String string_0157 = "TestString_0157"; public static final String string_0158 = "TestString_0158"; public static final String string_0159 = "TestString_0159"; public static final String string_0160 = "TestString_0160"; public static final String string_0161 = "TestString_0161"; public static final String string_0162 = "TestString_0162"; public static final String string_0163 = "TestString_0163"; public static final String string_0164 = "TestString_0164"; public static final String string_0165 = "TestString_0165"; public static final String string_0166 = "TestString_0166"; public static final String string_0167 = "TestString_0167"; public static final String string_0168 = "TestString_0168"; public static final String string_0169 = "TestString_0169"; public static final String string_0170 = "TestString_0170"; public static final String string_0171 = "TestString_0171"; public static final String string_0172 = "TestString_0172"; public static final String string_0173 = "TestString_0173"; public static final String string_0174 = "TestString_0174"; public static final String string_0175 = "TestString_0175"; public static final String string_0176 = "TestString_0176"; public static final String string_0177 = 
"TestString_0177"; public static final String string_0178 = "TestString_0178"; public static final String string_0179 = "TestString_0179"; public static final String string_0180 = "TestString_0180"; public static final String string_0181 = "TestString_0181"; public static final String string_0182 = "TestString_0182"; public static final String string_0183 = "TestString_0183"; public static final String string_0184 = "TestString_0184"; public static final String string_0185 = "TestString_0185"; public static final String string_0186 = "TestString_0186"; public static final String string_0187 = "TestString_0187"; public static final String string_0188 = "TestString_0188"; public static final String string_0189 = "TestString_0189"; public static final String string_0190 = "TestString_0190"; public static final String string_0191 = "TestString_0191"; public static final String string_0192 = "TestString_0192"; public static final String string_0193 = "TestString_0193"; public static final String string_0194 = "TestString_0194"; public static final String string_0195 = "TestString_0195"; public static final String string_0196 = "TestString_0196"; public static final String string_0197 = "TestString_0197"; public static final String string_0198 = "TestString_0198"; public static final String string_0199 = "TestString_0199"; public static final String string_0200 = "TestString_0200"; public static final String string_0201 = "TestString_0201"; public static final String string_0202 = "TestString_0202"; public static final String string_0203 = "TestString_0203"; public static final String string_0204 = "TestString_0204"; public static final String string_0205 = "TestString_0205"; public static final String string_0206 = "TestString_0206"; public static final String string_0207 = "TestString_0207"; public static final String string_0208 = "TestString_0208"; public static final String string_0209 = "TestString_0209"; public static final String string_0210 = "TestString_0210"; public static final String string_0211 = "TestString_0211"; public static final String string_0212 = "TestString_0212"; public static final String string_0213 = "TestString_0213"; public static final String string_0214 = "TestString_0214"; public static final String string_0215 = "TestString_0215"; public static final String string_0216 = "TestString_0216"; public static final String string_0217 = "TestString_0217"; public static final String string_0218 = "TestString_0218"; public static final String string_0219 = "TestString_0219"; public static final String string_0220 = "TestString_0220"; public static final String string_0221 = "TestString_0221"; public static final String string_0222 = "TestString_0222"; public static final String string_0223 = "TestString_0223"; public static final String string_0224 = "TestString_0224"; public static final String string_0225 = "TestString_0225"; public static final String string_0226 = "TestString_0226"; public static final String string_0227 = "TestString_0227"; public static final String string_0228 = "TestString_0228"; public static final String string_0229 = "TestString_0229"; public static final String string_0230 = "TestString_0230"; public static final String string_0231 = "TestString_0231"; public static final String string_0232 = "TestString_0232"; public static final String string_0233 = "TestString_0233"; public static final String string_0234 = "TestString_0234"; public static final String string_0235 = "TestString_0235"; public static final String string_0236 = 
"TestString_0236"; public static final String string_0237 = "TestString_0237"; public static final String string_0238 = "TestString_0238"; public static final String string_0239 = "TestString_0239"; public static final String string_0240 = "TestString_0240"; public static final String string_0241 = "TestString_0241"; public static final String string_0242 = "TestString_0242"; public static final String string_0243 = "TestString_0243"; public static final String string_0244 = "TestString_0244"; public static final String string_0245 = "TestString_0245"; public static final String string_0246 = "TestString_0246"; public static final String string_0247 = "TestString_0247"; public static final String string_0248 = "TestString_0248"; public static final String string_0249 = "TestString_0249"; public static final String string_0250 = "TestString_0250"; public static final String string_0251 = "TestString_0251"; public static final String string_0252 = "TestString_0252"; public static final String string_0253 = "TestString_0253"; public static final String string_0254 = "TestString_0254"; public static final String string_0255 = "TestString_0255"; public static final String string_0256 = "TestString_0256"; public static final String string_0257 = "TestString_0257"; public static final String string_0258 = "TestString_0258"; public static final String string_0259 = "TestString_0259"; public static final String string_0260 = "TestString_0260"; public static final String string_0261 = "TestString_0261"; public static final String string_0262 = "TestString_0262"; public static final String string_0263 = "TestString_0263"; public static final String string_0264 = "TestString_0264"; public static final String string_0265 = "TestString_0265"; public static final String string_0266 = "TestString_0266"; public static final String string_0267 = "TestString_0267"; public static final String string_0268 = "TestString_0268"; public static final String string_0269 = "TestString_0269"; public static final String string_0270 = "TestString_0270"; public static final String string_0271 = "TestString_0271"; public static final String string_0272 = "TestString_0272"; public static final String string_0273 = "TestString_0273"; public static final String string_0274 = "TestString_0274"; public static final String string_0275 = "TestString_0275"; public static final String string_0276 = "TestString_0276"; public static final String string_0277 = "TestString_0277"; public static final String string_0278 = "TestString_0278"; public static final String string_0279 = "TestString_0279"; public static final String string_0280 = "TestString_0280"; public static final String string_0281 = "TestString_0281"; public static final String string_0282 = "TestString_0282"; public static final String string_0283 = "TestString_0283"; public static final String string_0284 = "TestString_0284"; public static final String string_0285 = "TestString_0285"; public static final String string_0286 = "TestString_0286"; public static final String string_0287 = "TestString_0287"; public static final String string_0288 = "TestString_0288"; public static final String string_0289 = "TestString_0289"; public static final String string_0290 = "TestString_0290"; public static final String string_0291 = "TestString_0291"; public static final String string_0292 = "TestString_0292"; public static final String string_0293 = "TestString_0293"; public static final String string_0294 = "TestString_0294"; public static final String string_0295 = 
"TestString_0295"; public static final String string_0296 = "TestString_0296"; public static final String string_0297 = "TestString_0297"; public static final String string_0298 = "TestString_0298"; public static final String string_0299 = "TestString_0299"; public static final String string_0300 = "TestString_0300"; public static final String string_0301 = "TestString_0301"; public static final String string_0302 = "TestString_0302"; public static final String string_0303 = "TestString_0303"; public static final String string_0304 = "TestString_0304"; public static final String string_0305 = "TestString_0305"; public static final String string_0306 = "TestString_0306"; public static final String string_0307 = "TestString_0307"; public static final String string_0308 = "TestString_0308"; public static final String string_0309 = "TestString_0309"; public static final String string_0310 = "TestString_0310"; public static final String string_0311 = "TestString_0311"; public static final String string_0312 = "TestString_0312"; public static final String string_0313 = "TestString_0313"; public static final String string_0314 = "TestString_0314"; public static final String string_0315 = "TestString_0315"; public static final String string_0316 = "TestString_0316"; public static final String string_0317 = "TestString_0317"; public static final String string_0318 = "TestString_0318"; public static final String string_0319 = "TestString_0319"; public static final String string_0320 = "TestString_0320"; public static final String string_0321 = "TestString_0321"; public static final String string_0322 = "TestString_0322"; public static final String string_0323 = "TestString_0323"; public static final String string_0324 = "TestString_0324"; public static final String string_0325 = "TestString_0325"; public static final String string_0326 = "TestString_0326"; public static final String string_0327 = "TestString_0327"; public static final String string_0328 = "TestString_0328"; public static final String string_0329 = "TestString_0329"; public static final String string_0330 = "TestString_0330"; public static final String string_0331 = "TestString_0331"; public static final String string_0332 = "TestString_0332"; public static final String string_0333 = "TestString_0333"; public static final String string_0334 = "TestString_0334"; public static final String string_0335 = "TestString_0335"; public static final String string_0336 = "TestString_0336"; public static final String string_0337 = "TestString_0337"; public static final String string_0338 = "TestString_0338"; public static final String string_0339 = "TestString_0339"; public static final String string_0340 = "TestString_0340"; public static final String string_0341 = "TestString_0341"; public static final String string_0342 = "TestString_0342"; public static final String string_0343 = "TestString_0343"; public static final String string_0344 = "TestString_0344"; public static final String string_0345 = "TestString_0345"; public static final String string_0346 = "TestString_0346"; public static final String string_0347 = "TestString_0347"; public static final String string_0348 = "TestString_0348"; public static final String string_0349 = "TestString_0349"; public static final String string_0350 = "TestString_0350"; public static final String string_0351 = "TestString_0351"; public static final String string_0352 = "TestString_0352"; public static final String string_0353 = "TestString_0353"; public static final String string_0354 = 
"TestString_0354"; public static final String string_0355 = "TestString_0355"; public static final String string_0356 = "TestString_0356"; public static final String string_0357 = "TestString_0357"; public static final String string_0358 = "TestString_0358"; public static final String string_0359 = "TestString_0359"; public static final String string_0360 = "TestString_0360"; public static final String string_0361 = "TestString_0361"; public static final String string_0362 = "TestString_0362"; public static final String string_0363 = "TestString_0363"; public static final String string_0364 = "TestString_0364"; public static final String string_0365 = "TestString_0365"; public static final String string_0366 = "TestString_0366"; public static final String string_0367 = "TestString_0367"; public static final String string_0368 = "TestString_0368"; public static final String string_0369 = "TestString_0369"; public static final String string_0370 = "TestString_0370"; public static final String string_0371 = "TestString_0371"; public static final String string_0372 = "TestString_0372"; public static final String string_0373 = "TestString_0373"; public static final String string_0374 = "TestString_0374"; public static final String string_0375 = "TestString_0375"; public static final String string_0376 = "TestString_0376"; public static final String string_0377 = "TestString_0377"; public static final String string_0378 = "TestString_0378"; public static final String string_0379 = "TestString_0379"; public static final String string_0380 = "TestString_0380"; public static final String string_0381 = "TestString_0381"; public static final String string_0382 = "TestString_0382"; public static final String string_0383 = "TestString_0383"; public static final String string_0384 = "TestString_0384"; public static final String string_0385 = "TestString_0385"; public static final String string_0386 = "TestString_0386"; public static final String string_0387 = "TestString_0387"; public static final String string_0388 = "TestString_0388"; public static final String string_0389 = "TestString_0389"; public static final String string_0390 = "TestString_0390"; public static final String string_0391 = "TestString_0391"; public static final String string_0392 = "TestString_0392"; public static final String string_0393 = "TestString_0393"; public static final String string_0394 = "TestString_0394"; public static final String string_0395 = "TestString_0395"; public static final String string_0396 = "TestString_0396"; public static final String string_0397 = "TestString_0397"; public static final String string_0398 = "TestString_0398"; public static final String string_0399 = "TestString_0399"; public static final String string_0400 = "TestString_0400"; public static final String string_0401 = "TestString_0401"; public static final String string_0402 = "TestString_0402"; public static final String string_0403 = "TestString_0403"; public static final String string_0404 = "TestString_0404"; public static final String string_0405 = "TestString_0405"; public static final String string_0406 = "TestString_0406"; public static final String string_0407 = "TestString_0407"; public static final String string_0408 = "TestString_0408"; public static final String string_0409 = "TestString_0409"; public static final String string_0410 = "TestString_0410"; public static final String string_0411 = "TestString_0411"; public static final String string_0412 = "TestString_0412"; public static final String string_0413 = 
"TestString_0413"; public static final String string_0414 = "TestString_0414"; public static final String string_0415 = "TestString_0415"; public static final String string_0416 = "TestString_0416"; public static final String string_0417 = "TestString_0417"; public static final String string_0418 = "TestString_0418"; public static final String string_0419 = "TestString_0419"; public static final String string_0420 = "TestString_0420"; public static final String string_0421 = "TestString_0421"; public static final String string_0422 = "TestString_0422"; public static final String string_0423 = "TestString_0423"; public static final String string_0424 = "TestString_0424"; public static final String string_0425 = "TestString_0425"; public static final String string_0426 = "TestString_0426"; public static final String string_0427 = "TestString_0427"; public static final String string_0428 = "TestString_0428"; public static final String string_0429 = "TestString_0429"; public static final String string_0430 = "TestString_0430"; public static final String string_0431 = "TestString_0431"; public static final String string_0432 = "TestString_0432"; public static final String string_0433 = "TestString_0433"; public static final String string_0434 = "TestString_0434"; public static final String string_0435 = "TestString_0435"; public static final String string_0436 = "TestString_0436"; public static final String string_0437 = "TestString_0437"; public static final String string_0438 = "TestString_0438"; public static final String string_0439 = "TestString_0439"; public static final String string_0440 = "TestString_0440"; public static final String string_0441 = "TestString_0441"; public static final String string_0442 = "TestString_0442"; public static final String string_0443 = "TestString_0443"; public static final String string_0444 = "TestString_0444"; public static final String string_0445 = "TestString_0445"; public static final String string_0446 = "TestString_0446"; public static final String string_0447 = "TestString_0447"; public static final String string_0448 = "TestString_0448"; public static final String string_0449 = "TestString_0449"; public static final String string_0450 = "TestString_0450"; public static final String string_0451 = "TestString_0451"; public static final String string_0452 = "TestString_0452"; public static final String string_0453 = "TestString_0453"; public static final String string_0454 = "TestString_0454"; public static final String string_0455 = "TestString_0455"; public static final String string_0456 = "TestString_0456"; public static final String string_0457 = "TestString_0457"; public static final String string_0458 = "TestString_0458"; public static final String string_0459 = "TestString_0459"; public static final String string_0460 = "TestString_0460"; public static final String string_0461 = "TestString_0461"; public static final String string_0462 = "TestString_0462"; public static final String string_0463 = "TestString_0463"; public static final String string_0464 = "TestString_0464"; public static final String string_0465 = "TestString_0465"; public static final String string_0466 = "TestString_0466"; public static final String string_0467 = "TestString_0467"; public static final String string_0468 = "TestString_0468"; public static final String string_0469 = "TestString_0469"; public static final String string_0470 = "TestString_0470"; public static final String string_0471 = "TestString_0471"; public static final String string_0472 = 
"TestString_0472"; public static final String string_0473 = "TestString_0473"; public static final String string_0474 = "TestString_0474"; public static final String string_0475 = "TestString_0475"; public static final String string_0476 = "TestString_0476"; public static final String string_0477 = "TestString_0477"; public static final String string_0478 = "TestString_0478"; public static final String string_0479 = "TestString_0479"; public static final String string_0480 = "TestString_0480"; public static final String string_0481 = "TestString_0481"; public static final String string_0482 = "TestString_0482"; public static final String string_0483 = "TestString_0483"; public static final String string_0484 = "TestString_0484"; public static final String string_0485 = "TestString_0485"; public static final String string_0486 = "TestString_0486"; public static final String string_0487 = "TestString_0487"; public static final String string_0488 = "TestString_0488"; public static final String string_0489 = "TestString_0489"; public static final String string_0490 = "TestString_0490"; public static final String string_0491 = "TestString_0491"; public static final String string_0492 = "TestString_0492"; public static final String string_0493 = "TestString_0493"; public static final String string_0494 = "TestString_0494"; public static final String string_0495 = "TestString_0495"; public static final String string_0496 = "TestString_0496"; public static final String string_0497 = "TestString_0497"; public static final String string_0498 = "TestString_0498"; public static final String string_0499 = "TestString_0499"; public static final String string_0500 = "TestString_0500"; public static final String string_0501 = "TestString_0501"; public static final String string_0502 = "TestString_0502"; public static final String string_0503 = "TestString_0503"; public static final String string_0504 = "TestString_0504"; public static final String string_0505 = "TestString_0505"; public static final String string_0506 = "TestString_0506"; public static final String string_0507 = "TestString_0507"; public static final String string_0508 = "TestString_0508"; public static final String string_0509 = "TestString_0509"; public static final String string_0510 = "TestString_0510"; public static final String string_0511 = "TestString_0511"; public static final String string_0512 = "TestString_0512"; public static final String string_0513 = "TestString_0513"; public static final String string_0514 = "TestString_0514"; public static final String string_0515 = "TestString_0515"; public static final String string_0516 = "TestString_0516"; public static final String string_0517 = "TestString_0517"; public static final String string_0518 = "TestString_0518"; public static final String string_0519 = "TestString_0519"; public static final String string_0520 = "TestString_0520"; public static final String string_0521 = "TestString_0521"; public static final String string_0522 = "TestString_0522"; public static final String string_0523 = "TestString_0523"; public static final String string_0524 = "TestString_0524"; public static final String string_0525 = "TestString_0525"; public static final String string_0526 = "TestString_0526"; public static final String string_0527 = "TestString_0527"; public static final String string_0528 = "TestString_0528"; public static final String string_0529 = "TestString_0529"; public static final String string_0530 = "TestString_0530"; public static final String string_0531 = 
"TestString_0531"; public static final String string_0532 = "TestString_0532"; public static final String string_0533 = "TestString_0533"; public static final String string_0534 = "TestString_0534"; public static final String string_0535 = "TestString_0535"; public static final String string_0536 = "TestString_0536"; public static final String string_0537 = "TestString_0537"; public static final String string_0538 = "TestString_0538"; public static final String string_0539 = "TestString_0539"; public static final String string_0540 = "TestString_0540"; public static final String string_0541 = "TestString_0541"; public static final String string_0542 = "TestString_0542"; public static final String string_0543 = "TestString_0543"; public static final String string_0544 = "TestString_0544"; public static final String string_0545 = "TestString_0545"; public static final String string_0546 = "TestString_0546"; public static final String string_0547 = "TestString_0547"; public static final String string_0548 = "TestString_0548"; public static final String string_0549 = "TestString_0549"; public static final String string_0550 = "TestString_0550"; public static final String string_0551 = "TestString_0551"; public static final String string_0552 = "TestString_0552"; public static final String string_0553 = "TestString_0553"; public static final String string_0554 = "TestString_0554"; public static final String string_0555 = "TestString_0555"; public static final String string_0556 = "TestString_0556"; public static final String string_0557 = "TestString_0557"; public static final String string_0558 = "TestString_0558"; public static final String string_0559 = "TestString_0559"; public static final String string_0560 = "TestString_0560"; public static final String string_0561 = "TestString_0561"; public static final String string_0562 = "TestString_0562"; public static final String string_0563 = "TestString_0563"; public static final String string_0564 = "TestString_0564"; public static final String string_0565 = "TestString_0565"; public static final String string_0566 = "TestString_0566"; public static final String string_0567 = "TestString_0567"; public static final String string_0568 = "TestString_0568"; public static final String string_0569 = "TestString_0569"; public static final String string_0570 = "TestString_0570"; public static final String string_0571 = "TestString_0571"; public static final String string_0572 = "TestString_0572"; public static final String string_0573 = "TestString_0573"; public static final String string_0574 = "TestString_0574"; public static final String string_0575 = "TestString_0575"; public static final String string_0576 = "TestString_0576"; public static final String string_0577 = "TestString_0577"; public static final String string_0578 = "TestString_0578"; public static final String string_0579 = "TestString_0579"; public static final String string_0580 = "TestString_0580"; public static final String string_0581 = "TestString_0581"; public static final String string_0582 = "TestString_0582"; public static final String string_0583 = "TestString_0583"; public static final String string_0584 = "TestString_0584"; public static final String string_0585 = "TestString_0585"; public static final String string_0586 = "TestString_0586"; public static final String string_0587 = "TestString_0587"; public static final String string_0588 = "TestString_0588"; public static final String string_0589 = "TestString_0589"; public static final String string_0590 = 
"TestString_0590"; public static final String string_0591 = "TestString_0591"; public static final String string_0592 = "TestString_0592"; public static final String string_0593 = "TestString_0593"; public static final String string_0594 = "TestString_0594"; public static final String string_0595 = "TestString_0595"; public static final String string_0596 = "TestString_0596"; public static final String string_0597 = "TestString_0597"; public static final String string_0598 = "TestString_0598"; public static final String string_0599 = "TestString_0599"; public static final String string_0600 = "TestString_0600"; public static final String string_0601 = "TestString_0601"; public static final String string_0602 = "TestString_0602"; public static final String string_0603 = "TestString_0603"; public static final String string_0604 = "TestString_0604"; public static final String string_0605 = "TestString_0605"; public static final String string_0606 = "TestString_0606"; public static final String string_0607 = "TestString_0607"; public static final String string_0608 = "TestString_0608"; public static final String string_0609 = "TestString_0609"; public static final String string_0610 = "TestString_0610"; public static final String string_0611 = "TestString_0611"; public static final String string_0612 = "TestString_0612"; public static final String string_0613 = "TestString_0613"; public static final String string_0614 = "TestString_0614"; public static final String string_0615 = "TestString_0615"; public static final String string_0616 = "TestString_0616"; public static final String string_0617 = "TestString_0617"; public static final String string_0618 = "TestString_0618"; public static final String string_0619 = "TestString_0619"; public static final String string_0620 = "TestString_0620"; public static final String string_0621 = "TestString_0621"; public static final String string_0622 = "TestString_0622"; public static final String string_0623 = "TestString_0623"; public static final String string_0624 = "TestString_0624"; public static final String string_0625 = "TestString_0625"; public static final String string_0626 = "TestString_0626"; public static final String string_0627 = "TestString_0627"; public static final String string_0628 = "TestString_0628"; public static final String string_0629 = "TestString_0629"; public static final String string_0630 = "TestString_0630"; public static final String string_0631 = "TestString_0631"; public static final String string_0632 = "TestString_0632"; public static final String string_0633 = "TestString_0633"; public static final String string_0634 = "TestString_0634"; public static final String string_0635 = "TestString_0635"; public static final String string_0636 = "TestString_0636"; public static final String string_0637 = "TestString_0637"; public static final String string_0638 = "TestString_0638"; public static final String string_0639 = "TestString_0639"; public static final String string_0640 = "TestString_0640"; public static final String string_0641 = "TestString_0641"; public static final String string_0642 = "TestString_0642"; public static final String string_0643 = "TestString_0643"; public static final String string_0644 = "TestString_0644"; public static final String string_0645 = "TestString_0645"; public static final String string_0646 = "TestString_0646"; public static final String string_0647 = "TestString_0647"; public static final String string_0648 = "TestString_0648"; public static final String string_0649 = 
"TestString_0649"; public static final String string_0650 = "TestString_0650"; public static final String string_0651 = "TestString_0651"; public static final String string_0652 = "TestString_0652"; public static final String string_0653 = "TestString_0653"; public static final String string_0654 = "TestString_0654"; public static final String string_0655 = "TestString_0655"; public static final String string_0656 = "TestString_0656"; public static final String string_0657 = "TestString_0657"; public static final String string_0658 = "TestString_0658"; public static final String string_0659 = "TestString_0659"; public static final String string_0660 = "TestString_0660"; public static final String string_0661 = "TestString_0661"; public static final String string_0662 = "TestString_0662"; public static final String string_0663 = "TestString_0663"; public static final String string_0664 = "TestString_0664"; public static final String string_0665 = "TestString_0665"; public static final String string_0666 = "TestString_0666"; public static final String string_0667 = "TestString_0667"; public static final String string_0668 = "TestString_0668"; public static final String string_0669 = "TestString_0669"; public static final String string_0670 = "TestString_0670"; public static final String string_0671 = "TestString_0671"; public static final String string_0672 = "TestString_0672"; public static final String string_0673 = "TestString_0673"; public static final String string_0674 = "TestString_0674"; public static final String string_0675 = "TestString_0675"; public static final String string_0676 = "TestString_0676"; public static final String string_0677 = "TestString_0677"; public static final String string_0678 = "TestString_0678"; public static final String string_0679 = "TestString_0679"; public static final String string_0680 = "TestString_0680"; public static final String string_0681 = "TestString_0681"; public static final String string_0682 = "TestString_0682"; public static final String string_0683 = "TestString_0683"; public static final String string_0684 = "TestString_0684"; public static final String string_0685 = "TestString_0685"; public static final String string_0686 = "TestString_0686"; public static final String string_0687 = "TestString_0687"; public static final String string_0688 = "TestString_0688"; public static final String string_0689 = "TestString_0689"; public static final String string_0690 = "TestString_0690"; public static final String string_0691 = "TestString_0691"; public static final String string_0692 = "TestString_0692"; public static final String string_0693 = "TestString_0693"; public static final String string_0694 = "TestString_0694"; public static final String string_0695 = "TestString_0695"; public static final String string_0696 = "TestString_0696"; public static final String string_0697 = "TestString_0697"; public static final String string_0698 = "TestString_0698"; public static final String string_0699 = "TestString_0699"; public static final String string_0700 = "TestString_0700"; public static final String string_0701 = "TestString_0701"; public static final String string_0702 = "TestString_0702"; public static final String string_0703 = "TestString_0703"; public static final String string_0704 = "TestString_0704"; public static final String string_0705 = "TestString_0705"; public static final String string_0706 = "TestString_0706"; public static final String string_0707 = "TestString_0707"; public static final String string_0708 = 
"TestString_0708"; public static final String string_0709 = "TestString_0709"; public static final String string_0710 = "TestString_0710"; public static final String string_0711 = "TestString_0711"; public static final String string_0712 = "TestString_0712"; public static final String string_0713 = "TestString_0713"; public static final String string_0714 = "TestString_0714"; public static final String string_0715 = "TestString_0715"; public static final String string_0716 = "TestString_0716"; public static final String string_0717 = "TestString_0717"; public static final String string_0718 = "TestString_0718"; public static final String string_0719 = "TestString_0719"; public static final String string_0720 = "TestString_0720"; public static final String string_0721 = "TestString_0721"; public static final String string_0722 = "TestString_0722"; public static final String string_0723 = "TestString_0723"; public static final String string_0724 = "TestString_0724"; public static final String string_0725 = "TestString_0725"; public static final String string_0726 = "TestString_0726"; public static final String string_0727 = "TestString_0727"; public static final String string_0728 = "TestString_0728"; public static final String string_0729 = "TestString_0729"; public static final String string_0730 = "TestString_0730"; public static final String string_0731 = "TestString_0731"; public static final String string_0732 = "TestString_0732"; public static final String string_0733 = "TestString_0733"; public static final String string_0734 = "TestString_0734"; public static final String string_0735 = "TestString_0735"; public static final String string_0736 = "TestString_0736"; public static final String string_0737 = "TestString_0737"; public static final String string_0738 = "TestString_0738"; public static final String string_0739 = "TestString_0739"; public static final String string_0740 = "TestString_0740"; public static final String string_0741 = "TestString_0741"; public static final String string_0742 = "TestString_0742"; public static final String string_0743 = "TestString_0743"; public static final String string_0744 = "TestString_0744"; public static final String string_0745 = "TestString_0745"; public static final String string_0746 = "TestString_0746"; public static final String string_0747 = "TestString_0747"; public static final String string_0748 = "TestString_0748"; public static final String string_0749 = "TestString_0749"; public static final String string_0750 = "TestString_0750"; public static final String string_0751 = "TestString_0751"; public static final String string_0752 = "TestString_0752"; public static final String string_0753 = "TestString_0753"; public static final String string_0754 = "TestString_0754"; public static final String string_0755 = "TestString_0755"; public static final String string_0756 = "TestString_0756"; public static final String string_0757 = "TestString_0757"; public static final String string_0758 = "TestString_0758"; public static final String string_0759 = "TestString_0759"; public static final String string_0760 = "TestString_0760"; public static final String string_0761 = "TestString_0761"; public static final String string_0762 = "TestString_0762"; public static final String string_0763 = "TestString_0763"; public static final String string_0764 = "TestString_0764"; public static final String string_0765 = "TestString_0765"; public static final String string_0766 = "TestString_0766"; public static final String string_0767 = 
"TestString_0767"; public static final String string_0768 = "TestString_0768"; public static final String string_0769 = "TestString_0769"; public static final String string_0770 = "TestString_0770"; public static final String string_0771 = "TestString_0771"; public static final String string_0772 = "TestString_0772"; public static final String string_0773 = "TestString_0773"; public static final String string_0774 = "TestString_0774"; public static final String string_0775 = "TestString_0775"; public static final String string_0776 = "TestString_0776"; public static final String string_0777 = "TestString_0777"; public static final String string_0778 = "TestString_0778"; public static final String string_0779 = "TestString_0779"; public static final String string_0780 = "TestString_0780"; public static final String string_0781 = "TestString_0781"; public static final String string_0782 = "TestString_0782"; public static final String string_0783 = "TestString_0783"; public static final String string_0784 = "TestString_0784"; public static final String string_0785 = "TestString_0785"; public static final String string_0786 = "TestString_0786"; public static final String string_0787 = "TestString_0787"; public static final String string_0788 = "TestString_0788"; public static final String string_0789 = "TestString_0789"; public static final String string_0790 = "TestString_0790"; public static final String string_0791 = "TestString_0791"; public static final String string_0792 = "TestString_0792"; public static final String string_0793 = "TestString_0793"; public static final String string_0794 = "TestString_0794"; public static final String string_0795 = "TestString_0795"; public static final String string_0796 = "TestString_0796"; public static final String string_0797 = "TestString_0797"; public static final String string_0798 = "TestString_0798"; public static final String string_0799 = "TestString_0799"; public static final String string_0800 = "TestString_0800"; public static final String string_0801 = "TestString_0801"; public static final String string_0802 = "TestString_0802"; public static final String string_0803 = "TestString_0803"; public static final String string_0804 = "TestString_0804"; public static final String string_0805 = "TestString_0805"; public static final String string_0806 = "TestString_0806"; public static final String string_0807 = "TestString_0807"; public static final String string_0808 = "TestString_0808"; public static final String string_0809 = "TestString_0809"; public static final String string_0810 = "TestString_0810"; public static final String string_0811 = "TestString_0811"; public static final String string_0812 = "TestString_0812"; public static final String string_0813 = "TestString_0813"; public static final String string_0814 = "TestString_0814"; public static final String string_0815 = "TestString_0815"; public static final String string_0816 = "TestString_0816"; public static final String string_0817 = "TestString_0817"; public static final String string_0818 = "TestString_0818"; public static final String string_0819 = "TestString_0819"; public static final String string_0820 = "TestString_0820"; public static final String string_0821 = "TestString_0821"; public static final String string_0822 = "TestString_0822"; public static final String string_0823 = "TestString_0823"; public static final String string_0824 = "TestString_0824"; public static final String string_0825 = "TestString_0825"; public static final String string_0826 = 
"TestString_0826"; public static final String string_0827 = "TestString_0827"; public static final String string_0828 = "TestString_0828"; public static final String string_0829 = "TestString_0829"; public static final String string_0830 = "TestString_0830"; public static final String string_0831 = "TestString_0831"; public static final String string_0832 = "TestString_0832"; public static final String string_0833 = "TestString_0833"; public static final String string_0834 = "TestString_0834"; public static final String string_0835 = "TestString_0835"; public static final String string_0836 = "TestString_0836"; public static final String string_0837 = "TestString_0837"; public static final String string_0838 = "TestString_0838"; public static final String string_0839 = "TestString_0839"; public static final String string_0840 = "TestString_0840"; public static final String string_0841 = "TestString_0841"; public static final String string_0842 = "TestString_0842"; public static final String string_0843 = "TestString_0843"; public static final String string_0844 = "TestString_0844"; public static final String string_0845 = "TestString_0845"; public static final String string_0846 = "TestString_0846"; public static final String string_0847 = "TestString_0847"; public static final String string_0848 = "TestString_0848"; public static final String string_0849 = "TestString_0849"; public static final String string_0850 = "TestString_0850"; public static final String string_0851 = "TestString_0851"; public static final String string_0852 = "TestString_0852"; public static final String string_0853 = "TestString_0853"; public static final String string_0854 = "TestString_0854"; public static final String string_0855 = "TestString_0855"; public static final String string_0856 = "TestString_0856"; public static final String string_0857 = "TestString_0857"; public static final String string_0858 = "TestString_0858"; public static final String string_0859 = "TestString_0859"; public static final String string_0860 = "TestString_0860"; public static final String string_0861 = "TestString_0861"; public static final String string_0862 = "TestString_0862"; public static final String string_0863 = "TestString_0863"; public static final String string_0864 = "TestString_0864"; public static final String string_0865 = "TestString_0865"; public static final String string_0866 = "TestString_0866"; public static final String string_0867 = "TestString_0867"; public static final String string_0868 = "TestString_0868"; public static final String string_0869 = "TestString_0869"; public static final String string_0870 = "TestString_0870"; public static final String string_0871 = "TestString_0871"; public static final String string_0872 = "TestString_0872"; public static final String string_0873 = "TestString_0873"; public static final String string_0874 = "TestString_0874"; public static final String string_0875 = "TestString_0875"; public static final String string_0876 = "TestString_0876"; public static final String string_0877 = "TestString_0877"; public static final String string_0878 = "TestString_0878"; public static final String string_0879 = "TestString_0879"; public static final String string_0880 = "TestString_0880"; public static final String string_0881 = "TestString_0881"; public static final String string_0882 = "TestString_0882"; public static final String string_0883 = "TestString_0883"; public static final String string_0884 = "TestString_0884"; public static final String string_0885 = 
"TestString_0885"; public static final String string_0886 = "TestString_0886"; public static final String string_0887 = "TestString_0887"; public static final String string_0888 = "TestString_0888"; public static final String string_0889 = "TestString_0889"; public static final String string_0890 = "TestString_0890"; public static final String string_0891 = "TestString_0891"; public static final String string_0892 = "TestString_0892"; public static final String string_0893 = "TestString_0893"; public static final String string_0894 = "TestString_0894"; public static final String string_0895 = "TestString_0895"; public static final String string_0896 = "TestString_0896"; public static final String string_0897 = "TestString_0897"; public static final String string_0898 = "TestString_0898"; public static final String string_0899 = "TestString_0899"; public static final String string_0900 = "TestString_0900"; public static final String string_0901 = "TestString_0901"; public static final String string_0902 = "TestString_0902"; public static final String string_0903 = "TestString_0903"; public static final String string_0904 = "TestString_0904"; public static final String string_0905 = "TestString_0905"; public static final String string_0906 = "TestString_0906"; public static final String string_0907 = "TestString_0907"; public static final String string_0908 = "TestString_0908"; public static final String string_0909 = "TestString_0909"; public static final String string_0910 = "TestString_0910"; public static final String string_0911 = "TestString_0911"; public static final String string_0912 = "TestString_0912"; public static final String string_0913 = "TestString_0913"; public static final String string_0914 = "TestString_0914"; public static final String string_0915 = "TestString_0915"; public static final String string_0916 = "TestString_0916"; public static final String string_0917 = "TestString_0917"; public static final String string_0918 = "TestString_0918"; public static final String string_0919 = "TestString_0919"; public static final String string_0920 = "TestString_0920"; public static final String string_0921 = "TestString_0921"; public static final String string_0922 = "TestString_0922"; public static final String string_0923 = "TestString_0923"; public static final String string_0924 = "TestString_0924"; public static final String string_0925 = "TestString_0925"; public static final String string_0926 = "TestString_0926"; public static final String string_0927 = "TestString_0927"; public static final String string_0928 = "TestString_0928"; public static final String string_0929 = "TestString_0929"; public static final String string_0930 = "TestString_0930"; public static final String string_0931 = "TestString_0931"; public static final String string_0932 = "TestString_0932"; public static final String string_0933 = "TestString_0933"; public static final String string_0934 = "TestString_0934"; public static final String string_0935 = "TestString_0935"; public static final String string_0936 = "TestString_0936"; public static final String string_0937 = "TestString_0937"; public static final String string_0938 = "TestString_0938"; public static final String string_0939 = "TestString_0939"; public static final String string_0940 = "TestString_0940"; public static final String string_0941 = "TestString_0941"; public static final String string_0942 = "TestString_0942"; public static final String string_0943 = "TestString_0943"; public static final String string_0944 = 
"TestString_0944"; public static final String string_0945 = "TestString_0945"; public static final String string_0946 = "TestString_0946"; public static final String string_0947 = "TestString_0947"; public static final String string_0948 = "TestString_0948"; public static final String string_0949 = "TestString_0949"; public static final String string_0950 = "TestString_0950"; public static final String string_0951 = "TestString_0951"; public static final String string_0952 = "TestString_0952"; public static final String string_0953 = "TestString_0953"; public static final String string_0954 = "TestString_0954"; public static final String string_0955 = "TestString_0955"; public static final String string_0956 = "TestString_0956"; public static final String string_0957 = "TestString_0957"; public static final String string_0958 = "TestString_0958"; public static final String string_0959 = "TestString_0959"; public static final String string_0960 = "TestString_0960"; public static final String string_0961 = "TestString_0961"; public static final String string_0962 = "TestString_0962"; public static final String string_0963 = "TestString_0963"; public static final String string_0964 = "TestString_0964"; public static final String string_0965 = "TestString_0965"; public static final String string_0966 = "TestString_0966"; public static final String string_0967 = "TestString_0967"; public static final String string_0968 = "TestString_0968"; public static final String string_0969 = "TestString_0969"; public static final String string_0970 = "TestString_0970"; public static final String string_0971 = "TestString_0971"; public static final String string_0972 = "TestString_0972"; public static final String string_0973 = "TestString_0973"; public static final String string_0974 = "TestString_0974"; public static final String string_0975 = "TestString_0975"; public static final String string_0976 = "TestString_0976"; public static final String string_0977 = "TestString_0977"; public static final String string_0978 = "TestString_0978"; public static final String string_0979 = "TestString_0979"; public static final String string_0980 = "TestString_0980"; public static final String string_0981 = "TestString_0981"; public static final String string_0982 = "TestString_0982"; public static final String string_0983 = "TestString_0983"; public static final String string_0984 = "TestString_0984"; public static final String string_0985 = "TestString_0985"; public static final String string_0986 = "TestString_0986"; public static final String string_0987 = "TestString_0987"; public static final String string_0988 = "TestString_0988"; public static final String string_0989 = "TestString_0989"; public static final String string_0990 = "TestString_0990"; public static final String string_0991 = "TestString_0991"; public static final String string_0992 = "TestString_0992"; public static final String string_0993 = "TestString_0993"; public static final String string_0994 = "TestString_0994"; public static final String string_0995 = "TestString_0995"; public static final String string_0996 = "TestString_0996"; public static final String string_0997 = "TestString_0997"; public static final String string_0998 = "TestString_0998"; public static final String string_0999 = "TestString_0999"; public static final String string_1000 = "TestString_1000"; public static final String string_1001 = "TestString_1001"; public static final String string_1002 = "TestString_1002"; public static final String string_1003 = 
"TestString_1003"; public static final String string_1004 = "TestString_1004"; public static final String string_1005 = "TestString_1005"; public static final String string_1006 = "TestString_1006"; public static final String string_1007 = "TestString_1007"; public static final String string_1008 = "TestString_1008"; public static final String string_1009 = "TestString_1009"; public static final String string_1010 = "TestString_1010"; public static final String string_1011 = "TestString_1011"; public static final String string_1012 = "TestString_1012"; public static final String string_1013 = "TestString_1013"; public static final String string_1014 = "TestString_1014"; public static final String string_1015 = "TestString_1015"; public static final String string_1016 = "TestString_1016"; public static final String string_1017 = "TestString_1017"; public static final String string_1018 = "TestString_1018"; public static final String string_1019 = "TestString_1019"; public static final String string_1020 = "TestString_1020"; public static final String string_1021 = "TestString_1021"; public static final String string_1022 = "TestString_1022"; public static final String string_1023 = "TestString_1023"; public static final String string_1024 = "TestString_1024"; public void timeConstStringsWithConflict(int count) { for (int i = 0; i < count; ++i) { $noinline$foo("TestString_0000"); $noinline$foo("TestString_1024"); } } public void timeConstStringsWithoutConflict(int count) { for (int i = 0; i < count; ++i) { $noinline$foo("TestString_0001"); $noinline$foo("TestString_1023"); } } static void $noinline$foo(String s) { if (doThrow) { throw new Error(); } } public static boolean doThrow = false; } android-platform-art-8.1.0+r23/benchmark/jni-perf/000077500000000000000000000000001336577252300216315ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/jni-perf/info.txt000066400000000000000000000000661336577252300233270ustar00rootroot00000000000000Tests for measuring performance of JNI state changes. android-platform-art-8.1.0+r23/benchmark/jni-perf/perf_jni.cc000066400000000000000000000022161336577252300237350ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include <stdio.h> #include "jni.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" namespace art { namespace { extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfJniEmptyCall(JNIEnv*, jobject) {} extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOACall(JNIEnv* env, jobject) { ScopedObjectAccess soa(env); } extern "C" JNIEXPORT void JNICALL Java_JniPerfBenchmark_perfSOAUncheckedCall(JNIEnv*, jobject) { ScopedObjectAccessUnchecked soa(Thread::Current()); } } // namespace } // namespace art android-platform-art-8.1.0+r23/benchmark/jni-perf/src/000077500000000000000000000000001336577252300224205ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/jni-perf/src/JniPerfBenchmark.java000066400000000000000000000024661336577252300264430ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ public class JniPerfBenchmark { private static final String MSG = "ABCDE"; native void perfJniEmptyCall(); native void perfSOACall(); native void perfSOAUncheckedCall(); public void timeFastJNI(int N) { // TODO: This might be an intrinsic. for (long i = 0; i < N; i++) { char c = MSG.charAt(2); } } public void timeEmptyCall(int N) { for (long i = 0; i < N; i++) { perfJniEmptyCall(); } } public void timeSOACall(int N) { for (long i = 0; i < N; i++) { perfSOACall(); } } public void timeSOAUncheckedCall(int N) { for (long i = 0; i < N; i++) { perfSOAUncheckedCall(); } } { System.loadLibrary("artbenchmark"); } } android-platform-art-8.1.0+r23/benchmark/jni_loader.cc000066400000000000000000000020571336577252300225400ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <jni.h> extern void register_micro_native_methods(JNIEnv* env); jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) { JNIEnv* env; if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) { return -1; } // List of functions to call to register methods explicitly. // Otherwise we use the regular JNI naming conventions to register implicitly.
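// Editorial sketch (not in the original source): explicit registration binds a
// {name, signature, fnPtr} table entry through RegisterNatives, whereas the
// implicit path resolves a mangled symbol such as
// Java_JniPerfBenchmark_perfJniEmptyCall via dlsym the first time the Java
// side invokes the native method.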
register_micro_native_methods(env); return JNI_VERSION_1_6; } android-platform-art-8.1.0+r23/benchmark/jobject-benchmark/000077500000000000000000000000001336577252300234675ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/jobject-benchmark/info.txt000066400000000000000000000002561336577252300251660ustar00rootroot00000000000000Benchmark for jobject functions Measures performance of: Add/RemoveLocalRef Add/RemoveGlobalRef Add/RemoveWeakGlobalRef Decoding local, weak, global, handle scope jobjects. android-platform-art-8.1.0+r23/benchmark/jobject-benchmark/jobject_benchmark.cc000066400000000000000000000067151336577252300274410ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "jni.h" #include "java_vm_ext.h" #include "mirror/class-inl.h" #include "scoped_thread_state_change-inl.h" namespace art { namespace { extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveLocal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); for (jint i = 0; i < reps; ++i) { jobject ref = soa.Env()->AddLocalReference<jobject>(obj); soa.Env()->DeleteLocalRef(ref); } } extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeLocal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); jobject ref = soa.Env()->AddLocalReference<jobject>(obj); for (jint i = 0; i < reps; ++i) { CHECK_EQ(soa.Decode<mirror::Object>(ref), obj); } soa.Env()->DeleteLocalRef(ref); } extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveGlobal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); for (jint i = 0; i < reps; ++i) { jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); soa.Vm()->DeleteGlobalRef(soa.Self(), ref); } } extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeGlobal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); jobject ref = soa.Vm()->AddGlobalRef(soa.Self(), obj); for (jint i = 0; i < reps; ++i) { CHECK_EQ(soa.Decode<mirror::Object>(ref), obj); } soa.Vm()->DeleteGlobalRef(soa.Self(), ref); } extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeAddRemoveWeakGlobal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); for (jint i = 0; i < reps; ++i) { jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); } } extern "C" JNIEXPORT void JNICALL Java_JObjectBenchmark_timeDecodeWeakGlobal( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(jobj); CHECK(obj != nullptr); jobject ref = soa.Vm()->AddWeakGlobalRef(soa.Self(), obj); for (jint i = 0; i < reps; ++i) { CHECK_EQ(soa.Decode<mirror::Object>(ref), obj); } soa.Vm()->DeleteWeakGlobalRef(soa.Self(), ref); } extern "C" JNIEXPORT void JNICALL
Java_JObjectBenchmark_timeDecodeHandleScopeRef( JNIEnv* env, jobject jobj, jint reps) { ScopedObjectAccess soa(env); for (jint i = 0; i < reps; ++i) { soa.Decode<mirror::Object>(jobj); } } } // namespace } // namespace art android-platform-art-8.1.0+r23/benchmark/jobject-benchmark/src/000077500000000000000000000000001336577252300242565ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/jobject-benchmark/src/JObjectBenchmark.java000066400000000000000000000025101336577252300302520ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ public class JObjectBenchmark { public JObjectBenchmark() { // Make sure to link methods before benchmark starts. System.loadLibrary("artbenchmark"); timeAddRemoveLocal(1); timeDecodeLocal(1); timeAddRemoveGlobal(1); timeDecodeGlobal(1); timeAddRemoveWeakGlobal(1); timeDecodeWeakGlobal(1); timeDecodeHandleScopeRef(1); } public native void timeAddRemoveLocal(int reps); public native void timeDecodeLocal(int reps); public native void timeAddRemoveGlobal(int reps); public native void timeDecodeGlobal(int reps); public native void timeAddRemoveWeakGlobal(int reps); public native void timeDecodeWeakGlobal(int reps); public native void timeDecodeHandleScopeRef(int reps); } android-platform-art-8.1.0+r23/benchmark/micro-native/000077500000000000000000000000001336577252300225145ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/micro-native/micro_native.cc000066400000000000000000000160331336577252300255050ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/ #include <jni.h> #include <stdio.h> #ifndef NATIVE_METHOD #define NATIVE_METHOD(className, functionName, signature) \ { #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) } #endif #define NELEM(x) (sizeof(x)/sizeof((x)[0])) #define GLUE4(a, b, c, d) a ## b ## c ## d #define GLUE4_(a, b, c, d) GLUE4(a, b, c, d) #define CLASS_NAME "benchmarks/MicroNative/java/NativeMethods" #define CLASS_INFIX benchmarks_MicroNative_java_NativeMethods #define NAME_NORMAL_JNI_METHOD(name) GLUE4_(Java_, CLASS_INFIX, _, name) #define NAME_CRITICAL_JNI_METHOD(name) GLUE4_(JavaCritical_, CLASS_INFIX, _, name) #define DEFINE_NORMAL_JNI_METHOD(ret, name) extern "C" JNIEXPORT ret JNICALL GLUE4_(Java_, CLASS_INFIX, _, name) #define DEFINE_CRITICAL_JNI_METHOD(ret, name) extern "C" JNIEXPORT ret JNICALL GLUE4_(JavaCritical_, CLASS_INFIX, _, name) static void NativeMethods_emptyJniStaticSynchronizedMethod0(JNIEnv*, jclass) { } static void NativeMethods_emptyJniSynchronizedMethod0(JNIEnv*, jclass) { } static JNINativeMethod gMethods_NormalOnly[] = { NATIVE_METHOD(NativeMethods, emptyJniStaticSynchronizedMethod0, "()V"), NATIVE_METHOD(NativeMethods, emptyJniSynchronizedMethod0, "()V"), }; static void NativeMethods_emptyJniMethod0(JNIEnv*, jobject) { } static void NativeMethods_emptyJniMethod6(JNIEnv*, jobject, int, int, int, int, int, int) { } static void NativeMethods_emptyJniMethod6L(JNIEnv*, jobject, jobject, jarray, jarray, jobject, jarray, jarray) { } static void NativeMethods_emptyJniStaticMethod6L(JNIEnv*, jclass, jobject, jarray, jarray, jobject, jarray, jarray) { } static void NativeMethods_emptyJniStaticMethod0(JNIEnv*, jclass) { } static void NativeMethods_emptyJniStaticMethod6(JNIEnv*, jclass, int, int, int, int, int, int) { } static JNINativeMethod gMethods[] = { NATIVE_METHOD(NativeMethods, emptyJniMethod0, "()V"), NATIVE_METHOD(NativeMethods, emptyJniMethod6, "(IIIIII)V"), NATIVE_METHOD(NativeMethods, emptyJniMethod6L, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6L, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod0, "()V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6, "(IIIIII)V"), }; static void NativeMethods_emptyJniMethod0_Fast(JNIEnv*, jobject) { } static void NativeMethods_emptyJniMethod6_Fast(JNIEnv*, jobject, int, int, int, int, int, int) { } static void NativeMethods_emptyJniMethod6L_Fast(JNIEnv*, jobject, jobject, jarray, jarray, jobject, jarray, jarray) { } static void NativeMethods_emptyJniStaticMethod6L_Fast(JNIEnv*, jclass, jobject, jarray, jarray, jobject, jarray, jarray) { } static void NativeMethods_emptyJniStaticMethod0_Fast(JNIEnv*, jclass) { } static void NativeMethods_emptyJniStaticMethod6_Fast(JNIEnv*, jclass, int, int, int, int, int, int) { } static JNINativeMethod gMethods_Fast[] = { NATIVE_METHOD(NativeMethods, emptyJniMethod0_Fast, "()V"), NATIVE_METHOD(NativeMethods, emptyJniMethod6_Fast, "(IIIIII)V"), NATIVE_METHOD(NativeMethods, emptyJniMethod6L_Fast, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6L_Fast, "(Ljava/lang/String;[Ljava/lang/String;[[ILjava/lang/Object;[Ljava/lang/Object;[[[[Ljava/lang/Object;)V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod0_Fast, "()V"), NATIVE_METHOD(NativeMethods, emptyJniStaticMethod6_Fast,
"(IIIIII)V"), }; // Have both a Java_ and a JavaCritical_ version of the same empty method. // The runtime automatically selects the right one when doing a dlsym-based native lookup. DEFINE_NORMAL_JNI_METHOD(void, emptyJniStaticMethod0_1Critical)(JNIEnv*, jclass) { } DEFINE_CRITICAL_JNI_METHOD(void, emptyJniStaticMethod0_1Critical)() { } DEFINE_NORMAL_JNI_METHOD(void, emptyJniStaticMethod6_1Critical)(JNIEnv*, jclass, int, int, int, int, int, int) { } DEFINE_CRITICAL_JNI_METHOD(void, emptyJniStaticMethod6_1Critical)(int, int, int, int, int, int) { } static JNINativeMethod gMethods_Critical[] = { // Don't use NATIVE_METHOD because the name is mangled differently. { "emptyJniStaticMethod0_Critical", "()V", reinterpret_cast(NAME_CRITICAL_JNI_METHOD(emptyJniStaticMethod0_1Critical)) }, { "emptyJniStaticMethod6_Critical", "(IIIIII)V", reinterpret_cast(NAME_CRITICAL_JNI_METHOD(emptyJniStaticMethod6_1Critical)) } }; void jniRegisterNativeMethods(JNIEnv* env, const char* className, const JNINativeMethod* methods, int numMethods) { jclass c = env->FindClass(className); if (c == nullptr) { char* tmp; const char* msg; if (asprintf(&tmp, "Native registration unable to find class '%s'; aborting...", className) == -1) { // Allocation failed, print default warning. msg = "Native registration unable to find class; aborting..."; } else { msg = tmp; } env->FatalError(msg); } if (env->RegisterNatives(c, methods, numMethods) < 0) { char* tmp; const char* msg; if (asprintf(&tmp, "RegisterNatives failed for '%s'; aborting...", className) == -1) { // Allocation failed, print default warning. msg = "RegisterNatives failed; aborting..."; } else { msg = tmp; } env->FatalError(msg); } } void register_micro_native_methods(JNIEnv* env) { jniRegisterNativeMethods(env, CLASS_NAME, gMethods_NormalOnly, NELEM(gMethods_NormalOnly)); jniRegisterNativeMethods(env, CLASS_NAME, gMethods, NELEM(gMethods)); jniRegisterNativeMethods(env, CLASS_NAME, gMethods_Fast, NELEM(gMethods_Fast)); if (env->FindClass("dalvik/annotation/optimization/CriticalNative") != nullptr) { // Only register them explicitly if the annotation is present. jniRegisterNativeMethods(env, CLASS_NAME, gMethods_Critical, NELEM(gMethods_Critical)); } else { if (env->ExceptionCheck()) { // It will throw NoClassDefFoundError env->ExceptionClear(); } } // else let them be registered implicitly. } android-platform-art-8.1.0+r23/benchmark/scoped-primitive-array/000077500000000000000000000000001336577252300245165ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/scoped-primitive-array/info.txt000066400000000000000000000000711336577252300262100ustar00rootroot00000000000000Tests for measuring performance of ScopedPrimitiveArray. android-platform-art-8.1.0+r23/benchmark/scoped-primitive-array/scoped_primitive_array.cc000066400000000000000000000034611336577252300315740ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "jni.h" #include "nativehelper/ScopedPrimitiveArray.h" extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureByteArray( JNIEnv* env, jclass, int reps, jbyteArray arr) { jlong ret = 0; for (jint i = 0; i < reps; ++i) { ScopedByteArrayRO sc(env, arr); ret += sc[0] + sc[sc.size() - 1]; } return ret; } extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureShortArray( JNIEnv* env, jclass, int reps, jshortArray arr) { jlong ret = 0; for (jint i = 0; i < reps; ++i) { ScopedShortArrayRO sc(env, arr); ret += sc[0] + sc[sc.size() - 1]; } return ret; } extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureIntArray( JNIEnv* env, jclass, int reps, jintArray arr) { jlong ret = 0; for (jint i = 0; i < reps; ++i) { ScopedIntArrayRO sc(env, arr); ret += sc[0] + sc[sc.size() - 1]; } return ret; } extern "C" JNIEXPORT jlong JNICALL Java_ScopedPrimitiveArrayBenchmark_measureLongArray( JNIEnv* env, jclass, int reps, jlongArray arr) { jlong ret = 0; for (jint i = 0; i < reps; ++i) { ScopedLongArrayRO sc(env, arr); ret += sc[0] + sc[sc.size() - 1]; } return ret; } android-platform-art-8.1.0+r23/benchmark/scoped-primitive-array/src/000077500000000000000000000000001336577252300253055ustar00rootroot00000000000000ScopedPrimitiveArrayBenchmark.java000066400000000000000000000054261336577252300340200ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/scoped-primitive-array/src/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ public class ScopedPrimitiveArrayBenchmark { // Measure adds the first and last element of the array by using ScopedPrimitiveArray. 
static native long measureByteArray(int reps, byte[] arr); static native long measureShortArray(int reps, short[] arr); static native long measureIntArray(int reps, int[] arr); static native long measureLongArray(int reps, long[] arr); static final int smallLength = 16; static final int mediumLength = 256; static final int largeLength = 8096; static byte[] smallBytes = new byte[smallLength]; static byte[] mediumBytes = new byte[mediumLength]; static byte[] largeBytes = new byte[largeLength]; static short[] smallShorts = new short[smallLength]; static short[] mediumShorts = new short[mediumLength]; static short[] largeShorts = new short[largeLength]; static int[] smallInts = new int[smallLength]; static int[] mediumInts = new int[mediumLength]; static int[] largeInts = new int[largeLength]; static long[] smallLongs = new long[smallLength]; static long[] mediumLongs = new long[mediumLength]; static long[] largeLongs = new long[largeLength]; public void timeSmallBytes(int reps) { measureByteArray(reps, smallBytes); } public void timeMediumBytes(int reps) { measureByteArray(reps, mediumBytes); } public void timeLargeBytes(int reps) { measureByteArray(reps, largeBytes); } public void timeSmallShorts(int reps) { measureShortArray(reps, smallShorts); } public void timeMediumShorts(int reps) { measureShortArray(reps, mediumShorts); } public void timeLargeShorts(int reps) { measureShortArray(reps, largeShorts); } public void timeSmallInts(int reps) { measureIntArray(reps, smallInts); } public void timeMediumInts(int reps) { measureIntArray(reps, mediumInts); } public void timeLargeInts(int reps) { measureIntArray(reps, largeInts); } public void timeSmallLongs(int reps) { measureLongArray(reps, smallLongs); } public void timeMediumLongs(int reps) { measureLongArray(reps, mediumLongs); } public void timeLargeLongs(int reps) { measureLongArray(reps, largeLongs); } { System.loadLibrary("artbenchmark"); } } android-platform-art-8.1.0+r23/benchmark/string-indexof/000077500000000000000000000000001336577252300230575ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/string-indexof/info.txt000066400000000000000000000001021336577252300245440ustar00rootroot00000000000000Benchmarks for repeating String.indexOf() instructions in a loop. android-platform-art-8.1.0+r23/benchmark/string-indexof/src/000077500000000000000000000000001336577252300236465ustar00rootroot00000000000000android-platform-art-8.1.0+r23/benchmark/string-indexof/src/StringIndexOfBenchmark.java000066400000000000000000000063141336577252300310530ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ public class StringIndexOfBenchmark { public static final String string36 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; // length = 36 public void timeIndexOf0(int count) { final char c = '0'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf1(int count) { final char c = '1'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf2(int count) { final char c = '2'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf3(int count) { final char c = '3'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf4(int count) { final char c = '4'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf7(int count) { final char c = '7'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf8(int count) { final char c = '8'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOfF(int count) { final char c = 'F'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOfG(int count) { final char c = 'G'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOfV(int count) { final char c = 'V'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOfW(int count) { final char c = 'W'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } public void timeIndexOf_(int count) { final char c = '_'; String s = string36; for (int i = 0; i < count; ++i) { $noinline$indexOf(s, c); } } static int $noinline$indexOf(String s, char c) { if (doThrow) { throw new Error(); } return s.indexOf(c); } public static boolean doThrow = false; } android-platform-art-8.1.0+r23/build/000077500000000000000000000000001336577252300172645ustar00rootroot00000000000000android-platform-art-8.1.0+r23/build/Android.bp000066400000000000000000000123541336577252300211740ustar00rootroot00000000000000bootstrap_go_package { name: "soong-art", pkgPath: "android/soong/art", deps: [ "blueprint", "blueprint-pathtools", "soong", "soong-android", "soong-cc", ], srcs: [ "art.go", "codegen.go", "makevars.go", ], pluginFor: ["soong_build"], } art_global_defaults { // Additional flags are computed by art.go name: "art_defaults", clang: true, cflags: [ // Base set of cflags used by all things ART. "-fno-rtti", "-ggdb3", "-Wall", "-Werror", "-Wextra", "-Wstrict-aliasing", "-fstrict-aliasing", "-Wunreachable-code", "-Wredundant-decls", "-Wshadow", "-Wunused", "-fvisibility=protected", // Warn about thread safety violations with clang. "-Wthread-safety", "-Wthread-safety-negative", // Warn if switch fallthroughs aren't annotated. "-Wimplicit-fallthrough", // Enable float equality warnings. "-Wfloat-equal", // Enable warning of converting ints to void*. "-Wint-to-void-pointer-cast", // Enable warning of wrong unused annotations. "-Wused-but-marked-unused", // Enable warning for deprecated language features. "-Wdeprecated", // Enable warning for unreachable break & return. "-Wunreachable-code-break", "-Wunreachable-code-return", // Enable thread annotations for std::mutex, etc. "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS", ], target: { android: { cflags: [ "-DART_TARGET", // Enable missing-noreturn only on non-Mac. 
As lots of things are not implemented // for Apple, it's a pain. "-Wmissing-noreturn", // To use oprofile_android --callgraph, uncomment this and recompile with // mmma -j art // "-fno-omit-frame-pointer", // "-marm", // "-mapcs", ], include_dirs: [ // We optimize Thread::Current() with a direct TLS access. This requires access to a // private Bionic header. "bionic/libc/private", ], }, linux: { cflags: [ // Enable missing-noreturn only on non-Mac. As lots of things are not implemented for // Apple, it's a pain. "-Wmissing-noreturn", ], host_ldlibs: [ "-lrt", ], }, host: { cflags: [ // Bug: 15446488. We don't omit the frame pointer to work around // clang/libunwind bugs that cause SEGVs in run-test-004-ThreadStress. "-fno-omit-frame-pointer", ], host_ldlibs: [ "-ldl", "-lpthread", ], }, }, codegen: { arm: { cflags: ["-DART_ENABLE_CODEGEN_arm"], }, arm64: { cflags: ["-DART_ENABLE_CODEGEN_arm64"], }, mips: { cflags: ["-DART_ENABLE_CODEGEN_mips"], }, mips64: { cflags: ["-DART_ENABLE_CODEGEN_mips64"], }, x86: { cflags: ["-DART_ENABLE_CODEGEN_x86"], }, x86_64: { cflags: ["-DART_ENABLE_CODEGEN_x86_64"], }, }, include_dirs: [ "external/icu/icu4c/source/common", "external/lz4/lib", "external/valgrind/include", "external/valgrind", "external/vixl/src", "external/zlib", "libnativehelper/platform_include" ], tidy_checks: [ "-google-default-arguments", // We have local stores that are only used for debug checks. "-clang-analyzer-deadcode.DeadStores", // We are OK with some static globals and that they can, in theory, throw. "-cert-err58-cpp", // We have lots of C-style variadic functions, and are OK with them. JNI ensures // that working around this warning would be extra-painful. "-cert-dcl50-cpp", // No exceptions. "-misc-noexcept-move-constructor", ], tidy_flags: [ // The static analyzer treats DCHECK as always enabled; we sometimes get // false positives when we use DCHECKs with code that relies on NDEBUG. "-extra-arg=-UNDEBUG", // clang-tidy complains about functions like: // void foo() { CHECK(kIsFooEnabled); /* do foo... */ } // not being marked noreturn if kIsFooEnabled is false. "-extra-arg=-Wno-missing-noreturn", ], } art_debug_defaults { name: "art_debug_defaults", cflags: [ "-DDYNAMIC_ANNOTATIONS_ENABLED=1", "-DVIXL_DEBUG", "-UNDEBUG", ], asflags: [ "-UNDEBUG", ], target: { // This has to be duplicated for android and host to make sure it // comes after the -Wframe-larger-than warnings inserted by art.go // target-specific properties android: { cflags: ["-Wno-frame-larger-than="], }, host: { cflags: ["-Wno-frame-larger-than="], }, }, } android-platform-art-8.1.0+r23/build/Android.common.mk000066400000000000000000000063611336577252300224720ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# ifndef ART_ANDROID_COMMON_MK ART_ANDROID_COMMON_MK = true ART_TARGET_SUPPORTED_ARCH := arm arm64 mips mips64 x86 x86_64 ART_HOST_SUPPORTED_ARCH := x86 x86_64 ifneq ($(HOST_OS),darwin) ART_HOST_SUPPORTED_ARCH := x86 x86_64 else # Mac OS doesn't support low-4GB allocation in a 64-bit process. So we won't be able to create # our heaps. ART_HOST_SUPPORTED_ARCH := x86 endif ART_COVERAGE := false ifeq ($(ART_COVERAGE),true) # https://gcc.gnu.org/onlinedocs/gcc/Cross-profiling.html GCOV_PREFIX := /data/local/tmp/gcov # GCOV_PREFIX_STRIP is an integer that defines how many levels should be # stripped off the beginning of the path. We want the paths in $GCOV_PREFIX to # be relative to $ANDROID_BUILD_TOP so we can just adb pull from the top and not # have to worry about placing things ourselves. GCOV_PREFIX_STRIP := $(shell echo $(ANDROID_BUILD_TOP) | grep -o / | wc -l) GCOV_ENV := GCOV_PREFIX=$(GCOV_PREFIX) GCOV_PREFIX_STRIP=$(GCOV_PREFIX_STRIP) else GCOV_ENV := endif ifeq (,$(filter $(TARGET_ARCH),$(ART_TARGET_SUPPORTED_ARCH))) $(warning unsupported TARGET_ARCH=$(TARGET_ARCH)) endif ifeq (,$(filter $(HOST_ARCH),$(ART_HOST_SUPPORTED_ARCH))) $(warning unsupported HOST_ARCH=$(HOST_ARCH)) endif # Primary vs. secondary 2ND_TARGET_ARCH := $(TARGET_2ND_ARCH) TARGET_INSTRUCTION_SET_FEATURES := $(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) 2ND_TARGET_INSTRUCTION_SET_FEATURES := $($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) ifdef TARGET_2ND_ARCH ifneq ($(filter %64,$(TARGET_ARCH)),) ART_PHONY_TEST_TARGET_SUFFIX := 64 2ND_ART_PHONY_TEST_TARGET_SUFFIX := 32 else # TODO: ??? $(warning Do not know what to do with this multi-target configuration!) ART_PHONY_TEST_TARGET_SUFFIX := 32 2ND_ART_PHONY_TEST_TARGET_SUFFIX := endif else ifneq ($(filter %64,$(TARGET_ARCH)),) ART_PHONY_TEST_TARGET_SUFFIX := 64 2ND_ART_PHONY_TEST_TARGET_SUFFIX := else ART_PHONY_TEST_TARGET_SUFFIX := 32 2ND_ART_PHONY_TEST_TARGET_SUFFIX := endif endif ART_HOST_SHLIB_EXTENSION := $(HOST_SHLIB_SUFFIX) ART_HOST_SHLIB_EXTENSION ?= .so ifeq ($(HOST_PREFER_32_BIT),true) ART_PHONY_TEST_HOST_SUFFIX := 32 2ND_ART_PHONY_TEST_HOST_SUFFIX := ART_HOST_ARCH := x86 2ND_ART_HOST_ARCH := 2ND_HOST_ARCH := ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) 2ND_ART_HOST_OUT_SHARED_LIBRARIES := else ART_PHONY_TEST_HOST_SUFFIX := 64 2ND_ART_PHONY_TEST_HOST_SUFFIX := 32 ART_HOST_ARCH := x86_64 2ND_ART_HOST_ARCH := x86 2ND_HOST_ARCH := x86 ART_HOST_OUT_SHARED_LIBRARIES := $(HOST_OUT_SHARED_LIBRARIES) 2ND_ART_HOST_OUT_SHARED_LIBRARIES := $(2ND_HOST_OUT_SHARED_LIBRARIES) endif endif # ART_ANDROID_COMMON_MK android-platform-art-8.1.0+r23/build/Android.common_build.mk000066400000000000000000000043311336577252300236440ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
ifndef ART_ANDROID_COMMON_BUILD_MK
ART_ANDROID_COMMON_BUILD_MK = true

include art/build/Android.common.mk

# These can be overridden via the environment or by editing this file to
# enable/disable certain build configurations.
#
# For example, to disable everything but the host debug build you use:
#
# (export ART_BUILD_TARGET_NDEBUG=false && export ART_BUILD_TARGET_DEBUG=false && export ART_BUILD_HOST_NDEBUG=false && ...)
#
# Beware that tests may use the non-debug build for performance, notably 055-enum-performance
#
ART_BUILD_TARGET_NDEBUG ?= true
ART_BUILD_TARGET_DEBUG ?= true
ART_BUILD_HOST_NDEBUG ?= true
ART_BUILD_HOST_DEBUG ?= true

ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
$(info Disabling ART_BUILD_TARGET_NDEBUG)
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),false)
$(info Disabling ART_BUILD_TARGET_DEBUG)
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),false)
$(info Disabling ART_BUILD_HOST_NDEBUG)
endif
ifeq ($(ART_BUILD_HOST_DEBUG),false)
$(info Disabling ART_BUILD_HOST_DEBUG)
endif

# Enable the read barrier by default.
ART_USE_READ_BARRIER ?= true

ART_CPP_EXTENSION := .cc

ifndef LIBART_IMG_HOST_BASE_ADDRESS
$(error LIBART_IMG_HOST_BASE_ADDRESS unset)
endif

ifndef LIBART_IMG_TARGET_BASE_ADDRESS
$(error LIBART_IMG_TARGET_BASE_ADDRESS unset)
endif

# Support for disabling certain builds.
ART_BUILD_TARGET := false
ART_BUILD_HOST := false

ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
ART_BUILD_TARGET := true
endif
ifeq ($(ART_BUILD_TARGET_DEBUG),true)
ART_BUILD_TARGET := true
endif
ifeq ($(ART_BUILD_HOST_NDEBUG),true)
ART_BUILD_HOST := true
endif
ifeq ($(ART_BUILD_HOST_DEBUG),true)
ART_BUILD_HOST := true
endif

endif # ART_ANDROID_COMMON_BUILD_MK
android-platform-art-8.1.0+r23/build/Android.common_path.mk000066400000000000000000000154111336577252300235020ustar00rootroot00000000000000#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

ifndef ART_ANDROID_COMMON_PATH_MK
ART_ANDROID_COMMON_PATH_MK := true

include art/build/Android.common.mk
include art/build/Android.common_build.mk

# Directory used for dalvik-cache on device.
ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache

# Directory used for gtests on device.
# $(TARGET_OUT_DATA_NATIVE_TESTS) will evaluate to the nativetest directory in the target part on
# the host, so we can strip everything but the directory to find out whether it is "nativetest" or
# "nativetest64."
ART_TARGET_NATIVETEST_DIR := /data/$(notdir $(TARGET_OUT_DATA_NATIVE_TESTS))/art
ART_TARGET_NATIVETEST_OUT := $(TARGET_OUT_DATA_NATIVE_TESTS)/art

# Directory used for oat tests on device.
ART_TARGET_TEST_DIR := /data/art-test
ART_TARGET_TEST_OUT := $(TARGET_OUT_DATA)/art-test

# core.oat location on the device.
TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$(DEX2OAT_TARGET_ARCH)/core.oat
ifdef TARGET_2ND_ARCH
2ND_TARGET_CORE_OAT := $(ART_TARGET_TEST_DIR)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core.oat
endif

CORE_OAT_SUFFIX := .oat

# core.oat locations under the out directory.
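# As a concrete sketch (x86_64 host, default layout; values illustrative):
# HOST_CORE_OAT_OUT_BASE below expands to $(HOST_OUT_JAVA_LIBRARIES)/x86_64/core, and
# Android.oat.mk appends an infix plus CORE_OAT_SUFFIX, producing e.g.
# $(HOST_OUT_JAVA_LIBRARIES)/x86_64/core-interpreter.oat for the interpreter variant.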
HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core ifneq ($(HOST_PREFER_32_BIT),true) 2ND_HOST_CORE_OAT_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core endif HOST_CORE_OAT_OUTS := TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core ifdef TARGET_2ND_ARCH 2ND_TARGET_CORE_OAT_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core endif TARGET_CORE_OAT_OUTS := CORE_IMG_SUFFIX := .art # core.art locations under the out directory. HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(ART_HOST_ARCH)/core ifneq ($(HOST_PREFER_32_BIT),true) 2ND_HOST_CORE_IMG_OUT_BASE := $(HOST_OUT_JAVA_LIBRARIES)/$(2ND_ART_HOST_ARCH)/core endif HOST_CORE_IMG_OUTS := TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$(DEX2OAT_TARGET_ARCH)/core ifdef TARGET_2ND_ARCH 2ND_TARGET_CORE_IMG_OUT_BASE := $(ART_TARGET_TEST_OUT)/$($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH)/core endif TARGET_CORE_IMG_OUTS := # Oat location of core.art. HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art # Jar files for core.art. HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) ifeq ($(ART_TEST_ANDROID_ROOT),) TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar) else TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar) endif HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar) TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar) # Classpath for Jack compilation: we only need core-libart. 
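# The expansion below is two classes.jack paths joined with ':' (note that
# core-oj-hostdex is included as well), roughly of this shape, assuming the usual
# <module>_intermediates layout (illustrative):
#   <obj>/JAVA_LIBRARIES/core-oj-hostdex_intermediates/classes.jack:<obj>/JAVA_LIBRARIES/core-libart-hostdex_intermediates/classes.jack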
HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack) TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack) ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar) ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid ART_HOST_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION)) ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION)) ifdef HOST_2ND_ARCH ART_HOST_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so) ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so) endif ART_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) ifdef TARGET_2ND_ARCH ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so) endif ART_CORE_DEBUGGABLE_EXECUTABLES := \ dex2oat \ dexoptanalyzer \ imgdiag \ oatdump \ patchoat \ profman \ ART_CORE_EXECUTABLES := \ dalvikvm \ dexlist \ # Depend on the -target or -host phony targets generated by the build system # for each module ART_TARGET_EXECUTABLES := ifneq ($(ART_BUILD_TARGET_NDEBUG),false) ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES) $(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)-target) endif ifneq ($(ART_BUILD_TARGET_DEBUG),false) ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)d-target) endif ART_HOST_EXECUTABLES := ifneq ($(ART_BUILD_HOST_NDEBUG),false) ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES) $(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)-host) endif ifneq ($(ART_BUILD_HOST_DEBUG),false) ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)d-host) endif endif # ART_ANDROID_COMMON_PATH_MK android-platform-art-8.1.0+r23/build/Android.common_test.mk000066400000000000000000000243411336577252300235270ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ifndef ART_ANDROID_COMMON_TEST_MK ART_ANDROID_COMMON_TEST_MK = true include art/build/Android.common_path.mk # Directory used for temporary test files on the host. ifneq ($(TMPDIR),) ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID) else # Use a BSD checksum calculated from ANDROID_BUILD_TOP and USER as one of the # path components for the test output. This should allow us to run tests from multiple # repositories at the same time. ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo ${ANDROID_BUILD_TOP}-${USER} | sum | cut -d ' ' -f1) endif # List of known broken tests that we won't attempt to execute. The test name must be the full # rule name such as test-art-host-oat-optimizing-HelloWorld64. ART_TEST_KNOWN_BROKEN := # List of run-tests to skip running in any configuration. This needs to be the full name of the # run-test such as '457-regs'. ART_TEST_RUN_TEST_SKIP ?= # Failing valgrind tests. # Note: *all* 64b tests involving the runtime do not work currently. b/15170219. # List of known failing tests that when executed won't cause test execution to not finish. # The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64. ART_TEST_KNOWN_FAILING := # Keep going after encountering a test failure? ART_TEST_KEEP_GOING ?= true # Do you want all tests, even those that are time consuming? ART_TEST_FULL ?= false # Do you want run-test to be quieter? run-tests will only show output if they fail. ART_TEST_QUIET ?= true # Do you want interpreter tests run? ART_TEST_INTERPRETER ?= true ART_TEST_INTERPRETER_ACCESS_CHECKS ?= true # Do you want JIT tests run? ART_TEST_JIT ?= true # Do you want optimizing compiler tests run? ART_TEST_OPTIMIZING ?= true # Do you want to test the optimizing compiler with graph coloring register allocation? ART_TEST_OPTIMIZING_GRAPH_COLOR ?= $(ART_TEST_FULL) # Do you want to do run-tests with profiles? ART_TEST_SPEED_PROFILE ?= $(ART_TEST_FULL) # Do we want to test PIC-compiled tests ("apps")? ART_TEST_PIC_TEST ?= $(ART_TEST_FULL) # Do you want tracing tests run? ART_TEST_TRACE ?= $(ART_TEST_FULL) # Do you want tracing tests (streaming mode) run? ART_TEST_TRACE_STREAM ?= $(ART_TEST_FULL) # Do you want tests with GC verification enabled run? ART_TEST_GC_VERIFY ?= $(ART_TEST_FULL) # Do you want tests with the GC stress mode enabled run? ART_TEST_GC_STRESS ?= $(ART_TEST_FULL) # Do you want tests with the JNI forcecopy mode enabled run? ART_TEST_JNI_FORCECOPY ?= $(ART_TEST_FULL) # Do you want run-tests with relocation enabled run? ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL) # Do you want run-tests with prebuilding? ART_TEST_RUN_TEST_PREBUILD ?= true # Do you want run-tests with no prebuilding enabled run? ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL) # Do you want run-tests with a pregenerated core.art? ART_TEST_RUN_TEST_IMAGE ?= true # Do you want run-tests without a pregenerated core.art? ART_TEST_RUN_TEST_NO_IMAGE ?= $(ART_TEST_FULL) # Do you want run-tests with relocation enabled but patchoat failing? 
ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT ?= $(ART_TEST_FULL)

# Do you want run-tests without a dex2oat?
ART_TEST_RUN_TEST_NO_DEX2OAT ?= $(ART_TEST_FULL)

# Do you want run-tests with libartd.so?
ART_TEST_RUN_TEST_DEBUG ?= true

# Do you want run-tests with libart.so?
ART_TEST_RUN_TEST_NDEBUG ?= $(ART_TEST_FULL)

# Do you want run-tests with the host/target's second arch?
ART_TEST_RUN_TEST_2ND_ARCH ?= true

# Do you want failed tests to have their artifacts cleaned up?
ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true

# Do you want run-tests with the --debuggable flag?
ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL)

# Do you want to test multi-part boot-image functionality?
ART_TEST_RUN_TEST_MULTI_IMAGE ?= $(ART_TEST_FULL)

# Define the command run on test failure. $(1) is the name of the test. Executed by the shell.
# If the test was a top-level make target (e.g. `test-art-host-gtest-codegen_test64`), the command
# fails with exit status 1 (returned by the last `grep` statement below).
# Otherwise (e.g., if the test was run as a prerequisite of a compound test command, such as
# `test-art-host-gtest-codegen_test`), the command does not fail, as this would break rules running
# ART_TEST_PREREQ_FINISHED as one of their actions, which expects *all* prerequisites *not* to fail.
define ART_TEST_FAILED
  ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \
    (mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \
      echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \
        && (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \
        || (echo -e "$(1) \e[91mFAILED\e[0m" >&2; echo $(MAKECMDGOALS) | grep -q -v $(1))))
endef

ifeq ($(ART_TEST_QUIET),true)
  ART_TEST_ANNOUNCE_PASS := ( true )
  ART_TEST_ANNOUNCE_RUN := ( true )
  ART_TEST_ANNOUNCE_SKIP_FAILURE := ( true )
  ART_TEST_ANNOUNCE_SKIP_BROKEN := ( true )
else
  # Note the use of '=' and not ':=' is intentional since these are actually functions.
  ART_TEST_ANNOUNCE_PASS = ( echo -e "$(1) \e[92mPASSED\e[0m" )
  ART_TEST_ANNOUNCE_RUN = ( echo -e "$(1) \e[95mRUNNING\e[0m")
  ART_TEST_ANNOUNCE_SKIP_FAILURE = ( echo -e "$(1) \e[93mSKIPPING DUE TO EARLIER FAILURE\e[0m" )
  ART_TEST_ANNOUNCE_SKIP_BROKEN = ( echo -e "$(1) \e[93mSKIPPING BROKEN TEST\e[0m" )
endif

# Define the command run on test success. $(1) is the name of the test. Executed by the shell.
# The command prints "PASSED", then checks whether this was a top-level make target (e.g.
# "mm test-art-host-oat-HelloWorld32"); if it was, it does nothing, otherwise it creates a file
# to be printed in the passing test summary.
define ART_TEST_PASSED
  ( $(call ART_TEST_ANNOUNCE_PASS,$(1)) && \
    (echo $(MAKECMDGOALS) | grep -q $(1) || \
      (mkdir -p $(ART_HOST_TEST_DIR)/passed/ && touch $(ART_HOST_TEST_DIR)/passed/$(1))))
endef

# Define the command run on test success of multiple prerequisites. $(1) is the name of the test.
# When the test is a top-level make target then a summary of the tests that ran is produced.
# Executed by the shell.
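# As a sketch, a fully passing top-level run ends with output shaped like this
# (colors omitted, test name purely illustrative):
#   test-art-host-gtest COMPLETE
#   PASSING TESTS
#   test-art-host-gtest-oat_test64
#   NO TESTS SKIPPED
#   NO TESTS FAILED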
define ART_TEST_PREREQ_FINISHED (echo -e "$(1) \e[32mCOMPLETE\e[0m" && \ (echo $(MAKECMDGOALS) | grep -q -v $(1) || \ (([ -d $(ART_HOST_TEST_DIR)/passed/ ] \ && (echo -e "\e[92mPASSING TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/passed/) \ || (echo -e "\e[91mNO TESTS PASSED\e[0m")) && \ ([ -d $(ART_HOST_TEST_DIR)/skipped/ ] \ && (echo -e "\e[93mSKIPPED TESTS\e[0m" && ls -1 $(ART_HOST_TEST_DIR)/skipped/) \ || (echo -e "\e[92mNO TESTS SKIPPED\e[0m")) && \ ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \ && (echo -e "\e[91mFAILING TESTS\e[0m" >&2 && ls -1 $(ART_HOST_TEST_DIR)/failed/ >&2) \ || (echo -e "\e[92mNO TESTS FAILED\e[0m")) \ && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] && rm -r $(ART_HOST_TEST_DIR) \ || (rm -r $(ART_HOST_TEST_DIR) && false))))) endef # Define the command executed by the shell ahead of running an art test. $(1) is the name of the # test. define ART_TEST_SKIP ((echo $(ART_TEST_KNOWN_BROKEN) | grep -q -v $(1) \ && ([ ! -d $(ART_HOST_TEST_DIR)/failed/ ] || [ $(ART_TEST_KEEP_GOING) = true ])\ && $(call ART_TEST_ANNOUNCE_RUN,$(1)) ) \ || ((mkdir -p $(ART_HOST_TEST_DIR)/skipped/ && touch $(ART_HOST_TEST_DIR)/skipped/$(1) \ && ([ -d $(ART_HOST_TEST_DIR)/failed/ ] \ && $(call ART_TEST_ANNOUNCE_SKIP_FAILURE,$(1)) ) \ || $(call ART_TEST_ANNOUNCE_SKIP_BROKEN,$(1)) ) && false)) endef # Create a build rule to create the dex file for a test. # $(1): module prefix, e.g. art-test-dex # $(2): input test directory in art/test, e.g. HelloWorld # $(3): target output module path (default module path is used on host) # $(4): additional dependencies # $(5): a make variable used to collate target dependencies, e.g ART_TEST_TARGET_OAT_HelloWorld_DEX # $(6): a make variable used to collate host dependencies, e.g ART_TEST_HOST_OAT_HelloWorld_DEX # # If the input test directory contains a file called main.list and main.jpp, # then a multi-dex file is created passing main.list as the --main-dex-list # argument to dx and main.jpp for Jack. 
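# A typical call site looks like the per-directory loop in Android.gtest.mk; spelled
# out for one directory (MultiDex is just an example name from that list):
#   $(eval $(call build-art-test-dex,art-gtest,MultiDex,$(ART_TARGET_NATIVETEST_OUT), \
#     art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_MultiDex_DEX,ART_TEST_HOST_GTEST_MultiDex_DEX))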
define build-art-test-dex ifeq ($(ART_BUILD_TARGET),true) include $(CLEAR_VARS) LOCAL_MODULE := $(1)-$(2) LOCAL_SRC_FILES := $(call all-java-files-under, $(2)) LOCAL_NO_STANDARD_LIBRARIES := true LOCAL_DEX_PREOPT := false LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4) LOCAL_MODULE_TAGS := tests LOCAL_JAVA_LIBRARIES := $(TARGET_CORE_JARS) LOCAL_MODULE_PATH := $(3) LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp endif include $(BUILD_JAVA_LIBRARY) $(5) := $$(LOCAL_INSTALLED_MODULE) endif ifeq ($(ART_BUILD_HOST),true) include $(CLEAR_VARS) LOCAL_MODULE := $(1)-$(2) LOCAL_SRC_FILES := $(call all-java-files-under, $(2)) LOCAL_NO_STANDARD_LIBRARIES := true LOCAL_DEX_PREOPT := false LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common_test.mk $(4) LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS) LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp endif include $(BUILD_HOST_DALVIK_JAVA_LIBRARY) $(6) := $$(LOCAL_INSTALLED_MODULE) endif endef endif # ART_ANDROID_COMMON_TEST_MK android-platform-art-8.1.0+r23/build/Android.cpplint.mk000066400000000000000000000050041336577252300226440ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # include art/build/Android.common_build.mk ART_CPPLINT := $(LOCAL_PATH)/tools/cpplint.py ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references,-runtime/sizeof,-runtime/threadsafe_fn,-runtime/printf ART_CPPLINT_FLAGS := --root=$(TOP) ART_CPPLINT_QUIET := --quiet ART_CPPLINT_INGORED := \ runtime/elf.h \ runtime/openjdkjvmti/include/jvmti.h # This: # 1) Gets a list of all .h & .cc files in the art directory. # 2) Prepends 'art/' to each of them to make the full name. # 3) removes art/runtime/elf.h from the list. 
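# Spelled out with the first ignored entry (a sketch of the mechanics): patsubst turns
# runtime/elf.h into art/runtime/elf.h, and filter-out then drops that art/-prefixed
# path from the generated source list, so cpplint never sees it.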
ART_CPPLINT_SRC := $(filter-out $(patsubst %,$(LOCAL_PATH)/%,$(ART_CPPLINT_INGORED)), $(addprefix $(LOCAL_PATH)/, $(call all-subdir-named-files,*.h) $(call all-subdir-named-files,*$(ART_CPP_EXTENSION)))) # "mm cpplint-art" to verify we aren't regressing .PHONY: cpplint-art cpplint-art: $(ART_CPPLINT) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_FILTER) $(ART_CPPLINT_SRC) # "mm cpplint-art-all" to see all warnings .PHONY: cpplint-art-all cpplint-art-all: $(ART_CPPLINT) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_SRC) OUT_CPPLINT := $(TARGET_COMMON_OUT_ROOT)/cpplint ART_CPPLINT_TARGETS := define declare-art-cpplint-target art_cpplint_file := $(1) art_cpplint_touch := $$(OUT_CPPLINT)/$$(subst /,__,$$(art_cpplint_file)) $$(art_cpplint_touch): $$(art_cpplint_file) $(ART_CPPLINT) art/build/Android.cpplint.mk $(hide) $(ART_CPPLINT) $(ART_CPPLINT_QUIET) $(ART_CPPLINT_FLAGS) $(ART_CPPLINT_FILTER) $$< $(hide) mkdir -p $$(dir $$@) $(hide) touch $$@ ART_CPPLINT_TARGETS += $$(art_cpplint_touch) endef $(foreach file, $(ART_CPPLINT_SRC), $(eval $(call declare-art-cpplint-target,$(file)))) #$(info $(call declare-art-cpplint-target,$(firstword $(ART_CPPLINT_SRC)))) include $(CLEAR_VARS) LOCAL_MODULE := cpplint-art-phony LOCAL_MODULE_TAGS := optional LOCAL_ADDITIONAL_DEPENDENCIES := $(ART_CPPLINT_TARGETS) include $(BUILD_PHONY_PACKAGE) android-platform-art-8.1.0+r23/build/Android.gtest.mk000066400000000000000000000727471336577252300223430ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # The path for which all the dex files are relative, not actually the current directory. LOCAL_PATH := art/test include art/build/Android.common_test.mk include art/build/Android.common_path.mk include art/build/Android.common_build.mk # Subdirectories in art/test which contain dex files used as inputs for gtests. GTEST_DEX_DIRECTORIES := \ AbstractMethod \ AllFields \ DefaultMethods \ DexToDexDecompiler \ ErroneousA \ ErroneousB \ ErroneousInit \ ForClassLoaderA \ ForClassLoaderB \ ForClassLoaderC \ ForClassLoaderD \ ExceptionHandle \ GetMethodSignature \ ImageLayoutA \ ImageLayoutB \ IMTA \ IMTB \ Instrumentation \ Interfaces \ Lookup \ Main \ ManyMethods \ MethodTypes \ MultiDex \ MultiDexModifiedSecondary \ MyClass \ MyClassNatives \ Nested \ NonStaticLeafMethods \ Packages \ ProtoCompare \ ProtoCompare2 \ ProfileTestMultiDex \ StaticLeafMethods \ Statics \ StaticsFromCode \ Transaction \ XandY # Create build rules for each dex file recording the dependency. $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gtest,$(dir), \ $(ART_TARGET_NATIVETEST_OUT),art/build/Android.gtest.mk,ART_TEST_TARGET_GTEST_$(dir)_DEX, \ ART_TEST_HOST_GTEST_$(dir)_DEX))) # Create rules for MainStripped, a copy of Main with the classes.dex stripped # for the oat file assistant tests. 
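# The basename/suffix juggling below only rewrites the file stem, e.g. (hypothetical
# path): .../art-gtest-Main.jar -> .../art-gtest-MainStripped.jar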
ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) $(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) cp $< $@ $(call dexpreopt-remove-classes.dex,$@) $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) cp $< $@ $(call dexpreopt-remove-classes.dex,$@) ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali)) ART_TEST_GTEST_VerifierDepsMulti_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDepsMulti/*.smali)) ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) ART_TEST_TARGET_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX)) ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDepsMulti,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX)) $(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) $(ART_TEST_TARGET_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) $(ART_TEST_HOST_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) $(ART_TEST_TARGET_GTEST_VerifierDepsMulti_DEX): $(ART_TEST_GTEST_VerifierDepsMulti_SRC) $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/smali assemble --output $@ $(filter %.smali,$^) # Dex file dependencies for each gtest. 
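# Convention: ART_GTEST_<gtest name>_DEX_DEPS lists entries of GTEST_DEX_DIRECTORIES
# above. For example, a hypothetical new gtest foo_test that loads the Main and Nested
# dex files would declare:
#   ART_GTEST_foo_test_DEX_DEPS := Main Nested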
ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested ART_GTEST_atomic_dex_ref_map_test_DEX_DEPS := Interfaces ART_GTEST_class_linker_test_DEX_DEPS := AllFields ErroneousA ErroneousB ErroneousInit ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD Interfaces MethodTypes MultiDex MyClass Nested Statics StaticsFromCode ART_GTEST_class_loader_context_test_DEX_DEPS := Main MultiDex MyClass ForClassLoaderA ForClassLoaderB ForClassLoaderC ForClassLoaderD ART_GTEST_class_table_test_DEX_DEPS := XandY ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods ProfileTestMultiDex ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ART_GTEST_dexoptanalyzer_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ART_GTEST_image_space_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex ART_GTEST_oat_test_DEX_DEPS := Main ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY ART_GTEST_proxy_test_DEX_DEPS := Interfaces ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY ART_GTEST_stub_test_DEX_DEPS := AllFields ART_GTEST_transaction_test_DEX_DEPS := Transaction ART_GTEST_type_lookup_table_test_DEX_DEPS := Lookup ART_GTEST_unstarted_runtime_test_DEX_DEPS := Nested ART_GTEST_heap_verification_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY ART_GTEST_verifier_deps_test_DEX_DEPS := VerifierDeps VerifierDepsMulti MultiDex ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler # The elf writer test has dependencies on core.oat. 
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32) ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \ $(HOST_CORE_IMAGE_optimizing_64) \ $(HOST_CORE_IMAGE_optimizing_32) \ $(HOST_CORE_IMAGE_interpreter_64) \ $(HOST_CORE_IMAGE_interpreter_32) \ $(HOST_OUT_EXECUTABLES)/patchoatd ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \ $(TARGET_CORE_IMAGE_optimizing_64) \ $(TARGET_CORE_IMAGE_optimizing_32) \ $(TARGET_CORE_IMAGE_interpreter_64) \ $(TARGET_CORE_IMAGE_interpreter_32) \ $(TARGET_OUT_EXECUTABLES)/patchoatd ART_GTEST_oat_file_assistant_test_HOST_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) ART_GTEST_dexoptanalyzer_test_HOST_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \ $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \ dexoptanalyzerd ART_GTEST_image_space_test_HOST_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) ART_GTEST_image_space_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) ART_GTEST_dex2oat_test_HOST_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) ART_GTEST_dex2oat_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) ART_GTEST_dex2oat_image_test_HOST_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) ART_GTEST_dex2oat_image_test_TARGET_DEPS := \ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) # TODO: document why this is needed. ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32) # The dexdiag test requires the dexdiag utility. ART_GTEST_dexdiag_test_HOST_DEPS := \ $(HOST_OUT_EXECUTABLES)/dexdiag ART_GTEST_dexdiag_test_TARGET_DEPS := \ dexdiag # The dexdump test requires an image and the dexdump utility. # TODO: rename into dexdump when migration completes ART_GTEST_dexdump_test_HOST_DEPS := \ $(HOST_CORE_IMAGE_DEFAULT_64) \ $(HOST_CORE_IMAGE_DEFAULT_32) \ $(HOST_OUT_EXECUTABLES)/dexdump2 ART_GTEST_dexdump_test_TARGET_DEPS := \ $(TARGET_CORE_IMAGE_DEFAULT_64) \ $(TARGET_CORE_IMAGE_DEFAULT_32) \ dexdump2 # The dexlayout test requires an image and the dexlayout utility. # TODO: rename into dexdump when migration completes ART_GTEST_dexlayout_test_HOST_DEPS := \ $(HOST_CORE_IMAGE_DEFAULT_64) \ $(HOST_CORE_IMAGE_DEFAULT_32) \ $(HOST_OUT_EXECUTABLES)/dexlayout \ $(HOST_OUT_EXECUTABLES)/dexdump2 ART_GTEST_dexlayout_test_TARGET_DEPS := \ $(TARGET_CORE_IMAGE_DEFAULT_64) \ $(TARGET_CORE_IMAGE_DEFAULT_32) \ dexlayout \ dexdump2 # The dexlist test requires an image and the dexlist utility. ART_GTEST_dexlist_test_HOST_DEPS := \ $(HOST_CORE_IMAGE_DEFAULT_64) \ $(HOST_CORE_IMAGE_DEFAULT_32) \ $(HOST_OUT_EXECUTABLES)/dexlist ART_GTEST_dexlist_test_TARGET_DEPS := \ $(TARGET_CORE_IMAGE_DEFAULT_64) \ $(TARGET_CORE_IMAGE_DEFAULT_32) \ dexlist # The imgdiag test has dependencies on core.oat since it needs to load it during the test. # For the host, also add the installed tool (in the base size, that should suffice). For the # target, just the module is fine, the sync will happen late enough. 
ART_GTEST_imgdiag_test_HOST_DEPS := \
  $(HOST_CORE_IMAGE_DEFAULT_64) \
  $(HOST_CORE_IMAGE_DEFAULT_32) \
  $(HOST_OUT_EXECUTABLES)/imgdiagd
ART_GTEST_imgdiag_test_TARGET_DEPS := \
  $(TARGET_CORE_IMAGE_DEFAULT_64) \
  $(TARGET_CORE_IMAGE_DEFAULT_32) \
  imgdiagd

# Oatdump test requires an image and an oat file to dump.
ART_GTEST_oatdump_test_HOST_DEPS := \
  $(HOST_CORE_IMAGE_DEFAULT_64) \
  $(HOST_CORE_IMAGE_DEFAULT_32) \
  $(HOST_OUT_EXECUTABLES)/oatdumpd \
  $(HOST_OUT_EXECUTABLES)/oatdumpds
ART_GTEST_oatdump_test_TARGET_DEPS := \
  $(TARGET_CORE_IMAGE_DEFAULT_64) \
  $(TARGET_CORE_IMAGE_DEFAULT_32) \
  oatdump
ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)

# Profile assistant tests require the profman utility.
ART_GTEST_profile_assistant_test_HOST_DEPS := \
  $(HOST_OUT_EXECUTABLES)/profmand
ART_GTEST_profile_assistant_test_TARGET_DEPS := \
  profman

# The path that all the source files are relative to; not actually the current directory.
LOCAL_PATH := art

ART_TEST_MODULES := \
    art_cmdline_tests \
    art_compiler_tests \
    art_compiler_host_tests \
    art_dex2oat_tests \
    art_dexdiag_tests \
    art_dexdump_tests \
    art_dexlayout_tests \
    art_dexlist_tests \
    art_dexoptanalyzer_tests \
    art_imgdiag_tests \
    art_oatdump_tests \
    art_profman_tests \
    art_runtime_tests \
    art_runtime_compiler_tests \

ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
    $(ART_TEST_LIST_device_$(TARGET_ARCH)_$(m)))
ifdef TARGET_2ND_ARCH
2ND_ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
    $(ART_TEST_LIST_device_$(2ND_TARGET_ARCH)_$(m)))
endif
ART_HOST_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
    $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_$(m)))
ifneq ($(HOST_PREFER_32_BIT),true)
2ND_ART_HOST_GTEST_FILES += $(foreach m,$(ART_TEST_MODULES),\
    $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_$(m)))
endif

# Variables holding collections of gtest prerequisites used to run a number of gtests.
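# E.g. on a 64-bit host (see the suffix logic in Android.common.mk)
# ART_PHONY_TEST_HOST_SUFFIX is 64 and 2ND_ART_PHONY_TEST_HOST_SUFFIX is 32, so the
# first two variables below expand to ART_TEST_HOST_GTEST64_RULES and
# ART_TEST_HOST_GTEST32_RULES.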
ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_GTEST_RULES := ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_VALGRIND_GTEST_RULES := ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_GTEST_RULES := ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_VALGRIND_GTEST_RULES := ART_TEST_HOST_GTEST_DEPENDENCIES := ART_GTEST_TARGET_ANDROID_ROOT := '/system' ifneq ($(ART_TEST_ANDROID_ROOT),) ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT) endif ART_VALGRIND_TARGET_DEPENDENCIES := \ $(TARGET_OUT_EXECUTABLES)/valgrind \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(TARGET_ARCH)-linux \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(TARGET_ARCH)-linux.so \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(TARGET_ARCH)-linux.so \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/default.supp ifdef TARGET_2ND_ARCH ART_VALGRIND_TARGET_DEPENDENCIES += \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(TARGET_2ND_ARCH)-linux \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(TARGET_2ND_ARCH)-linux.so \ $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(TARGET_2ND_ARCH)-linux.so endif include $(CLEAR_VARS) LOCAL_MODULE := valgrind-target-suppressions.txt LOCAL_MODULE_CLASS := ETC LOCAL_MODULE_TAGS := optional LOCAL_SRC_FILES := test/valgrind-target-suppressions.txt LOCAL_MODULE_PATH := $(ART_TARGET_TEST_OUT) include $(BUILD_PREBUILT) # Define a make rule for a target device gtest. # $(1): gtest name - the name of the test we're building such as leb128_test. # $(2): path relative to $OUT to the test binary # $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. # $(4): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/ define define-art-gtest-rule-target gtest_rule := test-art-target-gtest-$(1)$$($(3)ART_PHONY_TEST_TARGET_SUFFIX) gtest_exe := $(OUT_DIR)/$(2) gtest_target_exe := $$(patsubst $(PRODUCT_OUT)/%,/%,$$(gtest_exe)) # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test # to ensure files are pushed to the device. 
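# (As an illustration: for the doc comment's example test leb128_test on a 64-bit
# primary arch, this define creates the phony rules test-art-target-gtest-leb128_test64
# and valgrind-test-art-target-gtest-leb128_test64, both of which depend on the sync
# below.)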
TEST_ART_TARGET_SYNC_DEPS += \ $$(ART_GTEST_$(1)_TARGET_DEPS) \ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \ $$(gtest_exe) \ $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \ $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \ $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \ $$(ART_TARGET_TEST_OUT)/valgrind-target-suppressions.txt $$(gtest_rule) valgrind-$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe) .PHONY: $$(gtest_rule) $$(gtest_rule): test-art-target-sync $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID $(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE) $(hide) $$(call ART_TEST_SKIP,$$@) && \ (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \ && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \ && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@)) $(hide) rm -f /tmp/$$@-$$$$PPID ART_TEST_TARGET_GTEST$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule) ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule) ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule) .PHONY: valgrind-$$(gtest_rule) valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-sync $(hide) adb shell touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID $(hide) adb shell rm $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID $(hide) adb shell chmod 755 $$(PRIVATE_TARGET_EXE) $(hide) $$(call ART_TEST_SKIP,$$@) && \ (adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(4) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \ valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \ --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \ --num-callers=50 --show-mismatched-frees=no \ $$(PRIVATE_TARGET_EXE) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID" \ && (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$@-$$$$PPID /tmp/ \ && $$(call ART_TEST_PASSED,$$@)) \ || $$(call ART_TEST_FAILED,$$@)) $(hide) rm -f /tmp/$$@-$$$$PPID ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += valgrind-$$(gtest_rule) ART_TEST_TARGET_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule) ART_TEST_TARGET_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule) # Clear locally defined variables. valgrind_gtest_rule := gtest_rule := gtest_exe := gtest_target_exe := endef # define-art-gtest-rule-target ART_VALGRIND_DEPENDENCIES := \ $(HOST_OUT_EXECUTABLES)/valgrind \ $(HOST_OUT)/lib64/valgrind/memcheck-amd64-linux \ $(HOST_OUT)/lib64/valgrind/memcheck-x86-linux \ $(HOST_OUT)/lib64/valgrind/default.supp \ $(HOST_OUT)/lib64/valgrind/vgpreload_core-amd64-linux.so \ $(HOST_OUT)/lib64/valgrind/vgpreload_core-x86-linux.so \ $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-amd64-linux.so \ $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-x86-linux.so # Define make rules for a host gtests. # $(1): gtest name - the name of the test we're building such as leb128_test. # $(2): path relative to $OUT to the test binary # $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. define define-art-gtest-rule-host gtest_rule := test-art-host-gtest-$(1)$$($(3)ART_PHONY_TEST_HOST_SUFFIX) gtest_exe := $(OUT_DIR)/$(2) # Dependencies for all host gtests. 
gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \ $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \ $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \ $$(gtest_exe) \ $$(ART_GTEST_$(1)_HOST_DEPS) \ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps) # Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some # build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so # override ASAN_OPTIONS. b/37751350 .PHONY: $$(gtest_rule) $$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(hide) ($$(call ART_TEST_SKIP,$$@) && ASAN_OPTIONS=detect_leaks=1 $$< && \ $$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@) ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule) ART_TEST_HOST_GTEST_RULES += $$(gtest_rule) ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule) .PHONY: valgrind-$$(gtest_rule) valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIES) $(hide) $$(call ART_TEST_SKIP,$$@) && \ VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \ $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \ --suppressions=art/test/valgrind-suppressions.txt --num-callers=50 \ $$< && \ $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@) ART_TEST_HOST_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule) ART_TEST_HOST_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule) ART_TEST_HOST_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule) # Clear locally defined variables. valgrind_gtest_rule := gtest_rule := gtest_exe := gtest_deps := endef # define-art-gtest-rule-host # Define the rules to build and run host and target gtests. # $(1): file name # $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. define define-art-gtest-target art_gtest_filename := $(1) include $$(CLEAR_VARS) art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) library_path := 2ND_library_path := ifneq ($$(ART_TEST_ANDROID_ROOT),) ifdef TARGET_2ND_ARCH 2ND_library_path := $$(ART_TEST_ANDROID_ROOT)/lib library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 else ifneq ($(filter %64,$(TARGET_ARCH)),) library_path := $$(ART_TEST_ANDROID_ROOT)/lib64 else library_path := $$(ART_TEST_ANDROID_ROOT)/lib endif endif endif ifndef ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES := endif $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),$$(art_gtest_filename),$(2),$$($(2)library_path))) # Clear locally defined variables. art_gtest_filename := art_gtest_name := library_path := 2ND_library_path := endef # define-art-gtest-target # $(1): file name # $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture. define define-art-gtest-host art_gtest_filename := $(1) include $$(CLEAR_VARS) art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename))) ifndef ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := endif $$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),$$(art_gtest_filename),$(2))) # Clear locally defined variables. art_gtest_filename := art_gtest_name := endef # define-art-gtest-host # Define the rules to build and run gtests for both archs on target. 
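# e.g. "make test-art-target-gtest-oat_test" builds and runs both the 32- and 64-bit
# variants by depending on the per-arch rules collected above (test name illustrative).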
# $(1): test name define define-art-gtest-target-both art_gtest_name := $(1) # A rule to run the different architecture versions of the gtest. .PHONY: test-art-target-gtest-$$(art_gtest_name) test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) .PHONY: valgrind-test-art-target-gtest-$$(art_gtest_name) valgrind-test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) # Clear now unused variables. ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES := ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES := art_gtest_name := endef # define-art-gtest-target-both # Define the rules to build and run gtests for both archs on host. # $(1): test name define define-art-gtest-host-both art_gtest_name := $(1) .PHONY: test-art-host-gtest-$$(art_gtest_name) test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) .PHONY: valgrind-test-art-host-gtest-$$(art_gtest_name) valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES) $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) # Clear now unused variables. ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES := ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES := art_gtest_name := endef # define-art-gtest-host-both ifeq ($(ART_BUILD_TARGET),true) $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),))) ifdef TARGET_2ND_ARCH $(foreach file,$(2ND_ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),2ND_))) endif # Rules to run the different architecture versions of the gtest. $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target-both,$$(notdir $$(basename $$(file)))))) endif ifeq ($(ART_BUILD_HOST),true) $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),))) ifneq ($(HOST_PREFER_32_BIT),true) $(foreach file,$(2ND_ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),2ND_))) endif # Rules to run the different architecture versions of the gtest. $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host-both,$$(notdir $$(basename $$(file)))))) endif # Used outside the art project to get a list of the current tests RUNTIME_TARGET_GTEST_MAKE_TARGETS := $(foreach file, $(ART_TARGET_GTEST_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(basename $$(file))))) COMPILER_TARGET_GTEST_MAKE_TARGETS := # Define all the combinations of host/target, valgrind and suffix such as: # test-art-host-gtest or valgrind-test-art-host-gtest64 # $(1): host or target # $(2): HOST or TARGET # $(3): valgrind- or undefined # $(4): undefined, 32 or 64 define define-test-art-gtest-combination ifeq ($(1),host) ifneq ($(2),HOST) $$(error argument mismatch $(1) and ($2)) endif else ifneq ($(1),target) $$(error found $(1) expected host or target) endif ifneq ($(2),TARGET) $$(error argument mismatch $(1) and ($2)) endif endif rule_name := $(3)test-art-$(1)-gtest$(4) ifeq ($(3),valgrind-) dependencies := $$(ART_TEST_$(2)_VALGRIND_GTEST$(4)_RULES) else dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES) endif .PHONY: $$(rule_name) $$(rule_name): $$(dependencies) $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@) # Clear locally defined variables. 
rule_name := dependencies := endef # define-test-art-gtest-combination $(eval $(call define-test-art-gtest-combination,target,TARGET,,)) $(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,)) $(eval $(call define-test-art-gtest-combination,target,TARGET,,$(ART_PHONY_TEST_TARGET_SUFFIX))) $(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(ART_PHONY_TEST_TARGET_SUFFIX))) ifdef TARGET_2ND_ARCH $(eval $(call define-test-art-gtest-combination,target,TARGET,,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))) $(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX))) endif $(eval $(call define-test-art-gtest-combination,host,HOST,,)) $(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,)) $(eval $(call define-test-art-gtest-combination,host,HOST,,$(ART_PHONY_TEST_HOST_SUFFIX))) $(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(ART_PHONY_TEST_HOST_SUFFIX))) ifneq ($(HOST_PREFER_32_BIT),true) $(eval $(call define-test-art-gtest-combination,host,HOST,,$(2ND_ART_PHONY_TEST_HOST_SUFFIX))) $(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(2ND_ART_PHONY_TEST_HOST_SUFFIX))) endif # Clear locally defined variables. define-art-gtest-rule-target := define-art-gtest-rule-host := define-art-gtest := define-test-art-gtest-combination := RUNTIME_GTEST_COMMON_SRC_FILES := COMPILER_GTEST_COMMON_SRC_FILES := RUNTIME_GTEST_TARGET_SRC_FILES := RUNTIME_GTEST_HOST_SRC_FILES := COMPILER_GTEST_TARGET_SRC_FILES := COMPILER_GTEST_HOST_SRC_FILES := ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_GTEST_RULES := ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES := ART_TEST_HOST_VALGRIND_GTEST_RULES := ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_GTEST_RULES := ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES := ART_TEST_TARGET_VALGRIND_GTEST_RULES := ART_GTEST_TARGET_ANDROID_ROOT := ART_GTEST_class_linker_test_DEX_DEPS := ART_GTEST_class_table_test_DEX_DEPS := ART_GTEST_compiler_driver_test_DEX_DEPS := ART_GTEST_dex_file_test_DEX_DEPS := ART_GTEST_exception_test_DEX_DEPS := ART_GTEST_elf_writer_test_HOST_DEPS := ART_GTEST_elf_writer_test_TARGET_DEPS := ART_GTEST_imtable_test_DEX_DEPS := ART_GTEST_jni_compiler_test_DEX_DEPS := ART_GTEST_jni_internal_test_DEX_DEPS := ART_GTEST_oat_file_assistant_test_DEX_DEPS := ART_GTEST_oat_file_assistant_test_HOST_DEPS := ART_GTEST_oat_file_assistant_test_TARGET_DEPS := ART_GTEST_dexoptanalyzer_test_DEX_DEPS := ART_GTEST_dexoptanalyzer_test_HOST_DEPS := ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := ART_GTEST_image_space_test_DEX_DEPS := ART_GTEST_image_space_test_HOST_DEPS := ART_GTEST_image_space_test_TARGET_DEPS := ART_GTEST_dex2oat_test_DEX_DEPS := ART_GTEST_dex2oat_test_HOST_DEPS := ART_GTEST_dex2oat_test_TARGET_DEPS := ART_GTEST_dex2oat_image_test_DEX_DEPS := ART_GTEST_dex2oat_image_test_HOST_DEPS := ART_GTEST_dex2oat_image_test_TARGET_DEPS := ART_GTEST_object_test_DEX_DEPS := ART_GTEST_proxy_test_DEX_DEPS := ART_GTEST_reflection_test_DEX_DEPS := ART_GTEST_stub_test_DEX_DEPS := ART_GTEST_transaction_test_DEX_DEPS := ART_GTEST_dex2oat_environment_tests_DEX_DEPS := 
ART_GTEST_heap_verification_test_DEX_DEPS := ART_GTEST_verifier_deps_test_DEX_DEPS := ART_VALGRIND_DEPENDENCIES := ART_VALGRIND_TARGET_DEPENDENCIES := $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=)) $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=)) ART_TEST_HOST_GTEST_MainStripped_DEX := ART_TEST_TARGET_GTEST_MainStripped_DEX := ART_TEST_GTEST_VerifierDeps_SRC := ART_TEST_HOST_GTEST_VerifierDeps_DEX := ART_TEST_TARGET_GTEST_VerifierDeps_DEX := GTEST_DEX_DIRECTORIES := LOCAL_PATH := android-platform-art-8.1.0+r23/build/Android.oat.mk000066400000000000000000000252341336577252300217650ustar00rootroot00000000000000# # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ######################################################################## # Rules to build a smaller "core" image to support core libraries # (that is, non-Android frameworks) testing on the host and target # # The main rules to build the default "boot" image are in # build/core/dex_preopt_libart.mk include art/build/Android.common_build.mk LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := ifeq ($(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default else LOCAL_DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$(DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) endif LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := ifeq ($($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES),) LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=default else LOCAL_$(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION := --instruction-set-features=$($(HOST_2ND_ARCH_VAR_PREFIX)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES) endif # Use dex2oat debug version for better error reporting # $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds. # $(3): wrapper, e.g., valgrind. # $(4): dex2oat suffix, e.g, valgrind requires 32 right now. # $(5): multi-image. # NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for # run-test --no-image define create-core-oat-host-rules core_compile_options := core_image_name := core_oat_name := core_infix := core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) ifeq ($(1),optimizing) core_compile_options += --compiler-backend=Optimizing core_dex2oat_dependency := $(DEX2OAT) endif ifeq ($(1),interpreter) core_compile_options += --compiler-filter=quicken core_infix := -interpreter endif ifeq ($(1),interp-ac) core_compile_options += --compiler-filter=extract --runtime-arg -Xverify:softfail core_infix := -interp-ac endif ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) #Technically this test is not precise, but hopefully good enough. 
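# (Not precise because filter-out drops words individually: a multi-word value such as
# "optimizing optimizing" would also slip through; the fixed call sites below only ever
# pass a single compiler name, so this is good enough in practice.)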
$$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing) endif # If $(5) is true, generate a multi-image. ifeq ($(5),true) core_multi_infix := -multi core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar core_multi_group := _multi else core_multi_infix := core_multi_param := core_multi_group := endif core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_IMG_SUFFIX) core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_OAT_SUFFIX) # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. ifeq ($(2),) $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name) else $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name) endif $(3)HOST_CORE_IMG_OUTS += $$(core_image_name) $(3)HOST_CORE_OAT_OUTS += $$(core_oat_name) # If we have a wrapper, make the target phony. ifneq ($(3),) .PHONY: $$(core_image_name) endif $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) $$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param) $$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency) @echo "host dex2oat: $$@" @mkdir -p $$(dir $$@) $$(hide) $(3) $$(DEX2OAT)$(4) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \ $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \ --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \ $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \ --host --android-root=$$(HOST_OUT) \ --generate-debug-info --generate-build-id --compile-pic \ $$(PRIVATE_CORE_MULTI_PARAM) $$(PRIVATE_CORE_COMPILE_OPTIONS) $$(core_oat_name): $$(core_image_name) # Clean up locally used variables. core_dex2oat_dependency := core_compile_options := core_image_name := core_oat_name := core_infix := endef # create-core-oat-host-rules # $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): wrapper. # $(3): dex2oat suffix. # $(4): multi-image. 
define create-core-oat-host-rule-combination $(call create-core-oat-host-rules,$(1),,$(2),$(3),$(4)) ifneq ($(HOST_PREFER_32_BIT),true) $(call create-core-oat-host-rules,$(1),2ND_,$(2),$(3),$(4)) endif endef $(eval $(call create-core-oat-host-rule-combination,optimizing,,,false)) $(eval $(call create-core-oat-host-rule-combination,interpreter,,,false)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false)) $(eval $(call create-core-oat-host-rule-combination,optimizing,,,true)) $(eval $(call create-core-oat-host-rule-combination,interpreter,,,true)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true)) valgrindHOST_CORE_IMG_OUTS := valgrindHOST_CORE_OAT_OUTS := $(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false)) $(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false)) $(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false)) valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS) test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS) define create-core-oat-target-rules core_compile_options := core_image_name := core_oat_name := core_infix := core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY) ifeq ($(1),optimizing) core_compile_options += --compiler-backend=Optimizing # With the optimizing compiler, we want to rerun dex2oat whenever there is # a dex2oat change to catch regressions early. core_dex2oat_dependency := $(DEX2OAT) endif ifeq ($(1),interpreter) core_compile_options += --compiler-filter=quicken core_infix := -interpreter endif ifeq ($(1),interp-ac) core_compile_options += --compiler-filter=extract --runtime-arg -Xverify:softfail core_infix := -interp-ac endif ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),) # Technically this test is not precise, but hopefully good enough. $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing) endif core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(3)$(CORE_IMG_SUFFIX) core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(3)$(CORE_OAT_SUFFIX) # Using the bitness suffix makes it easier to add as a dependency for the run-test mk. ifeq ($(2),) ifdef TARGET_2ND_ARCH $(3)TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name) else $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name) endif else $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name) endif $(3)TARGET_CORE_IMG_OUTS += $$(core_image_name) $(3)TARGET_CORE_OAT_OUTS += $$(core_oat_name) # If we have a wrapper, make the target phony. 
ifneq ($(3),) .PHONY: $$(core_image_name) endif $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options) $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name) $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name) $$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency) @echo "target dex2oat: $$@" @mkdir -p $$(dir $$@) $$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \ --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \ --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \ $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \ --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \ --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \ --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \ --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \ --android-root=$$(PRODUCT_OUT)/system \ --generate-debug-info --generate-build-id --compile-pic \ $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1) $$(core_oat_name): $$(core_image_name) # Clean up locally used variables. core_dex2oat_dependency := core_compile_options := core_image_name := core_oat_name := core_infix := endef # create-core-oat-target-rules # $(1): compiler - optimizing, interpreter or interpreter-access-checks. # $(2): wrapper. # $(3): dex2oat suffix. define create-core-oat-target-rule-combination $(call create-core-oat-target-rules,$(1),,$(2),$(3)) ifdef TARGET_2ND_ARCH $(call create-core-oat-target-rules,$(1),2ND_,$(2),$(3)) endif endef $(eval $(call create-core-oat-target-rule-combination,optimizing,,)) $(eval $(call create-core-oat-target-rule-combination,interpreter,,)) $(eval $(call create-core-oat-target-rule-combination,interp-ac,,)) valgrindTARGET_CORE_IMG_OUTS := valgrindTARGET_CORE_OAT_OUTS := $(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32)) $(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32)) valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS) valgrind-test-art-host-dex2oat: valgrind-test-art-host-dex2oat-host valgrind-test-art-host-dex2oat-target # Define a default core image that can be used for things like gtests that # need some image to run, but don't otherwise care which image is used. HOST_CORE_IMAGE_DEFAULT_32 := $(HOST_CORE_IMAGE_optimizing_32) HOST_CORE_IMAGE_DEFAULT_64 := $(HOST_CORE_IMAGE_optimizing_64) TARGET_CORE_IMAGE_DEFAULT_32 := $(TARGET_CORE_IMAGE_optimizing_32) TARGET_CORE_IMAGE_DEFAULT_64 := $(TARGET_CORE_IMAGE_optimizing_64) android-platform-art-8.1.0+r23/build/art.go000066400000000000000000000225571336577252300204140ustar00rootroot00000000000000// Copyright (C) 2016 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package art import ( "android/soong/android" "android/soong/cc" "fmt" "sync" ) var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"} func globalFlags(ctx android.BaseContext) ([]string, []string) { var cflags []string var asflags []string opt := envDefault(ctx, "ART_NDEBUG_OPT_FLAG", "-O3") cflags = append(cflags, opt) tlab := false gcType := envDefault(ctx, "ART_DEFAULT_GC_TYPE", "CMS") if envTrue(ctx, "ART_TEST_DEBUG_GC") { gcType = "SS" tlab = true } cflags = append(cflags, "-DART_DEFAULT_GC_TYPE_IS_"+gcType) if tlab { cflags = append(cflags, "-DART_USE_TLAB=1") } if !envFalse(ctx, "ART_ENABLE_VDEX") { cflags = append(cflags, "-DART_ENABLE_VDEX") } imtSize := envDefault(ctx, "ART_IMT_SIZE", "43") cflags = append(cflags, "-DIMT_SIZE="+imtSize) if envTrue(ctx, "ART_HEAP_POISONING") { cflags = append(cflags, "-DART_HEAP_POISONING=1") asflags = append(asflags, "-DART_HEAP_POISONING=1") } if !envFalse(ctx, "ART_USE_READ_BARRIER") && ctx.AConfig().ArtUseReadBarrier() { // Used to change the read barrier type. Valid values are BAKER, BROOKS, // TABLELOOKUP. The default is BAKER. barrierType := envDefault(ctx, "ART_READ_BARRIER_TYPE", "BAKER") cflags = append(cflags, "-DART_USE_READ_BARRIER=1", "-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1") asflags = append(asflags, "-DART_USE_READ_BARRIER=1", "-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1") } // We need larger stack overflow guards for ASAN, as the compiled code will have // larger frame sizes. For simplicity, just use global not-target-specific cflags. // Note: We increase this for both debug and non-debug, as the overflow gap will // be compiled into managed code. We always preopt (and build core images) with // the debug version. So make the gap consistent (and adjust for the worst). if len(ctx.AConfig().SanitizeDevice()) > 0 || len(ctx.AConfig().SanitizeHost()) > 0 { cflags = append(cflags, "-DART_STACK_OVERFLOW_GAP_arm=8192", "-DART_STACK_OVERFLOW_GAP_arm64=8192", "-DART_STACK_OVERFLOW_GAP_mips=16384", "-DART_STACK_OVERFLOW_GAP_mips64=16384", "-DART_STACK_OVERFLOW_GAP_x86=16384", "-DART_STACK_OVERFLOW_GAP_x86_64=20480") } else { cflags = append(cflags, "-DART_STACK_OVERFLOW_GAP_arm=8192", "-DART_STACK_OVERFLOW_GAP_arm64=8192", "-DART_STACK_OVERFLOW_GAP_mips=16384", "-DART_STACK_OVERFLOW_GAP_mips64=16384", "-DART_STACK_OVERFLOW_GAP_x86=8192", "-DART_STACK_OVERFLOW_GAP_x86_64=8192") } if envTrue(ctx, "ART_ENABLE_ADDRESS_SANITIZER") { // Used to enable full sanitization, i.e., user poisoning, under ASAN. 
cflags = append(cflags, "-DART_ENABLE_ADDRESS_SANITIZER=1") asflags = append(asflags, "-DART_ENABLE_ADDRESS_SANITIZER=1") } return cflags, asflags } func debugFlags(ctx android.BaseContext) []string { var cflags []string opt := envDefault(ctx, "ART_DEBUG_OPT_FLAG", "-O2") cflags = append(cflags, opt) return cflags } func deviceFlags(ctx android.BaseContext) []string { var cflags []string deviceFrameSizeLimit := 1736 if len(ctx.AConfig().SanitizeDevice()) > 0 { deviceFrameSizeLimit = 7400 } cflags = append(cflags, fmt.Sprintf("-Wframe-larger-than=%d", deviceFrameSizeLimit), fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", deviceFrameSizeLimit), ) cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgDeviceBaseAddress()) if envTrue(ctx, "ART_TARGET_LINUX") { cflags = append(cflags, "-DART_TARGET_LINUX") } else { cflags = append(cflags, "-DART_TARGET_ANDROID") } minDelta := envDefault(ctx, "LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA", "-0x1000000") maxDelta := envDefault(ctx, "LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA", "0x1000000") cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta) cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta) return cflags } func hostFlags(ctx android.BaseContext) []string { var cflags []string hostFrameSizeLimit := 1736 if len(ctx.AConfig().SanitizeHost()) > 0 { // art/test/137-cfi/cfi.cc // error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess' hostFrameSizeLimit = 6400 } cflags = append(cflags, fmt.Sprintf("-Wframe-larger-than=%d", hostFrameSizeLimit), fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", hostFrameSizeLimit), ) cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgHostBaseAddress()) minDelta := envDefault(ctx, "LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA", "-0x1000000") maxDelta := envDefault(ctx, "LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA", "0x1000000") cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta) cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta) return cflags } func globalDefaults(ctx android.LoadHookContext) { type props struct { Target struct { Android struct { Cflags []string } Host struct { Cflags []string } } Cflags []string Asflags []string Sanitize struct { Recover []string } } p := &props{} p.Cflags, p.Asflags = globalFlags(ctx) p.Target.Android.Cflags = deviceFlags(ctx) p.Target.Host.Cflags = hostFlags(ctx) if envTrue(ctx, "ART_DEX_FILE_ACCESS_TRACKING") { p.Cflags = append(p.Cflags, "-DART_DEX_FILE_ACCESS_TRACKING") p.Sanitize.Recover = []string{ "address", } } ctx.AppendProperties(p) } func debugDefaults(ctx android.LoadHookContext) { type props struct { Cflags []string } p := &props{} p.Cflags = debugFlags(ctx) ctx.AppendProperties(p) } func customLinker(ctx android.LoadHookContext) { linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "") if linker != "" { type props struct { DynamicLinker string } p := &props{} p.DynamicLinker = linker ctx.AppendProperties(p) } } func prefer32Bit(ctx android.LoadHookContext) { if envTrue(ctx, "HOST_PREFER_32_BIT") { type props struct { Target struct { Host struct { Compile_multilib string } } } p := &props{} p.Target.Host.Compile_multilib = "prefer32" ctx.AppendProperties(p) } } func testMap(config android.Config) map[string][]string { return config.Once("artTests", func() interface{} { return make(map[string][]string) }).(map[string][]string) } func testInstall(ctx android.InstallHookContext) { testMap := testMap(ctx.AConfig()) var name string if ctx.Host() { name = "host_" } else { name = "device_" } name += 
ctx.Arch().ArchType.String() + "_" + ctx.ModuleName() artTestMutex.Lock() defer artTestMutex.Unlock() tests := testMap[name] tests = append(tests, ctx.Path().RelPathString()) testMap[name] = tests } var artTestMutex sync.Mutex func init() { android.RegisterModuleType("art_cc_library", artLibrary) android.RegisterModuleType("art_cc_binary", artBinary) android.RegisterModuleType("art_cc_test", artTest) android.RegisterModuleType("art_cc_test_library", artTestLibrary) android.RegisterModuleType("art_cc_defaults", artDefaultsFactory) android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory) android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory) } func artGlobalDefaultsFactory() android.Module { module := artDefaultsFactory() android.AddLoadHook(module, globalDefaults) return module } func artDebugDefaultsFactory() android.Module { module := artDefaultsFactory() android.AddLoadHook(module, debugDefaults) return module } func artDefaultsFactory() android.Module { c := &codegenProperties{} module := cc.DefaultsFactory(c) android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) }) return module } func artLibrary() android.Module { library, _ := cc.NewLibrary(android.HostAndDeviceSupported) module := library.Init() installCodegenCustomizer(module, true) return module } func artBinary() android.Module { binary, _ := cc.NewBinary(android.HostAndDeviceSupported) module := binary.Init() android.AddLoadHook(module, customLinker) android.AddLoadHook(module, prefer32Bit) return module } func artTest() android.Module { test := cc.NewTest(android.HostAndDeviceSupported) module := test.Init() installCodegenCustomizer(module, false) android.AddLoadHook(module, customLinker) android.AddLoadHook(module, prefer32Bit) android.AddInstallHook(module, testInstall) return module } func artTestLibrary() android.Module { test := cc.NewTestLibrary(android.HostAndDeviceSupported) module := test.Init() installCodegenCustomizer(module, false) android.AddLoadHook(module, prefer32Bit) android.AddInstallHook(module, testInstall) return module } func envDefault(ctx android.BaseContext, key string, defaultValue string) string { ret := ctx.AConfig().Getenv(key) if ret == "" { return defaultValue } return ret } func envTrue(ctx android.BaseContext, key string) bool { return ctx.AConfig().Getenv(key) == "true" } func envFalse(ctx android.BaseContext, key string) bool { return ctx.AConfig().Getenv(key) == "false" } android-platform-art-8.1.0+r23/build/codegen.go000066400000000000000000000074711336577252300212300ustar00rootroot00000000000000// Copyright (C) 2016 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package art // This file implements the "codegen" property to apply different properties based on the currently // selected codegen arches, which defaults to all arches on the host and the primary and secondary // arches on the device. 
import ( "android/soong/android" "sort" "strings" ) func codegen(ctx android.LoadHookContext, c *codegenProperties, library bool) { var hostArches, deviceArches []string e := envDefault(ctx, "ART_HOST_CODEGEN_ARCHS", "") if e == "" { hostArches = supportedArches } else { hostArches = strings.Split(e, " ") } e = envDefault(ctx, "ART_TARGET_CODEGEN_ARCHS", "") if e == "" { deviceArches = defaultDeviceCodegenArches(ctx) } else { deviceArches = strings.Split(e, " ") } addCodegenArchProperties := func(host bool, archName string) { type props struct { Target struct { Android *CodegenCommonArchProperties Host *CodegenCommonArchProperties } } type libraryProps struct { Target struct { Android *CodegenLibraryArchProperties Host *CodegenLibraryArchProperties } } var arch *codegenArchProperties switch archName { case "arm": arch = &c.Codegen.Arm case "arm64": arch = &c.Codegen.Arm64 case "mips": arch = &c.Codegen.Mips case "mips64": arch = &c.Codegen.Mips64 case "x86": arch = &c.Codegen.X86 case "x86_64": arch = &c.Codegen.X86_64 default: ctx.ModuleErrorf("Unknown codegen architecture %q", archName) return } p := &props{} l := &libraryProps{} if host { p.Target.Host = &arch.CodegenCommonArchProperties l.Target.Host = &arch.CodegenLibraryArchProperties } else { p.Target.Android = &arch.CodegenCommonArchProperties l.Target.Android = &arch.CodegenLibraryArchProperties } ctx.AppendProperties(p) if library { ctx.AppendProperties(l) } } for _, arch := range deviceArches { addCodegenArchProperties(false, arch) if ctx.Failed() { return } } for _, arch := range hostArches { addCodegenArchProperties(true, arch) if ctx.Failed() { return } } } type CodegenCommonArchProperties struct { Srcs []string Cflags []string } type CodegenLibraryArchProperties struct { Static struct { Whole_static_libs []string } Shared struct { Shared_libs []string } } type codegenArchProperties struct { CodegenCommonArchProperties CodegenLibraryArchProperties } type codegenProperties struct { Codegen struct { Arm, Arm64, Mips, Mips64, X86, X86_64 codegenArchProperties } } type codegenCustomizer struct { library bool codegenProperties codegenProperties } func defaultDeviceCodegenArches(ctx android.LoadHookContext) []string { arches := make(map[string]bool) for _, a := range ctx.DeviceConfig().Arches() { s := a.ArchType.String() arches[s] = true if s == "arm64" { arches["arm"] = true } else if s == "mips64" { arches["mips"] = true } else if s == "x86_64" { arches["x86"] = true } } ret := make([]string, 0, len(arches)) for a := range arches { ret = append(ret, a) } sort.Strings(ret) return ret } func installCodegenCustomizer(module android.Module, library bool) { c := &codegenProperties{} android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, library) }) module.AddProperties(c) } android-platform-art-8.1.0+r23/build/makevars.go000066400000000000000000000024421336577252300214260ustar00rootroot00000000000000// Copyright (C) 2016 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package art import ( "sort" "strings" "android/soong/android" ) var ( pctx = android.NewPackageContext("android/soong/art") ) func init() { android.RegisterMakeVarsProvider(pctx, makeVarsProvider) } func makeVarsProvider(ctx android.MakeVarsContext) { ctx.Strict("LIBART_IMG_HOST_BASE_ADDRESS", ctx.Config().LibartImgHostBaseAddress()) ctx.Strict("LIBART_IMG_TARGET_BASE_ADDRESS", ctx.Config().LibartImgDeviceBaseAddress()) testMap := testMap(ctx.Config()) var testNames []string for name := range testMap { testNames = append(testNames, name) } sort.Strings(testNames) for _, name := range testNames { ctx.Strict("ART_TEST_LIST_"+name, strings.Join(testMap[name], " ")) } } android-platform-art-8.1.0+r23/cmdline/000077500000000000000000000000001336577252300176005ustar00rootroot00000000000000android-platform-art-8.1.0+r23/cmdline/Android.bp000066400000000000000000000013661336577252300215110ustar00rootroot00000000000000// // Copyright (C) 2016 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // art_cc_test { name: "art_cmdline_tests", defaults: [ "art_gtest_defaults", ], srcs: ["cmdline_parser_test.cc"], } android-platform-art-8.1.0+r23/cmdline/README.md000066400000000000000000000233761336577252300210720ustar00rootroot00000000000000Cmdline =================== Introduction ------------- This directory contains the classes that do common command line tool initialization and parsing. The long term goal is eventually for all `art` command-line tools to be using these helpers. ---------- ## Cmdline Parser ------------- The `CmdlineParser` class provides a fluent interface using a domain-specific language to quickly generate a type-safe value parser that process a user-provided list of strings (`argv`). Currently, it can parse a string into a `VariantMap`, although in the future it might be desirable to parse into any struct of any field. To use, create a `CmdlineParser::Builder` and then chain the `Define` methods together with `WithType` and `IntoXX` methods. ### Quick Start For example, to save the values into a user-defined variant map: ``` struct FruitVariantMap : VariantMap { static const Key Apple; static const Key Orange; static const Key Help; }; // Note that some template boilerplate has been avoided for clarity. // See variant_map_test.cc for how to completely define a custom map. using FruitParser = CmdlineParser; FruitParser MakeParser() { auto&& builder = FruitParser::Builder(); builder. .Define("--help") .IntoKey(FruitVariantMap::Help) Define("--apple:_") .WithType() .IntoKey(FruitVariantMap::Apple) .Define("--orange:_") .WithType() .WithRange(0.0, 1.0) .IntoKey(FruitVariantMap::Orange); return builder.Build(); } int main(char** argv, int argc) { auto parser = MakeParser(); auto result = parser.parse(argv, argc)); if (result.isError()) { std::cerr << result.getMessage() << std::endl; return EXIT_FAILURE; } auto map = parser.GetArgumentsMap(); std::cout << "Help? " << map.GetOrDefault(FruitVariantMap::Help) << std::endl; std::cout << "Apple? 
" << map.GetOrDefault(FruitVariantMap::Apple) << std::endl; std::cout << "Orange? " << map.GetOrDefault(FruitVariantMap::Orange) << std::endl; return EXIT_SUCCESS; } ``` In the above code sample, we define a parser which is capable of parsing something like `--help --apple:123 --orange:0.456` . It will error out automatically if invalid flags are given, or if the appropriate flags are given but of the the wrong type/range. So for example, `--foo` will not parse (invalid argument), neither will `--apple:fruit` (fruit is not an int) nor `--orange:1234` (1234 is out of range of [0.0, 1.0]) ### Argument Definitions in Detail #### Define method The 'Define' method takes one or more aliases for the argument. Common examples might be `{"-h", "--help"}` where both `--help` and `-h` are aliases for the same argument. The simplest kind of argument just tests for presence, but we often want to parse out a particular type of value (such as an int or double as in the above `FruitVariantMap` example). To do that, a _wildcard_ must be used to denote the location within the token that the type will be parsed out of. For example with `-orange:_` the parse would know to check all tokens in an `argv` list for the `-orange:` prefix and then strip it, leaving only the remains to be parsed. #### WithType method (optional) After an argument definition is provided, the parser builder needs to know what type the argument will be in order to provide the type safety and make sure the rest of the argument definition is correct as early as possible (in essence, everything but the parsing of the argument name is done at compile time). Everything that follows a `WithType()` call is thus type checked to only take `T` values. If this call is omitted, the parser generator assumes you are building a `Unit` type (i.e. an argument that only cares about presence). #### WithRange method (optional) Some values will not make sense outside of a `[min, max]` range, so this is an option to quickly add a range check without writing custom code. The range check is performed after the main parsing happens and happens for any type implementing the `<=` operators. #### WithValueMap (optional) When parsing an enumeration, it might be very convenient to map a list of possible argument string values into its runtime value. With something like ``` .Define("-hello:_") .WithValueMap({"world", kWorld}, {"galaxy", kGalaxy}) ``` It will parse either `-hello:world` or `-hello:galaxy` only (and error out on other variations of `-hello:whatever`), converting it to the type-safe value of `kWorld` or `kGalaxy` respectively. This is meant to be another shorthand (like `WithRange`) to avoid writing a custom type parser. In general it takes a variadic number of `pair`. #### WithValues (optional) When an argument definition has multiple aliases with no wildcards, it might be convenient to quickly map them into discrete values. For example: ``` .Define({"-xinterpret", "-xnointerpret"}) .WithValues({true, false} ``` It will parse `-xinterpret` as `true` and `-xnointerpret` as `false`. In general, it uses the position of the argument alias to map into the WithValues position value. (Note that this method will not work when the argument definitions have a wildcard because there is no way to position-ally match that). #### AppendValues (optional) By default, the argument is assumed to appear exactly once, and if the user specifies it more than once, only the latest value is taken into account (and all previous occurrences of the argument are ignored). 
#### WithValueMap (optional)
When parsing an enumeration, it might be very convenient to map a list of possible argument string
values into its runtime value. With something like

```
    .Define("-hello:_")
      .WithValueMap({"world", kWorld},
                    {"galaxy", kGalaxy})
```

It will parse either `-hello:world` or `-hello:galaxy` only (and error out on other variations of
`-hello:whatever`), converting it to the type-safe value of `kWorld` or `kGalaxy` respectively.

This is meant to be another shorthand (like `WithRange`) to avoid writing a custom type parser. In
general it takes a variadic number of `pair<const char*, T>`.

#### WithValues (optional)
When an argument definition has multiple aliases with no wildcards, it might be convenient to
quickly map them into discrete values. For example:

```
    .Define({"-xinterpret", "-xnointerpret"})
      .WithValues({true, false})
```

It will parse `-xinterpret` as `true` and `-xnointerpret` as `false`.

In general, it uses the position of the argument alias to map into the `WithValues` position value.
(Note that this method will not work when the argument definitions have a wildcard because there is
no way to positionally match that.)

#### AppendValues (optional)
By default, the argument is assumed to appear exactly once, and if the user specifies it more than
once, only the latest value is taken into account (and all previous occurrences of the argument are
ignored).

In some situations, we may want to accumulate the argument values instead of discarding the
previous ones. For example:

```
    .Define("-D")
      .WithType<std::vector<std::string>>()
      .AppendValues()
```

Will parse something like `-Dhello -Dworld -Dbar -Dbaz` into
`std::vector<std::string>{"hello", "world", "bar", "baz"}`.

### Setting an argument parse target (required)
To complete an argument definition, the parser generator also needs to know where to save values.
Currently, only `IntoKey` is supported, but that may change in the future.

#### IntoKey (required)
This specifies that when a value is parsed, it will get saved into a variant map using the specific
key. For example,

```
    .Define("-help")
      .IntoKey(Map::Help)
```

will save occurrences of the `-help` argument by doing a `Map.Set(Map::Help, ParsedValue("-help"))`
where `ParsedValue` is an imaginary function that parses the `-help` argument into a specific type
set by `WithType`.

### Ignoring unknown arguments
This is highly discouraged, but for compatibility with `JNI` which allows argument ignores, there
is an option to ignore any argument tokens that are not known to the parser. This is done with the
`Ignore` function which takes a list of argument definition names.

It's semantically equivalent to making a series of argument definitions that map to `Unit` but
don't get saved anywhere. Values will still get parsed as normal, so it will *not* ignore known
arguments with invalid values, only user-arguments for which it could not find a matching argument
definition.
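As a sketch, wiring ignores into a builder might look like this; the two flag names are the
illustrative ones used in the parser's own comments, not an exhaustive real list, and
`parser_builder` is a hypothetical `Builder` instance:

```
parser_builder
    .IgnoreUnrecognized(true)            // not recommended: silently skip unknown tokens
    .Ignore({"-Xjitblocking",            // presence-only ignore (acts like Unit)
             "-Xjitconfig:_"});          // wildcard ignore; value parsed, then discarded
```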
### Parsing custom types
Any type can be parsed from a string by specializing the `CmdlineType` class and implementing the
static interface provided by `CmdlineTypeParser`. It is recommended to inherit from
`CmdlineTypeParser` since it already provides default implementations for every method.

The `Parse` method should be implemented for most types. Some types will allow appending (such as
an `std::vector<std::string>`) and are meant to be used with `AppendValues`, in which case the
`ParseAndAppend` function should be implemented.

For example:

```
template <>
struct CmdlineType<double> : CmdlineTypeParser<double> {
  Result Parse(const std::string& str) {
    char* end = nullptr;
    errno = 0;
    double value = strtod(str.c_str(), &end);

    if (*end != '\0') {
      return Result::Failure("Failed to parse double from " + str);
    }
    if (errno == ERANGE) {
      return Result::OutOfRange(
          "Failed to parse double from " + str + "; overflow/underflow occurred");
    }

    return Result::Success(value);
  }

  static const char* Name() { return "double"; }
  // note: Name() is just here for more user-friendly errors,
  // but in the future we will use non-standard ways of getting the type name
  // at compile-time and this will no longer be required
};
```

Will parse any non-append argument definitions with a type of `double`.

For an appending example:

```
template <>
struct CmdlineType<std::vector<std::string>> : CmdlineTypeParser<std::vector<std::string>> {
  Result ParseAndAppend(const std::string& args,
                        std::vector<std::string>& existing_value) {
    existing_value.push_back(args);
    return Result::SuccessNoValue();
  }

  static const char* Name() { return "std::vector<std::string>"; }
};
```

Will parse multiple instances of the same argument repeatedly into the `existing_value` (which will
be default-constructed to `T{}` for the first occurrence of the argument).

#### What is a `Result`?
`Result` is a typedef for `CmdlineParseResult<T>` and it acts similarly to a poor version of
`Either` in Haskell. In particular, it would be similar to `Either<int ErrorCode, Maybe<T>>`.

There are helpers like `Result::Success(value)`, `Result::Failure(string message)` and so on to
quickly construct these without caring about the type.

When successfully parsing a single value, `Result::Success(value)` should be used, and when
successfully parsing an appended value, use `Result::SuccessNoValue()` and write back the new value
into `existing_value` as an out-parameter.

When many arguments are parsed, the result is collapsed down to a `CmdlineResult` which acts as an
`Either` where the right side simply indicates success. When values are successfully stored, the
parser will automatically save them into the target destination as a side effect.
android-platform-art-8.1.0+r23/cmdline/cmdline.h000066400000000000000000000272751336577252300213770ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_CMDLINE_H_ #define ART_CMDLINE_CMDLINE_H_ #include #include #include #include #include #include "android-base/stringprintf.h" #include "base/logging.h" #include "base/stringpiece.h" #include "noop_compiler_callbacks.h" #include "runtime.h" #if !defined(NDEBUG) #define DBG_LOG LOG(INFO) #else #define DBG_LOG LOG(DEBUG) #endif namespace art { // TODO: Move to and remove all copies of this function. static bool LocationToFilename(const std::string& location, InstructionSet isa, std::string* filename) { bool has_system = false; bool has_cache = false; // image_location = /system/framework/boot.art // system_image_filename = /system/framework//boot.art std::string system_filename(GetSystemImageFilename(location.c_str(), isa)); if (OS::FileExists(system_filename.c_str())) { has_system = true; } bool have_android_data = false; bool dalvik_cache_exists = false; bool is_global_cache = false; std::string dalvik_cache; GetDalvikCache(GetInstructionSetString(isa), false, &dalvik_cache, &have_android_data, &dalvik_cache_exists, &is_global_cache); std::string cache_filename; if (have_android_data && dalvik_cache_exists) { // Always set output location even if it does not exist, // so that the caller knows where to create the image. // // image_location = /system/framework/boot.art // *image_filename = /data/dalvik-cache//boot.art std::string error_msg; if (GetDalvikCacheFilename(location.c_str(), dalvik_cache.c_str(), &cache_filename, &error_msg)) { has_cache = true; } } if (has_system) { *filename = system_filename; return true; } else if (has_cache) { *filename = cache_filename; return true; } else { return false; } } static Runtime* StartRuntime(const char* boot_image_location, InstructionSet instruction_set) { CHECK(boot_image_location != nullptr); RuntimeOptions options; // We are more like a compiler than a run-time. We don't want to execute code. { static NoopCompilerCallbacks callbacks; options.push_back(std::make_pair("compilercallbacks", &callbacks)); } // Boot image location.
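// e.g. this becomes "-Ximage:/system/framework/boot.art" when --boot-image=/system/framework/boot.art is passed.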
{ std::string boot_image_option; boot_image_option += "-Ximage:"; boot_image_option += boot_image_location; options.push_back(std::make_pair(boot_image_option.c_str(), nullptr)); } // Instruction set. options.push_back( std::make_pair("imageinstructionset", reinterpret_cast(GetInstructionSetString(instruction_set)))); // None of the command line tools need sig chain. If this changes we'll need // to upgrade this option to a proper parameter. options.push_back(std::make_pair("-Xno-sig-chain", nullptr)); if (!Runtime::Create(options, false)) { fprintf(stderr, "Failed to create runtime\n"); return nullptr; } // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now and then switch to a more manageable ScopedObjectAccess. Thread::Current()->TransitionFromRunnableToSuspended(kNative); return Runtime::Current(); } struct CmdlineArgs { enum ParseStatus { kParseOk, // Parse successful. Do not set the error message. kParseUnknownArgument, // Unknown argument. Do not set the error message. kParseError, // Parse ok, but failed elsewhere. Print the set error message. }; bool Parse(int argc, char** argv) { // Skip over argv[0]. argv++; argc--; if (argc == 0) { fprintf(stderr, "No arguments specified\n"); PrintUsage(); return false; } std::string error_msg; for (int i = 0; i < argc; i++) { const StringPiece option(argv[i]); if (option.starts_with("--boot-image=")) { boot_image_location_ = option.substr(strlen("--boot-image=")).data(); } else if (option.starts_with("--instruction-set=")) { StringPiece instruction_set_str = option.substr(strlen("--instruction-set=")).data(); instruction_set_ = GetInstructionSetFromString(instruction_set_str.data()); if (instruction_set_ == kNone) { fprintf(stderr, "Unsupported instruction set %s\n", instruction_set_str.data()); PrintUsage(); return false; } } else if (option.starts_with("--output=")) { output_name_ = option.substr(strlen("--output=")).ToString(); const char* filename = output_name_.c_str(); out_.reset(new std::ofstream(filename)); if (!out_->good()) { fprintf(stderr, "Failed to open output filename %s\n", filename); PrintUsage(); return false; } os_ = out_.get(); } else { ParseStatus parse_status = ParseCustom(option, &error_msg); if (parse_status == kParseUnknownArgument) { fprintf(stderr, "Unknown argument %s\n", option.data()); } if (parse_status != kParseOk) { fprintf(stderr, "%s\n", error_msg.c_str()); PrintUsage(); return false; } } } DBG_LOG << "will call parse checks"; { ParseStatus checks_status = ParseChecks(&error_msg); if (checks_status != kParseOk) { fprintf(stderr, "%s\n", error_msg.c_str()); PrintUsage(); return false; } } return true; } virtual std::string GetUsage() const { std::string usage; usage += // Required. " --boot-image=: provide the image location for the boot class path.\n" " Do not include the arch as part of the name, it is added automatically.\n" " Example: --boot-image=/system/framework/boot.art\n" " (specifies /system/framework//boot.art as the image file)\n" "\n"; usage += android::base::StringPrintf( // Optional. " --instruction-set=(arm|arm64|mips|mips64|x86|x86_64): for locating the image\n" " file based on the image location set.\n" " Example: --instruction-set=x86\n" " Default: %s\n" "\n", GetInstructionSetString(kRuntimeISA)); usage += // Optional. " --output= may be used to send the output to a file.\n" " Example: --output=/tmp/oatdump.txt\n" "\n"; return usage; } // Specified by --boot-image. 
const char* boot_image_location_ = nullptr; // Specified by --instruction-set. InstructionSet instruction_set_ = kRuntimeISA; // Specified by --output. std::ostream* os_ = &std::cout; std::unique_ptr out_; // If something besides cout is used std::string output_name_; virtual ~CmdlineArgs() {} bool ParseCheckBootImage(std::string* error_msg) { if (boot_image_location_ == nullptr) { *error_msg = "--boot-image must be specified"; return false; } DBG_LOG << "boot image location: " << boot_image_location_; // Checks for --boot-image location. { std::string boot_image_location = boot_image_location_; size_t file_name_idx = boot_image_location.rfind('/'); if (file_name_idx == std::string::npos) { // Prevent a InsertIsaDirectory check failure. *error_msg = "Boot image location must have a / in it"; return false; } // Don't let image locations with the 'arch' in it through, since it's not a location. // This prevents a common error "Could not create an image space..." when initing the Runtime. if (file_name_idx != std::string::npos) { std::string no_file_name = boot_image_location.substr(0, file_name_idx); size_t ancestor_dirs_idx = no_file_name.rfind('/'); std::string parent_dir_name; if (ancestor_dirs_idx != std::string::npos) { parent_dir_name = no_file_name.substr(ancestor_dirs_idx + 1); } else { parent_dir_name = no_file_name; } DBG_LOG << "boot_image_location parent_dir_name was " << parent_dir_name; if (GetInstructionSetFromString(parent_dir_name.c_str()) != kNone) { *error_msg = "Do not specify the architecture as part of the boot image location"; return false; } } // Check that the boot image location points to a valid file name. std::string file_name; if (!LocationToFilename(boot_image_location, instruction_set_, &file_name)) { *error_msg = android::base::StringPrintf("No corresponding file for location '%s' exists", boot_image_location.c_str()); return false; } DBG_LOG << "boot_image_filename does exist: " << file_name; } return true; } void PrintUsage() { fprintf(stderr, "%s", GetUsage().c_str()); } protected: virtual ParseStatus ParseCustom(const StringPiece& option ATTRIBUTE_UNUSED, std::string* error_msg ATTRIBUTE_UNUSED) { return kParseUnknownArgument; } virtual ParseStatus ParseChecks(std::string* error_msg ATTRIBUTE_UNUSED) { return kParseOk; } }; template struct CmdlineMain { int Main(int argc, char** argv) { InitLogging(argv, Runtime::Abort); std::unique_ptr args = std::unique_ptr(CreateArguments()); args_ = args.get(); DBG_LOG << "Try to parse"; if (args_ == nullptr || !args_->Parse(argc, argv)) { return EXIT_FAILURE; } bool needs_runtime = NeedsRuntime(); std::unique_ptr runtime; if (needs_runtime) { std::string error_msg; if (!args_->ParseCheckBootImage(&error_msg)) { fprintf(stderr, "%s\n", error_msg.c_str()); args_->PrintUsage(); return EXIT_FAILURE; } runtime.reset(CreateRuntime(args.get())); if (runtime == nullptr) { return EXIT_FAILURE; } if (!ExecuteWithRuntime(runtime.get())) { return EXIT_FAILURE; } } else { if (!ExecuteWithoutRuntime()) { return EXIT_FAILURE; } } if (!ExecuteCommon()) { return EXIT_FAILURE; } return EXIT_SUCCESS; } // Override this function to create your own arguments. // Usually will want to return a subtype of CmdlineArgs. virtual Args* CreateArguments() { return new Args(); } // Override this function to do something else with the runtime. virtual bool ExecuteWithRuntime(Runtime* runtime) { CHECK(runtime != nullptr); // Do nothing return true; } // Does the code execution need a runtime? Sometimes it doesn't. 
virtual bool NeedsRuntime() { return true; } // Do execution without having created a runtime. virtual bool ExecuteWithoutRuntime() { return true; } // Continue execution after ExecuteWith[out]Runtime virtual bool ExecuteCommon() { return true; } virtual ~CmdlineMain() {} protected: Args* args_ = nullptr; private: Runtime* CreateRuntime(CmdlineArgs* args) { CHECK(args != nullptr); return StartRuntime(args->boot_image_location_, args->instruction_set_); } }; } // namespace art #endif // ART_CMDLINE_CMDLINE_H_ android-platform-art-8.1.0+r23/cmdline/cmdline_parse_result.h000066400000000000000000000114471336577252300241630ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ #define ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ #include "cmdline_result.h" #include "detail/cmdline_parser_detail.h" namespace art { // Result of a type-parsing attempt. If successful holds the strongly-typed value, // otherwise it holds either a usage or a failure string message that should be displayed back // to the user. // // CmdlineType::Parse/CmdlineType::ParseAndAppend must return this type. template struct CmdlineParseResult : CmdlineResult { using CmdlineResult::CmdlineResult; // Create an error result with the usage error code and the specified message. static CmdlineParseResult Usage(const std::string& message) { return CmdlineParseResult(kUsage, message); } // Create an error result with the failure error code and no message. static CmdlineParseResult Failure() { return CmdlineParseResult(kFailure); } // Create an error result with the failure error code and no message. static CmdlineParseResult Failure(const std::string& message) { return CmdlineParseResult(kFailure, message); } // Create a successful result which holds the specified value. static CmdlineParseResult Success(const T& value) { return CmdlineParseResult(value); } // Create a successful result, taking over the value. static CmdlineParseResult Success(T&& value) { return CmdlineParseResult(std::forward(value)); } // Create succesful result, without any values. Used when a value was successfully appended // into an existing object. static CmdlineParseResult SuccessNoValue() { return CmdlineParseResult(T {}); } // Create an error result with the OutOfRange error and the specified message. static CmdlineParseResult OutOfRange(const std::string& message) { return CmdlineParseResult(kOutOfRange, message); } // Create an error result with the OutOfRange code and a custom message // which is printed from the actual/min/max values. // Values are converted to string using the ostream<< operator. static CmdlineParseResult OutOfRange(const T& value, const T& min, const T& max) { return CmdlineParseResult(kOutOfRange, "actual: " + art::detail::ToStringAny(value) + ", min: " + art::detail::ToStringAny(min) + ", max: " + art::detail::ToStringAny(max)); } // Get a read-only reference to the underlying value. 
// The result must have been successful and must have a value. const T& GetValue() const { assert(IsSuccess()); assert(has_value_); return value_; } // Get a mutable reference to the underlying value. // The result must have been successful and must have a value. T& GetValue() { assert(IsSuccess()); assert(has_value_); return value_; } // Take over the value. // The result must have been successful and must have a value. T&& ReleaseValue() { assert(IsSuccess()); assert(has_value_); return std::move(value_); } // Whether or not the result has a value (e.g. created with Result::Success). // Error results never have values, success results commonly, but not always, have values. bool HasValue() const { return has_value_; } // Cast an error-result from type T2 to T1. // Safe since error-results don't store a typed value. template static CmdlineParseResult CastError(const CmdlineParseResult& other) { assert(other.IsError()); return CmdlineParseResult(other.GetStatus()); } // Make sure copying is allowed CmdlineParseResult(const CmdlineParseResult&) = default; // Make sure moving is cheap CmdlineParseResult(CmdlineParseResult&&) = default; private: explicit CmdlineParseResult(const T& value) : CmdlineResult(kSuccess), value_(value), has_value_(true) {} explicit CmdlineParseResult(T&& value) : CmdlineResult(kSuccess), value_(std::forward(value)), has_value_(true) {} CmdlineParseResult() : CmdlineResult(kSuccess), value_(), has_value_(false) {} T value_; bool has_value_ = false; }; } // namespace art #endif // ART_CMDLINE_CMDLINE_PARSE_RESULT_H_ android-platform-art-8.1.0+r23/cmdline/cmdline_parser.h000066400000000000000000000567301336577252300227530ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_CMDLINE_PARSER_H_ #define ART_CMDLINE_CMDLINE_PARSER_H_ #define CMDLINE_NDEBUG 1 // Do not output any debugging information for parsing. #include "cmdline/detail/cmdline_parser_detail.h" #include "cmdline/detail/cmdline_parse_argument_detail.h" #include "cmdline/detail/cmdline_debug_detail.h" #include "cmdline_type_parser.h" #include "token_range.h" #include "cmdline_types.h" #include "cmdline_result.h" #include "cmdline_parse_result.h" #include "runtime/base/variant_map.h" #include #include namespace art { // Build a parser for command line arguments with a small domain specific language. // Each parsed type must have a specialized CmdlineType in order to do the string->T parsing. // Each argument must also have a VariantMap::Key in order to do the T storage. template class TVariantMapKey> struct CmdlineParser { template struct ArgumentBuilder; struct Builder; // Build the parser. struct UntypedArgumentBuilder; // Build arguments which weren't yet given a type. private: // Forward declare some functions that we need to use before fully-defining structs. 
template static ArgumentBuilder CreateArgumentBuilder(Builder& parent); static void AppendCompletedArgument(Builder& builder, detail::CmdlineParseArgumentAny* arg); // Allow argument definitions to save their values when they are parsed, // without having a dependency on CmdlineParser or any of the builders. // // A shared pointer to the save destination is saved into the load/save argument callbacks. // // This also allows the underlying storage (i.e. a variant map) to be released // to the user, without having to recreate all of the callbacks. struct SaveDestination { SaveDestination() : variant_map_(new TVariantMap()) {} // Save value to the variant map. template void SaveToMap(const TVariantMapKey& key, TArg& value) { variant_map_->Set(key, value); } // Get the existing value from a map, creating the value if it did not already exist. template TArg& GetOrCreateFromMap(const TVariantMapKey& key) { auto* ptr = variant_map_->Get(key); if (ptr == nullptr) { variant_map_->Set(key, TArg()); ptr = variant_map_->Get(key); assert(ptr != nullptr); } return *ptr; } protected: // Release the map, clearing it as a side-effect. // Future saves will be distinct from previous saves. TVariantMap&& ReleaseMap() { return std::move(*variant_map_); } // Get a read-only reference to the variant map. const TVariantMap& GetMap() { return *variant_map_; } // Clear all potential save targets. void Clear() { variant_map_->Clear(); } private: // Don't try to copy or move this. Just don't. SaveDestination(const SaveDestination&) = delete; SaveDestination(SaveDestination&&) = delete; SaveDestination& operator=(const SaveDestination&) = delete; SaveDestination& operator=(SaveDestination&&) = delete; std::shared_ptr variant_map_; // Allow the parser to change the underlying pointers when we release the underlying storage. friend struct CmdlineParser; }; public: // Builder for the argument definition of type TArg. Do not use this type directly, // it is only a separate type to provide compile-time enforcement against doing // illegal builds. template struct ArgumentBuilder { // Add a range check to this argument. ArgumentBuilder& WithRange(const TArg& min, const TArg& max) { argument_info_.has_range_ = true; argument_info_.min_ = min; argument_info_.max_ = max; return *this; } // Map the list of names into the list of values. List of names must not have // any wildcards '_' in it. // // Do not use if a value map has already been set. ArgumentBuilder& WithValues(std::initializer_list value_list) { SetValuesInternal(value_list); return *this; } // When used with a single alias, map the alias into this value. // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. ArgumentBuilder WithValue(const TArg& value) { return WithValues({ value }); } // Map the parsed string values (from _) onto a concrete value. If no wildcard // has been specified, then map the value directly from the arg name (i.e. // if there are multiple aliases, then use the alias to do the mapping). // // Do not use if a values list has already been set. ArgumentBuilder& WithValueMap( std::initializer_list> key_value_list) { assert(!argument_info_.has_value_list_); argument_info_.has_value_map_ = true; argument_info_.value_map_ = key_value_list; return *this; } // If this argument is seen multiple times, successive arguments mutate the same value // instead of replacing it with a new value. 
ArgumentBuilder& AppendValues() { argument_info_.appending_values_ = true; return *this; } // Convenience type alias for the variant map key type definition. using MapKey = TVariantMapKey; // Write the results of this argument into the key. // To look up the parsed arguments, get the map and then use this key with VariantMap::Get CmdlineParser::Builder& IntoKey(const MapKey& key) { // Only capture save destination as a pointer. // This allows the parser to later on change the specific save targets. auto save_destination = save_destination_; save_value_ = [save_destination, &key](TArg& value) { save_destination->SaveToMap(key, value); CMDLINE_DEBUG_LOG << "Saved value into map '" << detail::ToStringAny(value) << "'" << std::endl; }; load_value_ = [save_destination, &key]() -> TArg& { TArg& value = save_destination->GetOrCreateFromMap(key); CMDLINE_DEBUG_LOG << "Loaded value from map '" << detail::ToStringAny(value) << "'" << std::endl; return value; }; save_value_specified_ = true; load_value_specified_ = true; CompleteArgument(); return parent_; } // Ensure we always move this when returning a new builder. ArgumentBuilder(ArgumentBuilder&&) = default; protected: // Used by builder to internally ignore arguments by dropping them on the floor after parsing. CmdlineParser::Builder& IntoIgnore() { save_value_ = [](TArg& value) { CMDLINE_DEBUG_LOG << "Ignored value '" << detail::ToStringAny(value) << "'" << std::endl; }; load_value_ = []() -> TArg& { assert(false && "Should not be appending values to ignored arguments"); return *reinterpret_cast(0); // Blow up. }; save_value_specified_ = true; load_value_specified_ = true; CompleteArgument(); return parent_; } void SetValuesInternal(const std::vector&& value_list) { assert(!argument_info_.has_value_map_); argument_info_.has_value_list_ = true; argument_info_.value_list_ = value_list; } void SetNames(std::vector&& names) { argument_info_.names_ = names; } void SetNames(std::initializer_list names) { argument_info_.names_ = names; } private: // Copying is bad. Move only. ArgumentBuilder(const ArgumentBuilder&) = delete; // Called by any function that doesn't chain back into this builder. // Completes the argument builder and save the information into the main builder. void CompleteArgument() { assert(save_value_specified_ && "No Into... function called, nowhere to save parsed values to"); assert(load_value_specified_ && "No Into... function called, nowhere to load parsed values from"); argument_info_.CompleteArgument(); // Appending the completed argument is destructive. The object is no longer // usable since all the useful information got moved out of it. AppendCompletedArgument(parent_, new detail::CmdlineParseArgument( std::move(argument_info_), std::move(save_value_), std::move(load_value_))); } friend struct CmdlineParser; friend struct CmdlineParser::Builder; friend struct CmdlineParser::UntypedArgumentBuilder; ArgumentBuilder(CmdlineParser::Builder& parser, std::shared_ptr save_destination) : parent_(parser), save_value_specified_(false), load_value_specified_(false), save_destination_(save_destination) { save_value_ = [](TArg&) { assert(false && "No save value function defined"); }; load_value_ = []() -> TArg& { assert(false && "No load value function defined"); return *reinterpret_cast(0); // Blow up. 
}; } CmdlineParser::Builder& parent_; std::function save_value_; std::function load_value_; bool save_value_specified_; bool load_value_specified_; detail::CmdlineParserArgumentInfo argument_info_; std::shared_ptr save_destination_; }; struct UntypedArgumentBuilder { // Set a type for this argument. The specific subcommand parser is looked up by the type. template ArgumentBuilder WithType() { return CreateTypedBuilder(); } // When used with multiple aliases, map the position of the alias to the value position. template ArgumentBuilder WithValues(std::initializer_list values) { auto&& a = CreateTypedBuilder(); a.WithValues(values); return std::move(a); } // When used with a single alias, map the alias into this value. // Same as 'WithValues({value})' , but allows the omission of the curly braces {}. template ArgumentBuilder WithValue(const TArg& value) { return WithValues({ value }); } // Set the current building argument to target this key. // When this command line argument is parsed, it can be fetched with this key. Builder& IntoKey(const TVariantMapKey& key) { return CreateTypedBuilder().IntoKey(key); } // Ensure we always move this when returning a new builder. UntypedArgumentBuilder(UntypedArgumentBuilder&&) = default; protected: void SetNames(std::vector&& names) { names_ = std::move(names); } void SetNames(std::initializer_list names) { names_ = names; } private: // No copying. Move instead. UntypedArgumentBuilder(const UntypedArgumentBuilder&) = delete; template ArgumentBuilder CreateTypedBuilder() { auto&& b = CreateArgumentBuilder(parent_); InitializeTypedBuilder(&b); // Type-specific initialization b.SetNames(std::move(names_)); return std::move(b); } template typename std::enable_if::value>::type InitializeTypedBuilder(ArgumentBuilder* arg_builder) { // Every Unit argument implicitly maps to a runtime value of Unit{} std::vector values(names_.size(), Unit{}); // NOLINT [whitespace/braces] [5] arg_builder->SetValuesInternal(std::move(values)); } // No extra work for all other types void InitializeTypedBuilder(void*) {} template friend struct ArgumentBuilder; friend struct Builder; explicit UntypedArgumentBuilder(CmdlineParser::Builder& parent) : parent_(parent) {} // UntypedArgumentBuilder(UntypedArgumentBuilder&& other) = default; CmdlineParser::Builder& parent_; std::vector names_; }; // Build a new parser given a chain of calls to define arguments. struct Builder { Builder() : save_destination_(new SaveDestination()) {} // Define a single argument. The default type is Unit. UntypedArgumentBuilder Define(const char* name) { return Define({name}); } // Define a single argument with multiple aliases. UntypedArgumentBuilder Define(std::initializer_list names) { auto&& b = UntypedArgumentBuilder(*this); b.SetNames(names); return std::move(b); } // Whether the parser should give up on unrecognized arguments. Not recommended. Builder& IgnoreUnrecognized(bool ignore_unrecognized) { ignore_unrecognized_ = ignore_unrecognized; return *this; } // Provide a list of arguments to ignore for backwards compatibility. Builder& Ignore(std::initializer_list ignore_list) { for (auto&& ignore_name : ignore_list) { std::string ign = ignore_name; // Ignored arguments are just like a regular definition which have very // liberal parsing requirements (no range checks, no value checks). // Unlike regular argument definitions, when a value gets parsed into its // stronger type, we just throw it away. if (ign.find('_') != std::string::npos) { // Does the arg-def have a wildcard? 
// pretend this is a string, e.g. -Xjitconfig: auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); assert(&builder == this); (void)builder; // Ignore pointless unused warning, it's used in the assert. } else { // pretend this is a unit, e.g. -Xjitblocking auto&& builder = Define(ignore_name).template WithType().IntoIgnore(); assert(&builder == this); (void)builder; // Ignore pointless unused warning, it's used in the assert. } } ignore_list_ = ignore_list; return *this; } // Finish building the parser; performs sanity checks. Return value is moved, not copied. // Do not call this more than once. CmdlineParser Build() { assert(!built_); built_ = true; auto&& p = CmdlineParser(ignore_unrecognized_, std::move(ignore_list_), save_destination_, std::move(completed_arguments_)); return std::move(p); } protected: void AppendCompletedArgument(detail::CmdlineParseArgumentAny* arg) { auto smart_ptr = std::unique_ptr(arg); completed_arguments_.push_back(std::move(smart_ptr)); } private: // No copying now! Builder(const Builder& other) = delete; template friend struct ArgumentBuilder; friend struct UntypedArgumentBuilder; friend struct CmdlineParser; bool built_ = false; bool ignore_unrecognized_ = false; std::vector ignore_list_; std::shared_ptr save_destination_; std::vector> completed_arguments_; }; CmdlineResult Parse(const std::string& argv) { std::vector tokenized; Split(argv, ' ', &tokenized); return Parse(TokenRange(std::move(tokenized))); } // Parse the arguments; storing results into the arguments map. Returns success value. CmdlineResult Parse(const char* argv) { return Parse(std::string(argv)); } // Parse the arguments; storing the results into the arguments map. Returns success value. // Assumes that argv[0] is a valid argument (i.e. not the program name). CmdlineResult Parse(const std::vector& argv) { return Parse(TokenRange(argv.begin(), argv.end())); } // Parse the arguments; storing the results into the arguments map. Returns success value. // Assumes that argv[0] is a valid argument (i.e. not the program name). CmdlineResult Parse(const std::vector& argv) { return Parse(TokenRange(argv.begin(), argv.end())); } // Parse the arguments (directly from an int main(argv,argc)). Returns success value. // Assumes that argv[0] is the program name, and ignores it. CmdlineResult Parse(const char* argv[], int argc) { return Parse(TokenRange(&argv[1], argc - 1)); // ignore argv[0] because it's the program name } // Look up the arguments that have been parsed; use the target keys to lookup individual args. const TVariantMap& GetArgumentsMap() const { return save_destination_->GetMap(); } // Release the arguments map that has been parsed; useful for move semantics. TVariantMap&& ReleaseArgumentsMap() { return save_destination_->ReleaseMap(); } // How many arguments were defined? size_t CountDefinedArguments() const { return completed_arguments_.size(); } // Ensure we have a default move constructor. CmdlineParser(CmdlineParser&&) = default; // Ensure we have a default move assignment operator. CmdlineParser& operator=(CmdlineParser&&) = default; private: friend struct Builder; // Construct a new parser from the builder. Move all the arguments. 
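// For reference, a minimal end-to-end build-and-parse sequence, modeled on the
// uses in cmdline_parser_test.cc (RuntimeParser and M are the aliases defined
// there for ParsedOptions::RuntimeParser and RuntimeArgumentMap):
//
//   RuntimeParser::Builder builder;
//   builder.Define("-help").IntoKey(M::Help);
//   RuntimeParser parser(builder.Build());
//   CmdlineResult res = parser.Parse("-help");
//   if (res.IsSuccess()) { /* inspect parser.GetArgumentsMap() */ }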
CmdlineParser(bool ignore_unrecognized, std::vector&& ignore_list, std::shared_ptr save_destination, std::vector>&& completed_arguments) : ignore_unrecognized_(ignore_unrecognized), ignore_list_(std::move(ignore_list)), save_destination_(save_destination), completed_arguments_(std::move(completed_arguments)) { assert(save_destination != nullptr); } // Parse the arguments; storing results into the arguments map. Returns success value. // The parsing will fail on the first non-success parse result and return that error. // // All previously-parsed arguments are cleared out. // Otherwise, all parsed arguments will be stored into SaveDestination as a side-effect. // A partial parse will result only in a partial save of the arguments. CmdlineResult Parse(TokenRange&& arguments_list) { save_destination_->Clear(); for (size_t i = 0; i < arguments_list.Size(); ) { TokenRange possible_name = arguments_list.Slice(i); size_t best_match_size = 0; // How many tokens were matched in the best case. size_t best_match_arg_idx = 0; bool matched = false; // At least one argument definition has been matched? // Find the closest argument definition for the remaining token range. size_t arg_idx = 0; for (auto&& arg : completed_arguments_) { size_t local_match = arg->MaybeMatches(possible_name); if (local_match > best_match_size) { best_match_size = local_match; best_match_arg_idx = arg_idx; matched = true; } arg_idx++; } // Saw some kind of unknown argument if (matched == false) { if (UNLIKELY(ignore_unrecognized_)) { // This is usually off, we only need it for JNI. // Consume 1 token and keep going, hopefully the next token is a good one. ++i; continue; } // Common case: // Bail out on the first unknown argument with an error. return CmdlineResult(CmdlineResult::kUnknown, std::string("Unknown argument: ") + possible_name[0]); } // Look at the best-matched argument definition and try to parse against that. auto&& arg = completed_arguments_[best_match_arg_idx]; assert(arg->MaybeMatches(possible_name) == best_match_size); // Try to parse the argument now, if we have enough tokens. std::pair num_tokens = arg->GetNumTokens(); size_t min_tokens; size_t max_tokens; std::tie(min_tokens, max_tokens) = num_tokens; if ((i + min_tokens) > arguments_list.Size()) { // expected longer command line but it was too short // e.g. if the argv was only "-Xms" without specifying a memory option CMDLINE_DEBUG_LOG << "Parse failure, i = " << i << ", arg list " << arguments_list.Size() << " num tokens in arg_def: " << min_tokens << "," << max_tokens << std::endl; return CmdlineResult(CmdlineResult::kFailure, std::string("Argument ") + possible_name[0] + ": incomplete command line arguments, expected " + std::to_string(size_t(i + min_tokens) - arguments_list.Size()) + " more tokens"); } if (best_match_size > max_tokens || best_match_size < min_tokens) { // Even our best match was out of range, so parsing would fail instantly. return CmdlineResult(CmdlineResult::kFailure, std::string("Argument ") + possible_name[0] + ": too few tokens " "matched " + std::to_string(best_match_size) + " but wanted " + std::to_string(num_tokens.first)); } // We have enough tokens to begin exact parsing. TokenRange exact_range = possible_name.Slice(0, max_tokens); size_t consumed_tokens = 1; // At least 1 if we ever want to try to resume parsing on error CmdlineResult parse_attempt = arg->ParseArgument(exact_range, &consumed_tokens); if (parse_attempt.IsError()) { // We may also want to continue parsing the other tokens to gather more errors. 
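// For now we bail out eagerly: the first failing argument aborts the whole
// parse, and its error is what the caller sees.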
return parse_attempt; } // else the value has been successfully stored into the map assert(consumed_tokens > 0); // Don't hang in an infinite loop trying to parse i += consumed_tokens; // TODO: also handle ignoring arguments for backwards compatibility } // for return CmdlineResult(CmdlineResult::kSuccess); } bool ignore_unrecognized_ = false; std::vector ignore_list_; std::shared_ptr save_destination_; std::vector> completed_arguments_; }; // This has to be defined after everything else, since we want the builders to call this. template class TVariantMapKey> template typename CmdlineParser::template ArgumentBuilder CmdlineParser::CreateArgumentBuilder( CmdlineParser::Builder& parent) { return CmdlineParser::ArgumentBuilder( parent, parent.save_destination_); } // This has to be defined after everything else, since we want the builders to call this. template class TVariantMapKey> void CmdlineParser::AppendCompletedArgument( CmdlineParser::Builder& builder, detail::CmdlineParseArgumentAny* arg) { builder.AppendCompletedArgument(arg); } } // namespace art #endif // ART_CMDLINE_CMDLINE_PARSER_H_ android-platform-art-8.1.0+r23/cmdline/cmdline_parser_test.cc000066400000000000000000000527251336577252300241500ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cmdline_parser.h" #include "runtime/runtime_options.h" #include "runtime/parsed_options.h" #include "utils.h" #include #include "gtest/gtest.h" #include "runtime/experimental_flags.h" #include "runtime/runtime.h" #define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast(expected), \ reinterpret_cast(nullptr)); namespace art { bool UsuallyEquals(double expected, double actual); // This has a gtest dependency, which is why it's in the gtest only. bool operator==(const ProfileSaverOptions& lhs, const ProfileSaverOptions& rhs) { return lhs.enabled_ == rhs.enabled_ && lhs.min_save_period_ms_ == rhs.min_save_period_ms_ && lhs.save_resolved_classes_delay_ms_ == rhs.save_resolved_classes_delay_ms_ && lhs.hot_startup_method_samples_ == rhs.hot_startup_method_samples_ && lhs.min_methods_to_save_ == rhs.min_methods_to_save_ && lhs.min_classes_to_save_ == rhs.min_classes_to_save_ && lhs.min_notification_before_wake_ == rhs.min_notification_before_wake_ && lhs.max_notification_before_wake_ == rhs.max_notification_before_wake_; } bool UsuallyEquals(double expected, double actual) { using FloatingPoint = ::testing::internal::FloatingPoint; FloatingPoint exp(expected); FloatingPoint act(actual); // Compare with ULPs instead of comparing with == return exp.AlmostEquals(act); } template bool UsuallyEquals(const T& expected, const T& actual, typename std::enable_if< detail::SupportsEqualityOperator::value>::type* = 0) { return expected == actual; } // Try to use memcmp to compare simple plain-old-data structs. // // This should *not* generate false positives, but it can generate false negatives. 
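// (A false negative here means UsuallyEquals() reports two semantically-equal
// values as different, failing a test spuriously rather than passing it wrongly.)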
// This will mostly work except for fields like float which can have different bit patterns // that are nevertheless equal. // If a test is failing because the structs aren't "equal" when they really are // then it's recommended to implement operator== for it instead. template bool UsuallyEquals(const T& expected, const T& actual, const Ignore& ... more ATTRIBUTE_UNUSED, typename std::enable_if::value>::type* = 0, typename std::enable_if::value>::type* = 0 ) { return memcmp(std::addressof(expected), std::addressof(actual), sizeof(T)) == 0; } bool UsuallyEquals(const XGcOption& expected, const XGcOption& actual) { return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0; } bool UsuallyEquals(const char* expected, const std::string& actual) { return std::string(expected) == actual; } template ::testing::AssertionResult IsExpectedKeyValue(const T& expected, const TMap& map, const TKey& key) { auto* actual = map.Get(key); if (actual != nullptr) { if (!UsuallyEquals(expected, *actual)) { return ::testing::AssertionFailure() << "expected " << detail::ToStringAny(expected) << " but got " << detail::ToStringAny(*actual); } return ::testing::AssertionSuccess(); } return ::testing::AssertionFailure() << "key was not in the map"; } template ::testing::AssertionResult IsExpectedDefaultKeyValue(const T& expected, const TMap& map, const TKey& key) { const T& actual = map.GetOrDefault(key); if (!UsuallyEquals(expected, actual)) { return ::testing::AssertionFailure() << "expected " << detail::ToStringAny(expected) << " but got " << detail::ToStringAny(actual); } return ::testing::AssertionSuccess(); } class CmdlineParserTest : public ::testing::Test { public: CmdlineParserTest() = default; ~CmdlineParserTest() = default; protected: using M = RuntimeArgumentMap; using RuntimeParser = ParsedOptions::RuntimeParser; static void SetUpTestCase() { art::InitLogging(nullptr, art::Runtime::Abort); // argv = null } virtual void SetUp() { parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options } static ::testing::AssertionResult IsResultSuccessful(const CmdlineResult& result) { if (result.IsSuccess()) { return ::testing::AssertionSuccess(); } else { return ::testing::AssertionFailure() << result.GetStatus() << " with: " << result.GetMessage(); } } static ::testing::AssertionResult IsResultFailure(const CmdlineResult& result, CmdlineResult::Status failure_status) { if (result.IsSuccess()) { return ::testing::AssertionFailure() << " got success but expected failure: " << failure_status; } else if (result.GetStatus() == failure_status) { return ::testing::AssertionSuccess(); } return ::testing::AssertionFailure() << " expected failure " << failure_status << " but got " << result.GetStatus(); } std::unique_ptr parser_; }; #define EXPECT_KEY_EXISTS(map, key) EXPECT_TRUE((map).Exists(key)) #define EXPECT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedKeyValue(expected, map, key)) #define EXPECT_DEFAULT_KEY_VALUE(map, key, expected) EXPECT_TRUE(IsExpectedDefaultKeyValue(expected, map, key)) #define _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ do { \ EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ EXPECT_EQ(0u, parser_->GetArgumentsMap().Size()); \ #define EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv) \ _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ } while (false) #define EXPECT_SINGLE_PARSE_DEFAULT_VALUE(expected, argv, key)\ _EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); \ RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ EXPECT_DEFAULT_KEY_VALUE(args, key, 
expected); \ } while (false) // NOLINT [readability/namespace] [5] #define _EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ do { \ EXPECT_TRUE(IsResultSuccessful(parser_->Parse(argv))); \ RuntimeArgumentMap args = parser_->ReleaseArgumentsMap(); \ EXPECT_EQ(1u, args.Size()); \ EXPECT_KEY_EXISTS(args, key); \ #define EXPECT_SINGLE_PARSE_EXISTS(argv, key) \ _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ } while (false) #define EXPECT_SINGLE_PARSE_VALUE(expected, argv, key) \ _EXPECT_SINGLE_PARSE_EXISTS(argv, key); \ EXPECT_KEY_VALUE(args, key, expected); \ } while (false) // NOLINT [readability/namespace] [5] #define EXPECT_SINGLE_PARSE_VALUE_STR(expected, argv, key) \ EXPECT_SINGLE_PARSE_VALUE(std::string(expected), argv, key) #define EXPECT_SINGLE_PARSE_FAIL(argv, failure_status) \ do { \ EXPECT_TRUE(IsResultFailure(parser_->Parse(argv), failure_status));\ RuntimeArgumentMap args = parser_->ReleaseArgumentsMap();\ EXPECT_EQ(0u, args.Size()); \ } while (false) TEST_F(CmdlineParserTest, TestSimpleSuccesses) { auto& parser = *parser_; EXPECT_LT(0u, parser.CountDefinedArguments()); { // Test case 1: No command line arguments EXPECT_TRUE(IsResultSuccessful(parser.Parse(""))); RuntimeArgumentMap args = parser.ReleaseArgumentsMap(); EXPECT_EQ(0u, args.Size()); } EXPECT_SINGLE_PARSE_EXISTS("-Xzygote", M::Zygote); EXPECT_SINGLE_PARSE_VALUE_STR("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath); EXPECT_SINGLE_PARSE_VALUE("/hello/world", "-Xbootclasspath:/hello/world", M::BootClassPath); EXPECT_SINGLE_PARSE_VALUE(Memory<1>(234), "-Xss234", M::StackSize); EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(1234*MB), "-Xms1234m", M::MemoryInitialSize); EXPECT_SINGLE_PARSE_VALUE(true, "-XX:EnableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); EXPECT_SINGLE_PARSE_VALUE(false, "-XX:DisableHSpaceCompactForOOM", M::EnableHSpaceCompactForOOM); EXPECT_SINGLE_PARSE_VALUE(0.5, "-XX:HeapTargetUtilization=0.5", M::HeapTargetUtilization); EXPECT_SINGLE_PARSE_VALUE(5u, "-XX:ParallelGCThreads=5", M::ParallelGCThreads); EXPECT_SINGLE_PARSE_EXISTS("-Xno-dex-file-fallback", M::NoDexFileFallback); } // TEST_F TEST_F(CmdlineParserTest, TestSimpleFailures) { // Test argument is unknown to the parser EXPECT_SINGLE_PARSE_FAIL("abcdefg^%@#*(@#", CmdlineResult::kUnknown); // Test value map substitution fails EXPECT_SINGLE_PARSE_FAIL("-Xverify:whatever", CmdlineResult::kFailure); // Test value type parsing failures EXPECT_SINGLE_PARSE_FAIL("-Xsswhatever", CmdlineResult::kFailure); // invalid memory value EXPECT_SINGLE_PARSE_FAIL("-Xms123", CmdlineResult::kFailure); // memory value too small EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=0.0", CmdlineResult::kOutOfRange); // toosmal EXPECT_SINGLE_PARSE_FAIL("-XX:HeapTargetUtilization=2.0", CmdlineResult::kOutOfRange); // toolarg EXPECT_SINGLE_PARSE_FAIL("-XX:ParallelGCThreads=-5", CmdlineResult::kOutOfRange); // too small EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // not a valid suboption } // TEST_F TEST_F(CmdlineParserTest, TestLogVerbosity) { { const char* log_args = "-verbose:" "class,compiler,gc,heap,jdwp,jni,monitor,profiler,signals,simulator,startup," "third-party-jni,threads,verifier"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.class_linker = true; log_verbosity.compiler = true; log_verbosity.gc = true; log_verbosity.heap = true; log_verbosity.jdwp = true; log_verbosity.jni = true; log_verbosity.monitor = true; log_verbosity.profiler = true; log_verbosity.signals = true; log_verbosity.simulator = true; log_verbosity.startup = true; 
log_verbosity.third_party_jni = true; log_verbosity.threads = true; log_verbosity.verifier = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } { const char* log_args = "-verbose:" "class,compiler,gc,heap,jdwp,jni,monitor"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.class_linker = true; log_verbosity.compiler = true; log_verbosity.gc = true; log_verbosity.heap = true; log_verbosity.jdwp = true; log_verbosity.jni = true; log_verbosity.monitor = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } EXPECT_SINGLE_PARSE_FAIL("-verbose:blablabla", CmdlineResult::kUsage); // invalid verbose opt { const char* log_args = "-verbose:deopt"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.deopt = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } { const char* log_args = "-verbose:collector"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.collector = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } { const char* log_args = "-verbose:oat"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.oat = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } { const char* log_args = "-verbose:dex"; LogVerbosity log_verbosity = LogVerbosity(); log_verbosity.dex = true; EXPECT_SINGLE_PARSE_VALUE(log_verbosity, log_args, M::Verbose); } } // TEST_F // TODO: Enable this b/19274810 TEST_F(CmdlineParserTest, DISABLED_TestXGcOption) { /* * Test success */ { XGcOption option_all_true{}; // NOLINT [readability/braces] [4] option_all_true.collector_type_ = gc::CollectorType::kCollectorTypeCMS; option_all_true.verify_pre_gc_heap_ = true; option_all_true.verify_pre_sweeping_heap_ = true; option_all_true.verify_post_gc_heap_ = true; option_all_true.verify_pre_gc_rosalloc_ = true; option_all_true.verify_pre_sweeping_rosalloc_ = true; option_all_true.verify_post_gc_rosalloc_ = true; const char * xgc_args_all_true = "-Xgc:concurrent," "preverify,presweepingverify,postverify," "preverify_rosalloc,presweepingverify_rosalloc," "postverify_rosalloc,precise," "verifycardtable"; EXPECT_SINGLE_PARSE_VALUE(option_all_true, xgc_args_all_true, M::GcOption); XGcOption option_all_false{}; // NOLINT [readability/braces] [4] option_all_false.collector_type_ = gc::CollectorType::kCollectorTypeMS; option_all_false.verify_pre_gc_heap_ = false; option_all_false.verify_pre_sweeping_heap_ = false; option_all_false.verify_post_gc_heap_ = false; option_all_false.verify_pre_gc_rosalloc_ = false; option_all_false.verify_pre_sweeping_rosalloc_ = false; option_all_false.verify_post_gc_rosalloc_ = false; const char* xgc_args_all_false = "-Xgc:nonconcurrent," "nopreverify,nopresweepingverify,nopostverify,nopreverify_rosalloc," "nopresweepingverify_rosalloc,nopostverify_rosalloc,noprecise,noverifycardtable"; EXPECT_SINGLE_PARSE_VALUE(option_all_false, xgc_args_all_false, M::GcOption); XGcOption option_all_default{}; // NOLINT [readability/braces] [4] const char* xgc_args_blank = "-Xgc:"; EXPECT_SINGLE_PARSE_VALUE(option_all_default, xgc_args_blank, M::GcOption); } /* * Test failures */ EXPECT_SINGLE_PARSE_FAIL("-Xgc:blablabla", CmdlineResult::kUsage); // invalid Xgc opt } // TEST_F /* * {"-Xrunjdwp:_", "-agentlib:jdwp=_"} */ TEST_F(CmdlineParserTest, TestJdwpOptions) { /* * Test success */ { /* * "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n" */ JDWP::JdwpOptions opt = JDWP::JdwpOptions(); opt.transport = JDWP::JdwpTransportType::kJdwpTransportSocket; opt.port = 8000; opt.server = true; const 
char *opt_args = "-Xrunjdwp:transport=dt_socket,address=8000,server=y"; EXPECT_SINGLE_PARSE_VALUE(opt, opt_args, M::JdwpOptions); } { /* * "Example: -agentlib:jdwp=transport=dt_socket,address=localhost:6500,server=n\n"); */ JDWP::JdwpOptions opt = JDWP::JdwpOptions(); opt.transport = JDWP::JdwpTransportType::kJdwpTransportSocket; opt.host = "localhost"; opt.port = 6500; opt.server = false; const char *opt_args = "-agentlib:jdwp=transport=dt_socket,address=localhost:6500,server=n"; EXPECT_SINGLE_PARSE_VALUE(opt, opt_args, M::JdwpOptions); } /* * Test failures */ EXPECT_SINGLE_PARSE_FAIL("-Xrunjdwp:help", CmdlineResult::kUsage); // usage for help only EXPECT_SINGLE_PARSE_FAIL("-Xrunjdwp:blabla", CmdlineResult::kFailure); // invalid subarg EXPECT_SINGLE_PARSE_FAIL("-agentlib:jdwp=help", CmdlineResult::kUsage); // usage for help only EXPECT_SINGLE_PARSE_FAIL("-agentlib:jdwp=blabla", CmdlineResult::kFailure); // invalid subarg } // TEST_F /* * -D_ -D_ -D_ ... */ TEST_F(CmdlineParserTest, TestPropertiesList) { /* * Test successes */ { std::vector opt = {"hello"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello", M::PropertiesList); } { std::vector opt = {"hello", "world"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Dhello -Dworld", M::PropertiesList); } { std::vector opt = {"one", "two", "three"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Done -Dtwo -Dthree", M::PropertiesList); } } // TEST_F /* * -Xcompiler-option foo -Xcompiler-option bar ... */ TEST_F(CmdlineParserTest, TestCompilerOption) { /* * Test successes */ { std::vector opt = {"hello"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Xcompiler-option hello", M::CompilerOptions); } { std::vector opt = {"hello", "world"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Xcompiler-option hello -Xcompiler-option world", M::CompilerOptions); } { std::vector opt = {"one", "two", "three"}; EXPECT_SINGLE_PARSE_VALUE(opt, "-Xcompiler-option one -Xcompiler-option two -Xcompiler-option three", M::CompilerOptions); } } // TEST_F /* * -Xjit, -Xnojit, -Xjitcodecachesize, Xjitcompilethreshold */ TEST_F(CmdlineParserTest, TestJitOptions) { /* * Test successes */ { EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation); EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation); } { EXPECT_SINGLE_PARSE_VALUE( MemoryKiB(16 * KB), "-Xjitinitialsize:16K", M::JITCodeCacheInitialCapacity); EXPECT_SINGLE_PARSE_VALUE( MemoryKiB(16 * MB), "-Xjitmaxsize:16M", M::JITCodeCacheMaxCapacity); } { EXPECT_SINGLE_PARSE_VALUE(12345u, "-Xjitthreshold:12345", M::JITCompileThreshold); } } // TEST_F /* * -Xps-* */ TEST_F(CmdlineParserTest, ProfileSaverOptions) { ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc", true); EXPECT_SINGLE_PARSE_VALUE(opt, "-Xjitsaveprofilinginfo " "-Xps-min-save-period-ms:1 " "-Xps-save-resolved-classes-delay-ms:2 " "-Xps-hot-startup-method-samples:3 " "-Xps-min-methods-to-save:4 " "-Xps-min-classes-to-save:5 " "-Xps-min-notification-before-wake:6 " "-Xps-max-notification-before-wake:7 " "-Xps-profile-path:abc " "-Xps-profile-boot-class-path", M::ProfileSaverOpts); } // TEST_F /* -Xexperimental:_ */ TEST_F(CmdlineParserTest, TestExperimentalFlags) { // Default EXPECT_SINGLE_PARSE_DEFAULT_VALUE(ExperimentalFlags::kNone, "", M::Experimental); // Disabled explicitly EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone, "-Xexperimental:none", M::Experimental); } // -Xverify:_ TEST_F(CmdlineParserTest, TestVerify) { EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kNone, "-Xverify:none", M::Verify); 
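// Note that "remote" and "all" below both map onto the same kEnable mode,
// while "softfail" is a distinct mode of its own.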
EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:remote", M::Verify); EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kEnable, "-Xverify:all", M::Verify); EXPECT_SINGLE_PARSE_VALUE(verifier::VerifyMode::kSoftFail, "-Xverify:softfail", M::Verify); } TEST_F(CmdlineParserTest, TestIgnoreUnrecognized) { RuntimeParser::Builder parserBuilder; parserBuilder .Define("-help") .IntoKey(M::Help) .IgnoreUnrecognized(true); parser_.reset(new RuntimeParser(parserBuilder.Build())); EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option"); EXPECT_SINGLE_PARSE_EMPTY_SUCCESS("-non-existent-option1 --non-existent-option-2"); } // TEST_F TEST_F(CmdlineParserTest, TestIgnoredArguments) { std::initializer_list ignored_args = { "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa", "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:abdef", "-Xdexopt:foobar", "-Xnoquithandler", "-Xjnigreflimit:ixnay", "-Xgenregmap", "-Xnogenregmap", "-Xverifyopt:never", "-Xcheckdexsum", "-Xincludeselectedop", "-Xjitop:noop", "-Xincludeselectedmethod", "-Xjitblocking", "-Xjitmethod:_", "-Xjitclass:nosuchluck", "-Xjitoffset:none", "-Xjitconfig:yes", "-Xjitcheckcg", "-Xjitverbose", "-Xjitprofile", "-Xjitdisableopt", "-Xjitsuspendpoll", "-XX:mainThreadStackSize=1337" }; // Check they are ignored when parsed one at a time for (auto&& arg : ignored_args) { SCOPED_TRACE(arg); EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(arg); } // Check they are ignored when we pass it all together at once std::vector argv = ignored_args; EXPECT_SINGLE_PARSE_EMPTY_SUCCESS(argv); } // TEST_F TEST_F(CmdlineParserTest, MultipleArguments) { EXPECT_TRUE(IsResultSuccessful(parser_->Parse( "-help -XX:ForegroundHeapGrowthMultiplier=0.5 " "-Xnodex2oat -Xmethod-trace -XX:LargeObjectSpace=map"))); auto&& map = parser_->ReleaseArgumentsMap(); EXPECT_EQ(5u, map.Size()); EXPECT_KEY_VALUE(map, M::Help, Unit{}); // NOLINT [whitespace/braces] [5] EXPECT_KEY_VALUE(map, M::ForegroundHeapGrowthMultiplier, 0.5); EXPECT_KEY_VALUE(map, M::Dex2Oat, false); EXPECT_KEY_VALUE(map, M::MethodTrace, Unit{}); // NOLINT [whitespace/braces] [5] EXPECT_KEY_VALUE(map, M::LargeObjectSpace, gc::space::LargeObjectSpaceType::kMap); } // TEST_F } // namespace art android-platform-art-8.1.0+r23/cmdline/cmdline_result.h000066400000000000000000000062141336577252300227650ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_CMDLINE_RESULT_H_ #define ART_CMDLINE_CMDLINE_RESULT_H_ #include #include namespace art { // Result of an attempt to process the command line arguments. If fails, specifies // the specific error code and an error message. // Use the value-carrying CmdlineParseResult to get an additional value out in a success case. struct CmdlineResult { enum Status { kSuccess, // Error codes: kUsage, kFailure, kOutOfRange, kUnknown, }; // Short-hand for checking if the result was successful. 
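// (This lets call sites write 'if (result) { ... }' instead of
// 'if (result.IsSuccess()) { ... }'.)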
  operator bool() const {
    return IsSuccess();
  }

  // Check if the operation has succeeded.
  bool IsSuccess() const { return status_ == kSuccess; }
  // Check if the operation was not a success.
  bool IsError() const { return status_ != kSuccess; }
  // Get the specific status, regardless of whether it's failure or success.
  Status GetStatus() const { return status_; }

  // Get the error message, *must* only be called for error status results.
  const std::string& GetMessage() const {
    assert(IsError());
    return message_;
  }

  // Constructor with any status. No message.
  explicit CmdlineResult(Status status) : status_(status) {}

  // Constructor with an error status, copying the message.
  CmdlineResult(Status status, const std::string& message)
      : status_(status), message_(message) {
    assert(status != kSuccess);
  }

  // Constructor with an error status, taking over the message.
  CmdlineResult(Status status, std::string&& message)
      : status_(status), message_(message) {
    assert(status != kSuccess);
  }

  // Make sure copying exists.
  CmdlineResult(const CmdlineResult&) = default;
  // Make sure moving is cheap.
  CmdlineResult(CmdlineResult&&) = default;

 private:
  const Status status_;
  const std::string message_;
};

// TODO: code-generate this
static inline std::ostream& operator<<(std::ostream& stream, CmdlineResult::Status status) {
  switch (status) {
    case CmdlineResult::kSuccess: stream << "kSuccess"; break;
    case CmdlineResult::kUsage: stream << "kUsage"; break;
    case CmdlineResult::kFailure: stream << "kFailure"; break;
    case CmdlineResult::kOutOfRange: stream << "kOutOfRange"; break;
    case CmdlineResult::kUnknown: stream << "kUnknown"; break;
    default: UNREACHABLE();
  }
  return stream;
}

}  // namespace art

#endif  // ART_CMDLINE_CMDLINE_RESULT_H_
android-platform-art-8.1.0+r23/cmdline/cmdline_type_parser.h000066400000000000000000000055651336577252300240100ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_CMDLINE_CMDLINE_TYPE_PARSER_H_
#define ART_CMDLINE_CMDLINE_TYPE_PARSER_H_

#include "cmdline_parse_result.h"

namespace art {

// Base class for user-defined CmdlineType specializations.
//
// Not strictly necessary, but if the specializations fail to define all of these functions
// the compilation will fail.
template <typename T>
struct CmdlineTypeParser {
  // Return value of parsing attempts. Represents a Success(T value) or an Error(int code).
  using Result = CmdlineParseResult<T>;

  // Parse a single value for an argument definition out of the wildcard component.
  //
  // e.g. if the argument definition was "foo:_", and the user-provided input was "foo:bar",
  // then args is "bar".
  Result Parse(const std::string& args ATTRIBUTE_UNUSED) {
    assert(false);
    return Result::Failure("Missing type specialization and/or value map");
  }

  // Parse a value and append it into the existing value so far, for argument
  // definitions which are marked with AppendValues().
  //
  // The value is parsed out of the wildcard component as in Parse.
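// For example, "-Xcompiler-option foo -Xcompiler-option bar" invokes
// ParseAndAppend twice, growing the same std::vector<std::string> value
// (see TestCompilerOption in cmdline_parser_test.cc).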
// // If the initial value does not exist yet, a default value is created by // value-initializing with 'T()'. Result ParseAndAppend(const std::string& args ATTRIBUTE_UNUSED, T& existing_value ATTRIBUTE_UNUSED) { assert(false); return Result::Failure("Missing type specialization and/or value map"); } // Runtime type name of T, so that we can print more useful error messages. static const char* Name() { assert(false); return "UnspecializedType"; } // Whether or not your type can parse argument definitions defined without a "_" // e.g. -Xenable-profiler just mutates the existing profiler struct in-place // so it doesn't need to do any parsing other than token recognition. // // If this is false, then either the argument definition has a _, from which the parsing // happens, or the tokens get mapped to a value list/map from which a 1:1 matching occurs. // // This should almost *always* be false! static constexpr bool kCanParseBlankless = false; protected: // Don't accidentally initialize instances of this directly; they will assert at runtime. CmdlineTypeParser() = default; }; } // namespace art #endif // ART_CMDLINE_CMDLINE_TYPE_PARSER_H_ android-platform-art-8.1.0+r23/cmdline/cmdline_types.h000066400000000000000000000663541336577252300226260ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_CMDLINE_TYPES_H_ #define ART_CMDLINE_CMDLINE_TYPES_H_ #define CMDLINE_NDEBUG 1 // Do not output any debugging information for parsing. #include #include "memory_representation.h" #include "detail/cmdline_debug_detail.h" #include "cmdline_type_parser.h" #include "android-base/strings.h" // Includes for the types that are being specialized #include #include "base/logging.h" #include "base/time_utils.h" #include "experimental_flags.h" #include "gc/collector_type.h" #include "gc/space/large_object_space.h" #include "jdwp/jdwp.h" #include "jit/profile_saver_options.h" #include "plugin.h" #include "ti/agent.h" #include "unit.h" namespace art { // The default specialization will always fail parsing the type from a string. // Provide your own specialization that inherits from CmdlineTypeParser // and implements either Parse or ParseAndAppend // (only if the argument was defined with ::AppendValues()) but not both. template struct CmdlineType : CmdlineTypeParser { }; // Specializations for CmdlineType follow: // Parse argument definitions for Unit-typed arguments. template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& args) { if (args == "") { return Result::Success(Unit{}); // NOLINT [whitespace/braces] [5] } return Result::Failure("Unexpected extra characters " + args); } }; template <> struct CmdlineType : CmdlineTypeParser { /* * Handle one of the JDWP name/value pairs. 
* * JDWP options are: * help: if specified, show help message and bail * transport: may be dt_socket or dt_shmem * address: for dt_socket, "host:port", or just "port" when listening * server: if "y", wait for debugger to attach; if "n", attach to debugger * timeout: how long to wait for debugger to connect / listen * * Useful with server=n (these aren't supported yet): * onthrow=: connect to debugger when exception thrown * onuncaught=y|n: connect to debugger when uncaught exception thrown * launch=: launch the debugger itself * * The "transport" option is required, as is "address" if server=n. */ Result Parse(const std::string& options) { VLOG(jdwp) << "ParseJdwpOptions: " << options; if (options == "help") { return Result::Usage( "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n" "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n\n"); } const std::string s; std::vector pairs; Split(options, ',', &pairs); JDWP::JdwpOptions jdwp_options; for (const std::string& jdwp_option : pairs) { std::string::size_type equals_pos = jdwp_option.find('='); if (equals_pos == std::string::npos) { return Result::Failure(s + "Can't parse JDWP option '" + jdwp_option + "' in '" + options + "'"); } Result parse_attempt = ParseJdwpOption(jdwp_option.substr(0, equals_pos), jdwp_option.substr(equals_pos + 1), &jdwp_options); if (parse_attempt.IsError()) { // We fail to parse this JDWP option. return parse_attempt; } } if (jdwp_options.transport == JDWP::kJdwpTransportUnknown) { return Result::Failure(s + "Must specify JDWP transport: " + options); } if (!jdwp_options.server && (jdwp_options.host.empty() || jdwp_options.port == 0)) { return Result::Failure(s + "Must specify JDWP host and port when server=n: " + options); } return Result::Success(std::move(jdwp_options)); } Result ParseJdwpOption(const std::string& name, const std::string& value, JDWP::JdwpOptions* jdwp_options) { if (name == "transport") { if (value == "dt_socket") { jdwp_options->transport = JDWP::kJdwpTransportSocket; } else if (value == "dt_android_adb") { jdwp_options->transport = JDWP::kJdwpTransportAndroidAdb; } else { return Result::Failure("JDWP transport not supported: " + value); } } else if (name == "server") { if (value == "n") { jdwp_options->server = false; } else if (value == "y") { jdwp_options->server = true; } else { return Result::Failure("JDWP option 'server' must be 'y' or 'n'"); } } else if (name == "suspend") { if (value == "n") { jdwp_options->suspend = false; } else if (value == "y") { jdwp_options->suspend = true; } else { return Result::Failure("JDWP option 'suspend' must be 'y' or 'n'"); } } else if (name == "address") { /* this is either or : */ std::string port_string; jdwp_options->host.clear(); std::string::size_type colon = value.find(':'); if (colon != std::string::npos) { jdwp_options->host = value.substr(0, colon); port_string = value.substr(colon + 1); } else { port_string = value; } if (port_string.empty()) { return Result::Failure("JDWP address missing port: " + value); } char* end; uint64_t port = strtoul(port_string.c_str(), &end, 10); if (*end != '\0' || port > 0xffff) { return Result::Failure("JDWP address has junk in port field: " + value); } jdwp_options->port = port; } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") { /* valid but unsupported */ LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'"; } else { LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'"; } return 
Result::SuccessNoValue(); } static const char* Name() { return "JdwpOptions"; } }; template struct CmdlineType> : CmdlineTypeParser> { using typename CmdlineTypeParser>::Result; Result Parse(const std::string& arg) { CMDLINE_DEBUG_LOG << "Parsing memory: " << arg << std::endl; size_t val = ParseMemoryOption(arg.c_str(), Divisor); CMDLINE_DEBUG_LOG << "Memory parsed to size_t value: " << val << std::endl; if (val == 0) { return Result::Failure(std::string("not a valid memory value, or not divisible by ") + std::to_string(Divisor)); } return Result::Success(Memory(val)); } // Parse a string of the form /[0-9]+[kKmMgG]?/, which is used to specify // memory sizes. [kK] indicates kilobytes, [mM] megabytes, and // [gG] gigabytes. // // "s" should point just past the "-Xm?" part of the string. // "div" specifies a divisor, e.g. 1024 if the value must be a multiple // of 1024. // // The spec says the -Xmx and -Xms options must be multiples of 1024. It // doesn't say anything about -Xss. // // Returns 0 (a useless size) if "s" is malformed or specifies a low or // non-evenly-divisible value. // static size_t ParseMemoryOption(const char* s, size_t div) { // strtoul accepts a leading [+-], which we don't want, // so make sure our string starts with a decimal digit. if (isdigit(*s)) { char* s2; size_t val = strtoul(s, &s2, 10); if (s2 != s) { // s2 should be pointing just after the number. // If this is the end of the string, the user // has specified a number of bytes. Otherwise, // there should be exactly one more character // that specifies a multiplier. if (*s2 != '\0') { // The remainder of the string is either a single multiplier // character, or nothing to indicate that the value is in // bytes. char c = *s2++; if (*s2 == '\0') { size_t mul; if (c == '\0') { mul = 1; } else if (c == 'k' || c == 'K') { mul = KB; } else if (c == 'm' || c == 'M') { mul = MB; } else if (c == 'g' || c == 'G') { mul = GB; } else { // Unknown multiplier character. return 0; } if (val <= std::numeric_limits::max() / mul) { val *= mul; } else { // Clamp to a multiple of 1024. val = std::numeric_limits::max() & ~(1024-1); } } else { // There's more than one character after the numeric part. return 0; } } // The man page says that a -Xm value must be a multiple of 1024. if (val % div == 0) { return val; } } } return 0; } static const char* Name() { return Memory::Name(); } }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& str) { char* end = nullptr; errno = 0; double value = strtod(str.c_str(), &end); if (*end != '\0') { return Result::Failure("Failed to parse double from " + str); } if (errno == ERANGE) { return Result::OutOfRange( "Failed to parse double from " + str + "; overflow/underflow occurred"); } return Result::Success(value); } static const char* Name() { return "double"; } }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& str) { const char* begin = str.c_str(); char* end; // Parse into a larger type (long long) because we can't use strtoul // since it silently converts negative values into unsigned long and doesn't set errno. 
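// (e.g. strtoul("-1", ...) quietly wraps around to ULONG_MAX with errno left
// untouched, whereas strtoll returns -1, which the explicit range check below
// can reject.)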
errno = 0; long long int result = strtoll(begin, &end, 10); // NOLINT [runtime/int] [4] if (begin == end || *end != '\0' || errno == EINVAL) { return Result::Failure("Failed to parse integer from " + str); } else if ((errno == ERANGE) || // NOLINT [runtime/int] [4] result < std::numeric_limits::min() || result > std::numeric_limits::max() || result < 0) { return Result::OutOfRange( "Failed to parse integer from " + str + "; out of unsigned int range"); } return Result::Success(static_cast(result)); } static const char* Name() { return "unsigned integer"; } }; // Lightweight nanosecond value type. Allows parser to convert user-input from milliseconds // to nanoseconds automatically after parsing. // // All implicit conversion from uint64_t uses nanoseconds. struct MillisecondsToNanoseconds { // Create from nanoseconds. MillisecondsToNanoseconds(uint64_t nanoseconds) : nanoseconds_(nanoseconds) { // NOLINT [runtime/explicit] [5] } // Create from milliseconds. static MillisecondsToNanoseconds FromMilliseconds(unsigned int milliseconds) { return MillisecondsToNanoseconds(MsToNs(milliseconds)); } // Get the underlying nanoseconds value. uint64_t GetNanoseconds() const { return nanoseconds_; } // Get the milliseconds value [via a conversion]. Loss of precision will occur. uint64_t GetMilliseconds() const { return NsToMs(nanoseconds_); } // Get the underlying nanoseconds value. operator uint64_t() const { return GetNanoseconds(); } // Default constructors/copy-constructors. MillisecondsToNanoseconds() : nanoseconds_(0ul) {} MillisecondsToNanoseconds(const MillisecondsToNanoseconds&) = default; MillisecondsToNanoseconds(MillisecondsToNanoseconds&&) = default; private: uint64_t nanoseconds_; }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& str) { CmdlineType uint_parser; CmdlineParseResult res = uint_parser.Parse(str); if (res.IsSuccess()) { return Result::Success(MillisecondsToNanoseconds::FromMilliseconds(res.GetValue())); } else { return Result::CastError(res); } } static const char* Name() { return "MillisecondsToNanoseconds"; } }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& args) { return Result::Success(args); } Result ParseAndAppend(const std::string& args, std::string& existing_value) { if (existing_value.empty()) { existing_value = args; } else { existing_value += ' '; existing_value += args; } return Result::SuccessNoValue(); } }; template <> struct CmdlineType> : CmdlineTypeParser> { Result Parse(const std::string& args) { assert(false && "Use AppendValues() for a Plugin vector type"); return Result::Failure("Unconditional failure: Plugin vector must be appended: " + args); } Result ParseAndAppend(const std::string& args, std::vector& existing_value) { existing_value.push_back(Plugin::Create(args)); return Result::SuccessNoValue(); } static const char* Name() { return "std::vector"; } }; template <> struct CmdlineType> : CmdlineTypeParser> { Result Parse(const std::string& args) { assert(false && "Use AppendValues() for an Agent list type"); return Result::Failure("Unconditional failure: Agent list must be appended: " + args); } Result ParseAndAppend(const std::string& args, std::list& existing_value) { existing_value.emplace_back(args); return Result::SuccessNoValue(); } static const char* Name() { return "std::list"; } }; template <> struct CmdlineType> : CmdlineTypeParser> { Result Parse(const std::string& args) { assert(false && "Use AppendValues() for a string vector type"); return 
Result::Failure("Unconditional failure: string vector must be appended: " + args); } Result ParseAndAppend(const std::string& args, std::vector& existing_value) { existing_value.push_back(args); return Result::SuccessNoValue(); } static const char* Name() { return "std::vector"; } }; template struct ParseStringList { explicit ParseStringList(std::vector&& list) : list_(list) {} operator std::vector() const { return list_; } operator std::vector&&() && { return std::move(list_); } size_t Size() const { return list_.size(); } std::string Join() const { return android::base::Join(list_, Separator); } static ParseStringList Split(const std::string& str) { std::vector list; art::Split(str, Separator, &list); return ParseStringList(std::move(list)); } ParseStringList() = default; ParseStringList(const ParseStringList&) = default; ParseStringList(ParseStringList&&) = default; private: std::vector list_; }; template struct CmdlineType> : CmdlineTypeParser> { using Result = CmdlineParseResult>; Result Parse(const std::string& args) { return Result::Success(ParseStringList::Split(args)); } static const char* Name() { return "ParseStringList"; } }; static gc::CollectorType ParseCollectorType(const std::string& option) { if (option == "MS" || option == "nonconcurrent") { return gc::kCollectorTypeMS; } else if (option == "CMS" || option == "concurrent") { return gc::kCollectorTypeCMS; } else if (option == "SS") { return gc::kCollectorTypeSS; } else if (option == "GSS") { return gc::kCollectorTypeGSS; } else if (option == "CC") { return gc::kCollectorTypeCC; } else if (option == "MC") { return gc::kCollectorTypeMC; } else { return gc::kCollectorTypeNone; } } struct XGcOption { // These defaults are used when the command line arguments for -Xgc: // are either omitted completely or partially. gc::CollectorType collector_type_ = gc::kCollectorTypeDefault; bool verify_pre_gc_heap_ = false; bool verify_pre_sweeping_heap_ = kIsDebugBuild; bool verify_post_gc_heap_ = false; bool verify_pre_gc_rosalloc_ = kIsDebugBuild; bool verify_pre_sweeping_rosalloc_ = false; bool verify_post_gc_rosalloc_ = false; // Do no measurements for kUseTableLookupReadBarrier to avoid test timeouts. 
b/31679493 bool measure_ = kIsDebugBuild && !kUseTableLookupReadBarrier; bool gcstress_ = false; }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& option) { // -Xgc: already stripped XGcOption xgc{}; // NOLINT [readability/braces] [4] std::vector gc_options; Split(option, ',', &gc_options); for (const std::string& gc_option : gc_options) { gc::CollectorType collector_type = ParseCollectorType(gc_option); if (collector_type != gc::kCollectorTypeNone) { xgc.collector_type_ = collector_type; } else if (gc_option == "preverify") { xgc.verify_pre_gc_heap_ = true; } else if (gc_option == "nopreverify") { xgc.verify_pre_gc_heap_ = false; } else if (gc_option == "presweepingverify") { xgc.verify_pre_sweeping_heap_ = true; } else if (gc_option == "nopresweepingverify") { xgc.verify_pre_sweeping_heap_ = false; } else if (gc_option == "postverify") { xgc.verify_post_gc_heap_ = true; } else if (gc_option == "nopostverify") { xgc.verify_post_gc_heap_ = false; } else if (gc_option == "preverify_rosalloc") { xgc.verify_pre_gc_rosalloc_ = true; } else if (gc_option == "nopreverify_rosalloc") { xgc.verify_pre_gc_rosalloc_ = false; } else if (gc_option == "presweepingverify_rosalloc") { xgc.verify_pre_sweeping_rosalloc_ = true; } else if (gc_option == "nopresweepingverify_rosalloc") { xgc.verify_pre_sweeping_rosalloc_ = false; } else if (gc_option == "postverify_rosalloc") { xgc.verify_post_gc_rosalloc_ = true; } else if (gc_option == "nopostverify_rosalloc") { xgc.verify_post_gc_rosalloc_ = false; } else if (gc_option == "gcstress") { xgc.gcstress_ = true; } else if (gc_option == "nogcstress") { xgc.gcstress_ = false; } else if (gc_option == "measure") { xgc.measure_ = true; } else if ((gc_option == "precise") || (gc_option == "noprecise") || (gc_option == "verifycardtable") || (gc_option == "noverifycardtable")) { // Ignored for backwards compatibility. } else { return Result::Usage(std::string("Unknown -Xgc option ") + gc_option); } } return Result::Success(std::move(xgc)); } static const char* Name() { return "XgcOption"; } }; struct BackgroundGcOption { // If background_collector_type_ is kCollectorTypeNone, it defaults to the // XGcOption::collector_type_ after parsing options. If you set this to // kCollectorTypeHSpaceCompact then we will do an hspace compaction when // we transition to background instead of a normal collector transition. gc::CollectorType background_collector_type_; BackgroundGcOption(gc::CollectorType background_collector_type) // NOLINT [runtime/explicit] [5] : background_collector_type_(background_collector_type) {} BackgroundGcOption() : background_collector_type_(gc::kCollectorTypeNone) { } operator gc::CollectorType() const { return background_collector_type_; } }; template<> struct CmdlineType : CmdlineTypeParser, private BackgroundGcOption { Result Parse(const std::string& substring) { // Special handling for HSpaceCompact since this is only valid as a background GC type. 
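// ("HSpaceCompact" is deliberately unknown to ParseCollectorType() above, so it
// can never be selected as a regular foreground -Xgc: collector.)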
if (substring == "HSpaceCompact") { background_collector_type_ = gc::kCollectorTypeHomogeneousSpaceCompact; } else { gc::CollectorType collector_type = ParseCollectorType(substring); if (collector_type != gc::kCollectorTypeNone) { background_collector_type_ = collector_type; } else { return Result::Failure(); } } BackgroundGcOption res = *this; return Result::Success(res); } static const char* Name() { return "BackgroundGcOption"; } }; template <> struct CmdlineType : CmdlineTypeParser { Result Parse(const std::string& options) { LogVerbosity log_verbosity = LogVerbosity(); std::vector verbose_options; Split(options, ',', &verbose_options); for (size_t j = 0; j < verbose_options.size(); ++j) { if (verbose_options[j] == "class") { log_verbosity.class_linker = true; } else if (verbose_options[j] == "collector") { log_verbosity.collector = true; } else if (verbose_options[j] == "compiler") { log_verbosity.compiler = true; } else if (verbose_options[j] == "deopt") { log_verbosity.deopt = true; } else if (verbose_options[j] == "gc") { log_verbosity.gc = true; } else if (verbose_options[j] == "heap") { log_verbosity.heap = true; } else if (verbose_options[j] == "jdwp") { log_verbosity.jdwp = true; } else if (verbose_options[j] == "jit") { log_verbosity.jit = true; } else if (verbose_options[j] == "jni") { log_verbosity.jni = true; } else if (verbose_options[j] == "monitor") { log_verbosity.monitor = true; } else if (verbose_options[j] == "oat") { log_verbosity.oat = true; } else if (verbose_options[j] == "profiler") { log_verbosity.profiler = true; } else if (verbose_options[j] == "signals") { log_verbosity.signals = true; } else if (verbose_options[j] == "simulator") { log_verbosity.simulator = true; } else if (verbose_options[j] == "startup") { log_verbosity.startup = true; } else if (verbose_options[j] == "third-party-jni") { log_verbosity.third_party_jni = true; } else if (verbose_options[j] == "threads") { log_verbosity.threads = true; } else if (verbose_options[j] == "verifier") { log_verbosity.verifier = true; } else if (verbose_options[j] == "image") { log_verbosity.image = true; } else if (verbose_options[j] == "systrace-locks") { log_verbosity.systrace_lock_logging = true; } else if (verbose_options[j] == "agents") { log_verbosity.agents = true; } else if (verbose_options[j] == "dex") { log_verbosity.dex = true; } else { return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]); } } return Result::Success(log_verbosity); } static const char* Name() { return "LogVerbosity"; } }; template <> struct CmdlineType : CmdlineTypeParser { using Result = CmdlineParseResult; private: using StringResult = CmdlineParseResult; using DoubleResult = CmdlineParseResult; template static Result ParseInto(ProfileSaverOptions& options, T ProfileSaverOptions::*pField, CmdlineParseResult&& result) { assert(pField != nullptr); if (result.IsSuccess()) { options.*pField = result.ReleaseValue(); return Result::SuccessNoValue(); } return Result::CastError(result); } static std::string RemovePrefix(const std::string& source) { size_t prefix_idx = source.find(':'); if (prefix_idx == std::string::npos) { return ""; } return source.substr(prefix_idx + 1); } public: Result ParseAndAppend(const std::string& option, ProfileSaverOptions& existing) { // Special case which doesn't include a wildcard argument definition. // We pass-it through as-is. 
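// (Exactly two such blankless options are recognized, handled first below:
// "-Xjitsaveprofilinginfo" and "profile-boot-class-path".)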
if (option == "-Xjitsaveprofilinginfo") { existing.enabled_ = true; return Result::SuccessNoValue(); } if (option == "profile-boot-class-path") { existing.profile_boot_class_path_ = true; return Result::SuccessNoValue(); } // The rest of these options are always the wildcard from '-Xps-*' std::string suffix = RemovePrefix(option); if (android::base::StartsWith(option, "min-save-period-ms:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::min_save_period_ms_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "save-resolved-classes-delay-ms:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::save_resolved_classes_delay_ms_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "hot-startup-method-samples:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::hot_startup_method_samples_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "min-methods-to-save:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::min_methods_to_save_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "min-classes-to-save:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::min_classes_to_save_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "min-notification-before-wake:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::min_notification_before_wake_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "max-notification-before-wake:")) { CmdlineType type_parser; return ParseInto(existing, &ProfileSaverOptions::max_notification_before_wake_, type_parser.Parse(suffix)); } if (android::base::StartsWith(option, "profile-path:")) { existing.profile_path_ = suffix; return Result::SuccessNoValue(); } return Result::Failure(std::string("Invalid suboption '") + option + "'"); } static const char* Name() { return "ProfileSaverOptions"; } static constexpr bool kCanParseBlankless = true; }; template<> struct CmdlineType : CmdlineTypeParser { Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) { if (option == "none") { existing = ExperimentalFlags::kNone; } else { return Result::Failure(std::string("Unknown option '") + option + "'"); } return Result::SuccessNoValue(); } static const char* Name() { return "ExperimentalFlags"; } }; } // namespace art #endif // ART_CMDLINE_CMDLINE_TYPES_H_ android-platform-art-8.1.0+r23/cmdline/detail/000077500000000000000000000000001336577252300210425ustar00rootroot00000000000000android-platform-art-8.1.0+r23/cmdline/detail/cmdline_debug_detail.h000066400000000000000000000026631336577252300253250ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

#ifndef ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_
#define ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_

#include <iostream>

#ifndef CMDLINE_NDEBUG
#define CMDLINE_DEBUG_LOG std::cerr
#else
#define CMDLINE_DEBUG_LOG ::art::detail::debug_log_ignore()
#endif

namespace art {
// Implementation details for some template querying. Don't look inside if you hate templates.
namespace detail {
struct debug_log_ignore {
  // Ignore most of the normal operator<< usage.
  template <typename T>
  debug_log_ignore& operator<<(const T&) { return *this; }
  // Ignore std::endl and the like.
  debug_log_ignore& operator<<(std::ostream& (*)(std::ostream&)) { return *this; }
};
}  // namespace detail // NOLINT [readability/namespace] [5]
}  // namespace art

#endif  // ART_CMDLINE_DETAIL_CMDLINE_DEBUG_DETAIL_H_
android-platform-art-8.1.0+r23/cmdline/detail/cmdline_parse_argument_detail.h000066400000000000000000000505061336577252300272520ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_
#define ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_

#include <assert.h>

#include <algorithm>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <type_traits>
#include <vector>

#include "android-base/strings.h"

#include "cmdline_parse_result.h"
#include "cmdline_types.h"
#include "token_range.h"
#include "unit.h"

namespace art {
// Implementation details for the parser. Do not look inside if you hate templates.
namespace detail {

// A non-templated base class for argument parsers. Used by the general parser
// to parse arguments, without needing to know the argument type at compile time.
//
// This is an application of the type erasure idiom.
struct CmdlineParseArgumentAny {
  virtual ~CmdlineParseArgumentAny() {}

  // Attempt to parse this argument starting at arguments[position].
  // If the parsing succeeds, the parsed value will be saved as a side-effect.
  //
  // In most situations, the parsing will not match, and kUnknown is returned. In this case,
  // no tokens were consumed and the position variable will not be updated.
  //
  // At other times, parsing may fail due to validation but the initial token was still matched
  // (for example an out of range value, or passing in a string where an int was expected).
  // In this case the tokens are still consumed, and the position variable will get incremented
  // by all the consumed tokens.
  //
  // The # of tokens consumed by the parse attempt will be set as an out-parameter into
  // consumed_tokens. The parser should skip this many tokens before parsing the next
  // argument.
  virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) = 0;

  // How many tokens should be taken off argv for parsing this argument.
  // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space).
  //
  // A [min,max] range is returned to represent argument definitions with multiple
  // value tokens. (e.g. {"-h", "-h " } would return [1,2]).
  virtual std::pair<size_t, size_t> GetNumTokens() const = 0;

  // Get the run-time typename of the argument type.
  virtual const char* GetTypeName() const = 0;

  // Try to do a close match, returning how many tokens were matched against this argument
  // definition. More tokens is better.
  //
  // Do a quick match token-by-token, and see if they match.
  // Any tokens with a wildcard in them are only matched up until the wildcard.
  // If this is true, then the wildcard matching later on can still fail, so this is not
  // a guarantee that the argument is correct, it's more of a strong hint that the
  // user-provided input *probably* was trying to match this argument.
  //
  // Returns how many tokens were either matched (or ignored because there was a
  // wildcard present). 0 means no match; a return value equal to Size() is a full match.
  virtual size_t MaybeMatches(const TokenRange& tokens) = 0;
};

template <typename T>
using EnableIfNumeric = std::enable_if<std::is_arithmetic<T>::value>;

template <typename T>
using DisableIfNumeric = std::enable_if<!std::is_arithmetic<T>::value>;

// Argument definition information, created by an ArgumentBuilder and an UntypedArgumentBuilder.
template <typename TArg>
struct CmdlineParserArgumentInfo {
  // This version will only be used if TArg is arithmetic and thus has the <= operators.
  template <typename T = TArg>  // Necessary to get SFINAE to kick in.
  bool CheckRange(const TArg& value, typename EnableIfNumeric<T>::type* = 0) {
    if (has_range_) {
      return min_ <= value && value <= max_;
    }
    return true;
  }

  // This version will be used at other times when TArg is not arithmetic.
  template <typename T = TArg>
  bool CheckRange(const TArg&, typename DisableIfNumeric<T>::type* = 0) {
    assert(!has_range_);
    return true;
  }

  // Do a quick match token-by-token, and see if they match.
  // Any tokens with a wildcard in them only match the prefix up until the wildcard.
  //
  // If this is true, then the wildcard matching later on can still fail, so this is not
  // a guarantee that the argument is correct, it's more of a strong hint that the
  // user-provided input *probably* was trying to match this argument.
  size_t MaybeMatches(const TokenRange& token_list) const {
    auto best_match = FindClosestMatch(token_list);
    return best_match.second;
  }

  // Attempt to find the closest match (see MaybeMatches).
  //
  // Returns the token range that was the closest match and the # of tokens that
  // this range was matched up until.
  std::pair<const TokenRange*, size_t> FindClosestMatch(const TokenRange& token_list) const {
    const TokenRange* best_match_ptr = nullptr;
    size_t best_match = 0;
    for (auto&& token_range : tokenized_names_) {
      size_t this_match = token_range.MaybeMatches(token_list, std::string("_"));
      if (this_match > best_match) {
        best_match_ptr = &token_range;
        best_match = this_match;
      }
    }
    return std::make_pair(best_match_ptr, best_match);
  }
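  // Illustrative sketch (not part of the original header): a minimal standalone
  // version of the per-token wildcard test that the matching above relies on.
  // "MatchesOneTokenSketch" is a hypothetical helper added only for this example;
  // the real per-token logic lives in TokenRange::MaybeMatches.
  static bool MatchesOneTokenSketch(const std::string& def_token,
                                    const std::string& user_token) {
    size_t wildcard_idx = def_token.find('_');
    if (wildcard_idx == std::string::npos) {
      // No wildcard: the tokens must be identical.
      return def_token == user_token;
    }
    // Wildcard: only the prefix before the '_' has to match.
    const std::string prefix = def_token.substr(0, wildcard_idx);
    return user_token.compare(0, prefix.size(), prefix) == 0;
  }
  // e.g. MatchesOneTokenSketch("-Xmx_", "-Xmx64m") is true, while
  //      MatchesOneTokenSketch("--help", "--halp") is false.

  // Mark the argument definition as completed, do not mutate the object anymore after this
  // call is done.
  //
  // Performs several sanity checks and token calculations.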
  void CompleteArgument() {
    assert(names_.size() >= 1);
    assert(!is_completed_);

    is_completed_ = true;

    size_t blank_count = 0;
    size_t token_count = 0;

    size_t global_blank_count = 0;
    size_t global_token_count = 0;
    for (auto&& name : names_) {
      std::string s(name);

      size_t local_blank_count = std::count(s.begin(), s.end(), '_');
      size_t local_token_count = std::count(s.begin(), s.end(), ' ');

      if (global_blank_count != 0) {
        assert(local_blank_count == global_blank_count
               && "Every argument descriptor string must have same amount of blanks (_)");
      }

      if (local_blank_count != 0) {
        global_blank_count = local_blank_count;
        blank_count++;

        assert(local_blank_count == 1 && "More than one blank is not supported");
        assert(s.back() == '_' && "The blank character must only be at the end of the string");
      }

      if (global_token_count != 0) {
        assert(local_token_count == global_token_count
               && "Every argument descriptor string must have same amount of tokens (spaces)");
      }

      if (local_token_count != 0) {
        global_token_count = local_token_count;
        token_count++;
      }

      // Tokenize every name, turning it from a string to a token list.
      tokenized_names_.clear();
      for (auto&& name1 : names_) {
        // Split along ' ' only, removing any duplicated spaces.
        tokenized_names_.push_back(
            TokenRange::Split(name1, {' '}).RemoveToken(" "));
      }

      // Remove the '_' character from each of the token ranges.
      // We will often end up with an empty token (i.e. ["-XX", "_"] -> ["-XX", ""]),
      // and this is OK because we still need an empty token to simplify
      // range comparisons.
      simple_names_.clear();
      for (auto&& tokenized_name : tokenized_names_) {
        simple_names_.push_back(tokenized_name.RemoveCharacter('_'));
      }
    }

    if (token_count != 0) {
      assert(("Every argument descriptor string must have equal amount of tokens (spaces)" &&
              token_count == names_.size()));
    }

    if (blank_count != 0) {
      assert(("Every argument descriptor string must have an equal amount of blanks (_)" &&
              blank_count == names_.size()));
    }

    using_blanks_ = blank_count > 0;
    {
      size_t smallest_name_token_range_size =
          std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), ~(0u),
                          [](size_t min, const TokenRange& cur) {
                            return std::min(min, cur.Size());
                          });
      size_t largest_name_token_range_size =
          std::accumulate(tokenized_names_.begin(), tokenized_names_.end(), 0u,
                          [](size_t max, const TokenRange& cur) {
                            return std::max(max, cur.Size());
                          });

      token_range_size_ = std::make_pair(smallest_name_token_range_size,
                                         largest_name_token_range_size);
    }

    if (has_value_list_) {
      assert(names_.size() == value_list_.size()
             && "Number of arg descriptors must match number of values");
      assert(!has_value_map_);
    }
    if (has_value_map_) {
      if (!using_blanks_) {
        assert(names_.size() == value_map_.size() &&
               "Since no blanks were specified, each arg is mapped directly into a mapped "
               "value without parsing; sizes must match");
      }
      assert(!has_value_list_);
    }

    if (!using_blanks_ && !CmdlineType<TArg>::kCanParseBlankless) {
      assert((has_value_map_ || has_value_list_) &&
             "Arguments without a blank (_) must provide either a value map or a value list");
    }

    TypedCheck();
  }

  // List of aliases for a single argument definition, e.g. {"-Xdex2oat", "-Xnodex2oat"}.
  std::vector<const char*> names_;

  // Is there at least 1 wildcard '_' in the argument definition?
  bool using_blanks_ = false;
  // [min, max] token counts in each arg def.
  std::pair<size_t, size_t> token_range_size_;

  // Contains all the names in a tokenized form, i.e. as a space-delimited list.
  std::vector<TokenRange> tokenized_names_;

  // Contains the tokenized names, but with the '_' character stripped.
  std::vector<TokenRange> simple_names_;

  // For argument definitions created with '.AppendValues()',
  // meaning that parsing should mutate the existing value in-place if possible.
  bool appending_values_ = false;

  // For argument definitions created with '.WithRange(min, max)'.
  bool has_range_ = false;
  TArg min_;
  TArg max_;

  // For argument definitions created with '.WithValueMap'.
  bool has_value_map_ = false;
  std::vector<std::pair<const char*, TArg>> value_map_;

  // For argument definitions created with '.WithValues'.
  bool has_value_list_ = false;
  std::vector<TArg> value_list_;

  // Make sure there's a default constructor.
  CmdlineParserArgumentInfo() = default;

  // Ensure there's a default move constructor.
  CmdlineParserArgumentInfo(CmdlineParserArgumentInfo&&) = default;

 private:
  // Perform type-specific checks at runtime.
  template <typename T = TArg>
  void TypedCheck(typename std::enable_if<std::is_same<T, Unit>::value>::type* = 0) {
    assert(!using_blanks_ &&
           "Blanks are not supported in Unit arguments; since a Unit has no parse-able value");
  }

  void TypedCheck() {}

  bool is_completed_ = false;
};

// A virtual-implementation of the necessary argument information in order to
// be able to parse arguments.
template <typename TArg>
struct CmdlineParseArgument : CmdlineParseArgumentAny {
  CmdlineParseArgument(CmdlineParserArgumentInfo<TArg>&& argument_info,
                       std::function<void(TArg&)>&& save_argument,
                       std::function<TArg&(void)>&& load_argument)
      : argument_info_(std::forward<decltype(argument_info)>(argument_info)),
        save_argument_(std::forward<decltype(save_argument)>(save_argument)),
        load_argument_(std::forward<decltype(load_argument)>(load_argument)) {
  }

  using UserTypeInfo = CmdlineType<TArg>;

  virtual CmdlineResult ParseArgument(const TokenRange& arguments, size_t* consumed_tokens) {
    assert(arguments.Size() > 0);
    assert(consumed_tokens != nullptr);

    auto closest_match_res = argument_info_.FindClosestMatch(arguments);
    size_t best_match_size = closest_match_res.second;
    const TokenRange* best_match_arg_def = closest_match_res.first;

    if (best_match_size > arguments.Size()) {
      // The best match has more tokens than were provided.
      // Shouldn't happen in practice since the outer parser does this check.
      return CmdlineResult(CmdlineResult::kUnknown, "Size mismatch");
    }

    assert(best_match_arg_def != nullptr);
    *consumed_tokens = best_match_arg_def->Size();

    if (!argument_info_.using_blanks_) {
      return ParseArgumentSingle(arguments.Join(' '));
    }

    // Extract out the blank value from arguments,
    // e.g. for a def of "foo:_" and input "foo:bar", blank_value == "bar".
    std::string blank_value = "";
    size_t idx = 0;
    for (auto&& def_token : *best_match_arg_def) {
      auto&& arg_token = arguments[idx];

      // Does this definition-token have a wildcard in it?
      if (def_token.find('_') == std::string::npos) {
        // No, regular token. Match 1:1 against the argument token.
        bool token_match = def_token == arg_token;

        if (!token_match) {
          return CmdlineResult(CmdlineResult::kFailure,
                               std::string("Failed to parse ") + best_match_arg_def->GetToken(0)
                               + " at token " + std::to_string(idx));
        }
      } else {
        // This is a wild-carded token.
        TokenRange def_split_wildcards = TokenRange::Split(def_token, {'_'});

        // Extract the wildcard contents out of the user-provided arg_token.
        std::unique_ptr<TokenRange> arg_matches =
            def_split_wildcards.MatchSubstrings(arg_token, "_");
        if (arg_matches == nullptr) {
          return CmdlineResult(CmdlineResult::kFailure,
                               std::string("Failed to parse ") + best_match_arg_def->GetToken(0)
                               + ", with a wildcard pattern " + def_token
                               + " at token " + std::to_string(idx));
        }

        // Get the corresponding wildcard tokens from arg_matches,
        // and concatenate them to blank_value.
        for (size_t sub_idx = 0;
             sub_idx < def_split_wildcards.Size() && sub_idx < arg_matches->Size(); ++sub_idx) {
          if (def_split_wildcards[sub_idx] == "_") {
            blank_value += arg_matches->GetToken(sub_idx);
          }
        }
      }

      ++idx;
    }

    return ParseArgumentSingle(blank_value);
  }

 private:
  virtual CmdlineResult ParseArgumentSingle(const std::string& argument) {
    // TODO: refactor to use LookupValue for the value lists/maps.

    // Handle the 'WithValueMap(...)' argument definition.
    if (argument_info_.has_value_map_) {
      for (auto&& value_pair : argument_info_.value_map_) {
        const char* name = value_pair.first;

        if (argument == name) {
          return SaveArgument(value_pair.second);
        }
      }

      // Error case: Fail, telling the user what the allowed values were.
      std::vector<const char*> allowed_values;
      for (auto&& value_pair : argument_info_.value_map_) {
        const char* name = value_pair.first;
        allowed_values.push_back(name);
      }

      std::string allowed_values_flat = android::base::Join(allowed_values, ',');
      return CmdlineResult(CmdlineResult::kFailure,
                           "Argument value '" + argument +
                           "' does not match any of the known valid values: {" +
                           allowed_values_flat + "}");
    }

    // Handle the 'WithValues(...)' argument definition.
    if (argument_info_.has_value_list_) {
      size_t arg_def_idx = 0;
      for (auto&& value : argument_info_.value_list_) {
        auto&& arg_def_token = argument_info_.names_[arg_def_idx];

        if (arg_def_token == argument) {
          return SaveArgument(value);
        }

        ++arg_def_idx;
      }

      assert(argument_info_.names_.size() == argument_info_.value_list_.size() &&
             "Number of named argument definitions must match number of values defined");

      // Error case: Fail, telling the user what the allowed values were.
      std::vector<const char*> allowed_values;
      for (auto&& arg_name : argument_info_.names_) {
        allowed_values.push_back(arg_name);
      }

      std::string allowed_values_flat = android::base::Join(allowed_values, ',');
      return CmdlineResult(CmdlineResult::kFailure,
                           "Argument value '" + argument +
                           "' does not match any of the known valid values: {" +
                           allowed_values_flat + "}");
    }

    // Handle the regular case where we parsed an unknown value from a blank.
    UserTypeInfo type_parser;

    if (argument_info_.appending_values_) {
      TArg& existing = load_argument_();
      CmdlineParseResult<TArg> result = type_parser.ParseAndAppend(argument, existing);

      assert(!argument_info_.has_range_);

      return result;
    }

    CmdlineParseResult<TArg> result = type_parser.Parse(argument);

    if (result.IsSuccess()) {
      TArg& value = result.GetValue();

      // Do a range check for 'WithRange(min,max)' argument definition.
      if (!argument_info_.CheckRange(value)) {
        return CmdlineParseResult<TArg>::OutOfRange(
            value, argument_info_.min_, argument_info_.max_);
      }

      return SaveArgument(value);
    }

    // Some kind of type-specific parse error. Pass the result as-is.
    CmdlineResult raw_result = std::move(result);
    return raw_result;
  }

 public:
  virtual const char* GetTypeName() const {
    // TODO: Obviate the need for each type specialization to hardcode the type name.
    return UserTypeInfo::Name();
  }
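  // Illustrative sketch (not part of the original source): the value-map branch
  // above is a plain linear scan over (name, value) pairs. "LookupValueSketch"
  // is a hypothetical helper added only for this example; the real code inlines
  // the scan directly in ParseArgumentSingle.
  template <typename TValue>
  static const TValue* LookupValueSketch(
      const std::vector<std::pair<const char*, TValue>>& value_map,
      const std::string& argument) {
    for (auto&& value_pair : value_map) {
      if (argument == value_pair.first) {
        return &value_pair.second;  // Matched: the caller saves this value.
      }
    }
    return nullptr;  // No match: the caller reports the allowed values.
  }

  // How many tokens should be taken off argv for parsing this argument.
  // For example "--help" is just 1, "-compiler-option _" would be 2 (since there's a space).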
  //
  // A [min,max] range is returned to represent argument definitions with multiple
  // value tokens. (e.g. {"-h", "-h " } would return [1,2]).
  virtual std::pair<size_t, size_t> GetNumTokens() const {
    return argument_info_.token_range_size_;
  }

  // See if this token range might begin the same as the argument definition.
  virtual size_t MaybeMatches(const TokenRange& tokens) {
    return argument_info_.MaybeMatches(tokens);
  }

 private:
  CmdlineResult SaveArgument(const TArg& value) {
    assert(!argument_info_.appending_values_
           && "If the values are being appended, then the updated parse value is "
              "updated by-ref as a side effect and shouldn't be stored directly");
    TArg val = value;
    save_argument_(val);
    return CmdlineResult(CmdlineResult::kSuccess);
  }

  CmdlineParserArgumentInfo<TArg> argument_info_;
  std::function<void(TArg&)> save_argument_;
  std::function<TArg&(void)> load_argument_;
};
}  // namespace detail  // NOLINT [readability/namespace] [5]
}  // namespace art

#endif  // ART_CMDLINE_DETAIL_CMDLINE_PARSE_ARGUMENT_DETAIL_H_
android-platform-art-8.1.0+r23/cmdline/detail/cmdline_parser_detail.h000066400000000000000000000117031336577252300255260ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_
#define ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_

#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

namespace art {
// Implementation details for some template querying. Don't look inside if you hate templates.
namespace detail {
template <typename T>
typename std::remove_reference<T>::type& FakeReference();

// SupportsInsertionOperator<TStream, T>::value will evaluate to a boolean,
// whose value is true if the TStream class supports the << operator against T,
// and false otherwise.
template <typename TStream, typename T>
struct SupportsInsertionOperator {
 private:
  template <typename TStream2, typename T2>
  static std::true_type InsertionOperatorTest(
      TStream2& os, const T2& value,
      std::remove_reference<decltype(os << value)>* = 0);  // NOLINT [whitespace/operators] [3]

  template <typename TStream2, typename ... T2>
  static std::false_type InsertionOperatorTest(TStream2& os, const T2& ... args);

 public:
  static constexpr bool value =
      decltype(InsertionOperatorTest(FakeReference<TStream>(), std::declval<T>()))::value;
};

template <typename TLeft, typename TRight, bool IsFloatingPoint>
struct SupportsEqualityOperatorImpl;

template <typename TLeft, typename TRight>
struct SupportsEqualityOperatorImpl<TLeft, TRight, false> {
 private:
  template <typename TL, typename TR>
  static std::true_type EqualityOperatorTest(
      const TL& left, const TR& right,
      std::remove_reference<decltype(left == right)>* = 0);  // NOLINT [whitespace/operators] [3]

  template <typename TL, typename ... T>
  static std::false_type EqualityOperatorTest(const TL& left, const T& ... args);

 public:
  static constexpr bool value =
      decltype(EqualityOperatorTest(std::declval<TLeft>(), std::declval<TRight>()))::value;
};
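// Illustrative usage sketch (not part of the original header): both detectors
// are pure compile-time queries, so they can be exercised with static_asserts.
// "SketchUnprintable" is a hypothetical type introduced only for this example.
struct SketchUnprintable {};
static_assert(SupportsInsertionOperator<std::stringstream, int>::value,
              "int can be inserted into a std::stringstream with operator<<");
static_assert(!SupportsInsertionOperator<std::stringstream, SketchUnprintable>::value,
              "a type without operator<< must be detected as unprintable");
static_assert(SupportsEqualityOperatorImpl<int, int, false>::value,
              "int can be compared against int with ==");

// Partial specialization when TLeft/TRight are both floating points.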
// This is a work-around because decltype(floatvar1 == floatvar2)
// will not compile with clang:
// error: comparing floating point with == or != is unsafe [-Werror,-Wfloat-equal]
template <typename TLeft, typename TRight>
struct SupportsEqualityOperatorImpl<TLeft, TRight, true> {
  static constexpr bool value = true;
};

// SupportsEqualityOperator<T1, T2>::value will evaluate to a boolean,
// whose value is true if T1 can be compared against T2 with ==,
// and false otherwise.
template <typename TLeft, typename TRight>
struct SupportsEqualityOperator :
    SupportsEqualityOperatorImpl<TLeft, TRight,
                                 std::is_floating_point<TLeft>::value &&
                                     std::is_floating_point<TRight>::value> {
};

// Convert any kind of type to an std::string, even if there's no
// serialization support for it. Unknown types get converted to an
// arbitrary value.
//
// Meant for printing user-visible errors or unit test failures only.
template <typename T>
std::string ToStringAny(const T& value,
                        typename std::enable_if<
                            SupportsInsertionOperator<std::stringstream, T>::value>::type* = 0) {
  std::stringstream stream;
  stream << value;
  return stream.str();
}

template <typename T>
std::string ToStringAny(const std::vector<T> value,
                        typename std::enable_if<
                            SupportsInsertionOperator<std::stringstream, T>::value>::type* = 0) {
  std::stringstream stream;

  stream << "vector{";
  for (size_t i = 0; i < value.size(); ++i) {
    stream << ToStringAny(value[i]);

    if (i != value.size() - 1) {
      stream << ',';
    }
  }
  stream << "}";
  return stream.str();
}

template <typename T>
std::string ToStringAny(const T&,
                        typename std::enable_if<
                            !SupportsInsertionOperator<std::stringstream, T>::value>::type* = 0) {
  return std::string("(unknown type [no operator<< implemented] for )");
}
}  // namespace detail  // NOLINT [readability/namespace] [5]
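// Illustrative usage sketch (not part of the original header): ToStringAny
// resolves to one of the overloads above at compile time and degrades
// gracefully for unprintable types. "SketchOpaque" and this helper are
// hypothetical, added only for this example.
inline std::string ToStringAnyUsageSketch() {
  struct SketchOpaque {};
  std::string a = detail::ToStringAny(42);                      // yields "42"
  std::string b = detail::ToStringAny(std::vector<int>{1, 2});  // yields "vector{1,2}"
  std::string c = detail::ToStringAny(SketchOpaque{});          // yields the fallback text
  return a + " " + b + " " + c;
}
}  // namespace art

#endif  // ART_CMDLINE_DETAIL_CMDLINE_PARSER_DETAIL_H_
android-platform-art-8.1.0+r23/cmdline/memory_representation.h000066400000000000000000000035351336577252300244110ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_CMDLINE_MEMORY_REPRESENTATION_H_
#define ART_CMDLINE_MEMORY_REPRESENTATION_H_

#include <assert.h>

#include <ostream>
#include <string>

#include "base/bit_utils.h"

namespace art {

// An integral representation of bytes of memory.
// The underlying runtime size_t value is guaranteed to be a multiple of Divisor.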
template <size_t kDivisor>
struct Memory {
  static_assert(IsPowerOfTwo(kDivisor), "Divisor must be a power of 2");

  static Memory<kDivisor> FromBytes(size_t bytes) {
    assert(bytes % kDivisor == 0);
    return Memory<kDivisor>(bytes);
  }

  Memory() : Value(0u) {}
  Memory(size_t value) : Value(value) {  // NOLINT [runtime/explicit] [5]
    assert(value % kDivisor == 0);
  }

  operator size_t() const { return Value; }

  size_t ToBytes() const { return Value; }

  static const char* Name() {
    static std::string str;
    if (str.empty()) {
      str = "Memory<" + std::to_string(kDivisor) + '>';
    }
    return str.c_str();
  }

  size_t Value;
};

template <size_t kDivisor>
std::ostream& operator<<(std::ostream& stream, Memory<kDivisor> memory) {
  return stream << memory.Value << '*' << kDivisor;
}

using MemoryKiB = Memory<1024>;
}  // namespace art

#endif  // ART_CMDLINE_MEMORY_REPRESENTATION_H_
android-platform-art-8.1.0+r23/cmdline/token_range.h000066400000000000000000000335461336577252300222560ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_CMDLINE_TOKEN_RANGE_H_
#define ART_CMDLINE_TOKEN_RANGE_H_

#include <assert.h>

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "android-base/strings.h"

namespace art {
// A range of tokens to make token matching algorithms easier.
//
// We try really hard to avoid copying and store only a pointer and iterators to the
// interiors of the vector, so a typical copy constructor never ends up doing a deep copy.
// It is up to the user to play nice and not to mutate the strings in-place.
//
// Tokens are only copied if a mutating operation is performed (and even then only
// if it *actually* mutates the token).
struct TokenRange {
  // Short-hand for a vector of strings. A token is synonymous with a single string.
  using TokenList = std::vector<std::string>;

  // Copying-from-vector constructor.
  explicit TokenRange(const TokenList& token_list)
      : token_list_(new TokenList(token_list)),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {}

  // Copying-from-iterator constructor.
  template <typename ForwardIterator>
  TokenRange(ForwardIterator it_begin, ForwardIterator it_end)
      : token_list_(new TokenList(it_begin, it_end)),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {}

#if 0
  // Copying-from-vector constructor.
  TokenRange(const TokenList& token_list ATTRIBUTE_UNUSED,
             TokenList::const_iterator it_begin,
             TokenList::const_iterator it_end)
      : token_list_(new TokenList(it_begin, it_end)),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {
    assert(it_begin >= token_list.begin());
    assert(it_end <= token_list.end());
  }
#endif

  // Copying-from-char-array constructor, converting into tokens (strings) along the way.
  TokenRange(const char* token_list[], size_t length)
      : token_list_(new TokenList(&token_list[0], &token_list[length])),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {}

  // Non-copying move-from-vector constructor. Takes over the token vector.
  explicit TokenRange(TokenList&& token_list)
      : token_list_(new TokenList(std::forward<TokenList>(token_list))),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {}
  // Non-copying constructor. Retains a reference to the existing list of tokens.
  TokenRange(std::shared_ptr<TokenList> token_list,
             TokenList::const_iterator it_begin,
             TokenList::const_iterator it_end)
      : token_list_(token_list),
        begin_(it_begin),
        end_(it_end) {
    assert(it_begin >= token_list->begin());
    assert(it_end <= token_list->end());
  }

  // Non-copying copy constructor.
  TokenRange(const TokenRange&) = default;

  // Non-copying move constructor.
  TokenRange(TokenRange&&) = default;

  // Non-copying constructor. Retains reference to an existing list of tokens, with offset.
  explicit TokenRange(std::shared_ptr<TokenList> token_list)
      : token_list_(token_list),
        begin_(token_list_->begin()),
        end_(token_list_->end()) {}

  // Iterator type for begin() and end(). Guaranteed to be a RandomAccessIterator.
  using iterator = TokenList::const_iterator;

  // Iterator type for const begin() and const end(). Guaranteed to be a RandomAccessIterator.
  using const_iterator = iterator;

  // Create a token range by splitting a string. Each separator gets its own token.
  // Since the separators are retained as tokens, it might be useful to call
  // RemoveToken afterwards.
  static TokenRange Split(const std::string& string, std::initializer_list<char> separators) {
    TokenList new_token_list;

    std::string tok;
    for (auto&& c : string) {
      bool is_separator = false;
      for (char sep : separators) {
        if (c == sep) {
          is_separator = true;
          break;
        }
      }

      if (is_separator) {
        // We spotted a separator character.
        // Push back everything before the separator as a new token.
        // Push back the separator as a token.
        if (!tok.empty()) {
          new_token_list.push_back(tok);
          tok = "";
        }
        new_token_list.push_back(std::string() + c);
      } else {
        // Build up the token with another character.
        tok += c;
      }
    }

    if (!tok.empty()) {
      new_token_list.push_back(tok);
    }

    return TokenRange(std::move(new_token_list));
  }

  // A RandomAccessIterator to the first element in this range.
  iterator begin() const {
    return begin_;
  }

  // A RandomAccessIterator to one past the last element in this range.
  iterator end() const {
    return end_;
  }

  // The size of the range, i.e. how many tokens are in it.
  size_t Size() const {
    return std::distance(begin_, end_);
  }

  // Are there 0 tokens in this range?
  bool IsEmpty() const {
    return Size() == 0;
  }

  // Look up a token by its offset.
  const std::string& GetToken(size_t offset) const {
    assert(offset < Size());
    return *(begin_ + offset);
  }

  // Does this token range equal the other range?
  // Equality is defined as having both the same size, and
  // each corresponding token being equal.
  bool operator==(const TokenRange& other) const {
    if (this == &other) {
      return true;
    }

    if (Size() != other.Size()) {
      return false;
    }

    return std::equal(begin(), end(), other.begin());
  }

  // Look up the token at the requested index.
  const std::string& operator[](int index) const {
    assert(index >= 0 && static_cast<size_t>(index) < Size());
    return *(begin() + index);
  }

  // Does this current range start with the other range?
  bool StartsWith(const TokenRange& other) const {
    if (this == &other) {
      return true;
    }

    if (Size() < other.Size()) {
      return false;
    }

    auto& smaller = Size() < other.Size() ? *this : other;
    auto& greater = Size() < other.Size() ? other : *this;

    return std::equal(smaller.begin(), smaller.end(), greater.begin());
  }
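  // Illustrative usage sketch (not part of the original header): Split keeps
  // each separator as its own token, so callers usually chain RemoveToken to
  // drop them. "SplitUsageSketch" is a hypothetical helper added only for this
  // example; the expected contents follow from the documented behavior above.
  static TokenRange SplitUsageSketch() {
    // Split("a b", {' '}) yields ["a", " ", "b"] ...
    TokenRange with_separators = TokenRange::Split("a b", {' '});
    // ... and removing the separator token leaves ["a", "b"].
    return with_separators.RemoveToken(" ");
  }

  // Remove all characters 'c' from each token, potentially copying the underlying tokens.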
  TokenRange RemoveCharacter(char c) const {
    TokenList new_token_list(begin(), end());

    bool changed = false;
    for (auto&& token : new_token_list) {
      auto it = std::remove_if(token.begin(), token.end(), [&](char ch) {
        if (ch == c) {
          changed = true;
          return true;
        }
        return false;
      });
      token.erase(it, token.end());
    }

    if (!changed) {
      return *this;
    }

    return TokenRange(std::move(new_token_list));
  }

  // Remove all tokens matching this one, potentially copying the underlying tokens.
  TokenRange RemoveToken(const std::string& token) {
    return RemoveIf([&](const std::string& tok) { return tok == token; });
  }

  // Discard all empty tokens, potentially copying the underlying tokens.
  TokenRange DiscardEmpty() const {
    return RemoveIf([](const std::string& token) { return token.empty(); });
  }

  // Create a non-copying subset of this range.
  // Length is trimmed so that the Slice does not go out of range.
  TokenRange Slice(size_t offset, size_t length = std::string::npos) const {
    assert(offset < Size());

    if (length != std::string::npos && offset + length > Size()) {
      length = Size() - offset;
    }

    iterator it_end;
    if (length == std::string::npos) {
      it_end = end();
    } else {
      it_end = begin() + offset + length;
    }

    return TokenRange(token_list_, begin() + offset, it_end);
  }

  // Try to match the string with tokens from this range.
  // Each token is used to match exactly once (after which the next token is used, and so on).
  // The matching happens from left-to-right in a non-greedy fashion.
  // If the currently-matched token is the wildcard, then the new outputted token will
  // contain as much as possible until the next token is matched.
  //
  // For example, if this == ["a:", "_", "b:"] and "_" is the match string, then
  // MatchSubstrings on "a:foob:" will yield: ["a:", "foo", "b:"]
  //
  // Since the string matching can fail (e.g. ["foo"] against "bar"), this
  // function can fail, in which case it will return null.
  std::unique_ptr<TokenRange> MatchSubstrings(const std::string& string,
                                              const std::string& wildcard) const {
    TokenList new_token_list;

    size_t wildcard_idx = std::string::npos;
    size_t string_idx = 0;

    // Function to push all the characters matched as a wildcard so far
    // as a brand new token. It resets the wildcard matching.
    // Empty wildcards are possible and ok, but only if wildcard matching was on.
    auto maybe_push_wildcard_token = [&]() {
      if (wildcard_idx != std::string::npos) {
        size_t wildcard_length = string_idx - wildcard_idx;
        std::string wildcard_substr = string.substr(wildcard_idx, wildcard_length);
        new_token_list.push_back(std::move(wildcard_substr));

        wildcard_idx = std::string::npos;
      }
    };

    for (iterator it = begin(); it != end(); ++it) {
      const std::string& tok = *it;

      if (tok == wildcard) {
        maybe_push_wildcard_token();
        wildcard_idx = string_idx;
        continue;
      }

      size_t next_token_idx = string.find(tok);
      if (next_token_idx == std::string::npos) {
        // Could not find the token at all.
        return nullptr;
      } else if (next_token_idx != string_idx && wildcard_idx == std::string::npos) {
        // Found the token at a non-starting location, and we weren't
        // trying to parse the wildcard.
        return nullptr;
      }

      new_token_list.push_back(string.substr(next_token_idx, tok.size()));
      maybe_push_wildcard_token();
      string_idx += tok.size();
    }

    size_t remaining = string.size() - string_idx;
    if (remaining > 0) {
      if (wildcard_idx == std::string::npos) {
        // Some characters were still remaining in the string,
        // but it wasn't trying to match a wildcard.
        return nullptr;
      }
    }

    // If some characters are remaining, the rest must be a wildcard.
    string_idx += remaining;
    maybe_push_wildcard_token();

    return std::unique_ptr<TokenRange>(new TokenRange(std::move(new_token_list)));
  }

  // Do a quick match token-by-token, and see if they match.
  // Any tokens with a wildcard in them are only matched up until the wildcard.
  // If this is true, then the wildcard matching later on can still fail, so this is not
  // a guarantee that the argument is correct, it's more of a strong hint that the
  // user-provided input *probably* was trying to match this argument.
  //
  // Returns how many tokens were either matched (or ignored because there was a
  // wildcard present). 0 means no match; a return value equal to Size() is a full match.
  size_t MaybeMatches(const TokenRange& token_list, const std::string& wildcard) const {
    auto token_it = token_list.begin();
    auto token_end = token_list.end();
    auto name_it = begin();
    auto name_end = end();

    size_t matched_tokens = 0;

    while (token_it != token_end && name_it != name_end) {
      // Skip token matching when the corresponding name has a wildcard in it.
      const std::string& name = *name_it;

      size_t wildcard_idx = name.find(wildcard);
      if (wildcard_idx == std::string::npos) {  // No wildcard present.
        // Did the definition token match the user token?
        if (name != *token_it) {
          return matched_tokens;
        }
      } else {
        std::string name_prefix = name.substr(0, wildcard_idx);

        // Did the user token start with the up-to-the-wildcard prefix?
        if (!StartsWith(*token_it, name_prefix)) {
          return matched_tokens;
        }
      }

      ++token_it;
      ++name_it;
      ++matched_tokens;
    }

    // If we got this far, it's either a full match or the token list was too short.
    return matched_tokens;
  }

  // Flatten the token range by joining every adjacent token with the separator character.
  // e.g. ["hello", "world"].Join('$') == "hello$world"
  std::string Join(char separator) const {
    TokenList tmp(begin(), end());
    return android::base::Join(tmp, separator);
    // TODO: Join should probably take an offset or iterators.
  }

 private:
  static bool StartsWith(const std::string& larger, const std::string& smaller) {
    if (larger.size() >= smaller.size()) {
      return std::equal(smaller.begin(), smaller.end(), larger.begin());
    }

    return false;
  }

  template <typename TPredicate>
  TokenRange RemoveIf(const TPredicate& predicate) const {
    // If any of the tokens in the token lists are empty, then
    // we need to remove them and compress the token list into a smaller one.
    bool remove = false;
    for (auto it = begin_; it != end_; ++it) {
      auto&& token = *it;

      if (predicate(token)) {
        remove = true;
        break;
      }
    }

    // Actually copy the token list and remove the tokens that match our predicate.
    if (remove) {
      auto token_list = std::make_shared<TokenList>(begin(), end());
      TokenList::iterator new_end =
          std::remove_if(token_list->begin(), token_list->end(), predicate);
      token_list->erase(new_end, token_list->end());

      assert(token_list_->size() > token_list->size() && "Nothing was actually removed!");

      return TokenRange(token_list);
    }

    return *this;
  }

  const std::shared_ptr<std::vector<std::string>> token_list_;
  const iterator begin_;
  const iterator end_;
};
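// Illustrative usage sketch (not part of the original header): MaybeMatches
// counts how many leading tokens line up, treating "_" in a definition token
// as a wildcard that only requires a prefix match. "TokenRangeMatchSketch" is
// a hypothetical helper added only for this example.
inline size_t TokenRangeMatchSketch() {
  TokenRange definition(std::vector<std::string>{"-Xmx_"});
  TokenRange user_input(std::vector<std::string>{"-Xmx64m"});
  // "-Xmx64m" starts with the prefix "-Xmx", so exactly one token matches.
  return definition.MaybeMatches(user_input, "_");  // returns 1
}
}  // namespace art

#endif  // ART_CMDLINE_TOKEN_RANGE_H_
android-platform-art-8.1.0+r23/cmdline/unit.h000066400000000000000000000021251336577252300207300ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.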
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_CMDLINE_UNIT_H_ #define ART_CMDLINE_UNIT_H_ namespace art { // Used for arguments that simply indicate presence (e.g. "-help") without any values. struct Unit { // Avoid 'Conditional jump or move depends on uninitialised value(s)' errors // when running valgrind by specifying a user-defined constructor. Unit() {} Unit(const Unit&) = default; ~Unit() {} bool operator==(Unit) const { return true; } }; } // namespace art #endif // ART_CMDLINE_UNIT_H_ android-platform-art-8.1.0+r23/compiler/000077500000000000000000000000001336577252300177775ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/Android.bp000066400000000000000000000365471336577252300217210ustar00rootroot00000000000000// // Copyright (C) 2012 The Android Open Source Project // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // TODO We should really separate out those files that are actually needed for both variants of an // architecture into its own category. Currently we just include all of the 32bit variant in the // 64bit variant. It also might be good to allow one to compile only the 64bit variant without the // 32bit one. 
art_cc_defaults { name: "libart-compiler-defaults", defaults: ["art_defaults"], host_supported: true, clang: true, srcs: [ "compiled_method.cc", "debug/elf_debug_writer.cc", "dex/dex_to_dex_compiler.cc", "dex/inline_method_analyser.cc", "dex/verified_method.cc", "dex/verification_results.cc", "dex/quick_compiler_callbacks.cc", "driver/compiled_method_storage.cc", "driver/compiler_driver.cc", "driver/compiler_options.cc", "driver/dex_compilation_unit.cc", "linker/buffered_output_stream.cc", "linker/file_output_stream.cc", "linker/multi_oat_relative_patcher.cc", "linker/output_stream.cc", "linker/vector_output_stream.cc", "linker/relative_patcher.cc", "jit/jit_compiler.cc", "jit/jit_logger.cc", "jni/quick/calling_convention.cc", "jni/quick/jni_compiler.cc", "optimizing/block_builder.cc", "optimizing/bounds_check_elimination.cc", "optimizing/builder.cc", "optimizing/cha_guard_optimization.cc", "optimizing/code_generator.cc", "optimizing/code_generator_utils.cc", "optimizing/code_sinking.cc", "optimizing/constant_folding.cc", "optimizing/dead_code_elimination.cc", "optimizing/escape.cc", "optimizing/graph_checker.cc", "optimizing/graph_visualizer.cc", "optimizing/gvn.cc", "optimizing/induction_var_analysis.cc", "optimizing/induction_var_range.cc", "optimizing/inliner.cc", "optimizing/instruction_builder.cc", "optimizing/instruction_simplifier.cc", "optimizing/intrinsics.cc", "optimizing/licm.cc", "optimizing/linear_order.cc", "optimizing/load_store_analysis.cc", "optimizing/load_store_elimination.cc", "optimizing/locations.cc", "optimizing/loop_optimization.cc", "optimizing/nodes.cc", "optimizing/optimization.cc", "optimizing/optimizing_compiler.cc", "optimizing/parallel_move_resolver.cc", "optimizing/prepare_for_register_allocation.cc", "optimizing/reference_type_propagation.cc", "optimizing/register_allocation_resolver.cc", "optimizing/register_allocator.cc", "optimizing/register_allocator_graph_color.cc", "optimizing/register_allocator_linear_scan.cc", "optimizing/select_generator.cc", "optimizing/scheduler.cc", "optimizing/sharpening.cc", "optimizing/side_effects_analysis.cc", "optimizing/ssa_builder.cc", "optimizing/ssa_liveness_analysis.cc", "optimizing/ssa_phi_elimination.cc", "optimizing/stack_map_stream.cc", "trampolines/trampoline_compiler.cc", "utils/assembler.cc", "utils/jni_macro_assembler.cc", "utils/swap_space.cc", "compiler.cc", "elf_writer.cc", "elf_writer_quick.cc", "image_writer.cc", "oat_writer.cc", ], codegen: { arm: { srcs: [ "jni/quick/arm/calling_convention_arm.cc", "linker/arm/relative_patcher_arm_base.cc", "linker/arm/relative_patcher_thumb2.cc", "optimizing/code_generator_arm_vixl.cc", "optimizing/code_generator_vector_arm_vixl.cc", "optimizing/instruction_simplifier_arm.cc", "optimizing/instruction_simplifier_shared.cc", "optimizing/intrinsics_arm_vixl.cc", "optimizing/nodes_shared.cc", "optimizing/scheduler_arm.cc", "utils/arm/assembler_arm_vixl.cc", "utils/arm/constants_arm.cc", "utils/arm/jni_macro_assembler_arm_vixl.cc", "utils/arm/managed_register_arm.cc", ], }, arm64: { srcs: [ "jni/quick/arm64/calling_convention_arm64.cc", "linker/arm64/relative_patcher_arm64.cc", "optimizing/code_generator_arm64.cc", "optimizing/code_generator_vector_arm64.cc", "optimizing/scheduler_arm64.cc", "optimizing/instruction_simplifier_arm64.cc", "optimizing/intrinsics_arm64.cc", "utils/arm64/assembler_arm64.cc", "utils/arm64/jni_macro_assembler_arm64.cc", "utils/arm64/managed_register_arm64.cc", ], }, mips: { srcs: [ "jni/quick/mips/calling_convention_mips.cc", 
"linker/mips/relative_patcher_mips.cc", "optimizing/code_generator_mips.cc", "optimizing/code_generator_vector_mips.cc", "optimizing/intrinsics_mips.cc", "optimizing/pc_relative_fixups_mips.cc", "utils/mips/assembler_mips.cc", "utils/mips/managed_register_mips.cc", ], }, mips64: { srcs: [ "jni/quick/mips64/calling_convention_mips64.cc", "linker/mips64/relative_patcher_mips64.cc", "optimizing/code_generator_mips64.cc", "optimizing/code_generator_vector_mips64.cc", "optimizing/intrinsics_mips64.cc", "utils/mips64/assembler_mips64.cc", "utils/mips64/managed_register_mips64.cc", ], }, x86: { srcs: [ "jni/quick/x86/calling_convention_x86.cc", "linker/x86/relative_patcher_x86.cc", "linker/x86/relative_patcher_x86_base.cc", "optimizing/code_generator_x86.cc", "optimizing/code_generator_vector_x86.cc", "optimizing/intrinsics_x86.cc", "optimizing/pc_relative_fixups_x86.cc", "optimizing/x86_memory_gen.cc", "utils/x86/assembler_x86.cc", "utils/x86/jni_macro_assembler_x86.cc", "utils/x86/managed_register_x86.cc", ], }, x86_64: { srcs: [ "jni/quick/x86_64/calling_convention_x86_64.cc", "linker/x86_64/relative_patcher_x86_64.cc", "optimizing/intrinsics_x86_64.cc", "optimizing/code_generator_x86_64.cc", "optimizing/code_generator_vector_x86_64.cc", "utils/x86_64/assembler_x86_64.cc", "utils/x86_64/jni_macro_assembler_x86_64.cc", "utils/x86_64/managed_register_x86_64.cc", ], }, }, target: { host: { // For compiler driver TLS. host_ldlibs: ["-lpthread"], }, android: { // For atrace. shared_libs: ["libcutils"], }, }, generated_sources: ["art_compiler_operator_srcs"], shared_libs: [ "libbase", "liblz4", "liblzma", ], include_dirs: ["art/disassembler"], export_include_dirs: ["."], // For SHA-1 checksumming of build ID static: { whole_static_libs: ["libcrypto"], }, shared: { shared_libs: ["libcrypto"], }, } gensrcs { name: "art_compiler_operator_srcs", cmd: "$(location generate-operator-out.py) art/compiler $(in) > $(out)", tool_files: ["generate-operator-out.py"], srcs: [ "compiled_method.h", "dex/dex_to_dex_compiler.h", "driver/compiler_driver.h", "driver/compiler_options.h", "image_writer.h", "optimizing/locations.h", "utils/arm/constants_arm.h", "utils/mips/assembler_mips.h", "utils/mips64/assembler_mips64.h", ], output_extension: "operator_out.cc", } art_cc_library { name: "libart-compiler", defaults: ["libart-compiler-defaults"], codegen: { arm: { // VIXL assembly support for ARM targets. static: { whole_static_libs: [ "libvixl-arm", ], }, shared: { shared_libs: [ "libvixl-arm", ], }, }, arm64: { // VIXL assembly support for ARM64 targets. static: { whole_static_libs: [ "libvixl-arm64", ], }, shared: { shared_libs: [ "libvixl-arm64", ], }, }, }, shared_libs: [ "libart", "libart-dexlayout", ], } art_cc_library { name: "libartd-compiler", defaults: [ "art_debug_defaults", "libart-compiler-defaults", ], codegen: { arm: { // VIXL assembly support for ARM targets. static: { whole_static_libs: [ "libvixld-arm", ], }, shared: { shared_libs: [ "libvixld-arm", ], }, }, arm64: { // VIXL assembly support for ARM64 targets. 
static: { whole_static_libs: [ "libvixld-arm64", ], }, shared: { shared_libs: [ "libvixld-arm64", ], }, }, }, shared_libs: [ "libartd", "libartd-dexlayout" ], } art_cc_library { name: "libart-compiler-gtest", defaults: ["libart-gtest-defaults"], srcs: ["common_compiler_test.cc"], shared_libs: [ "libartd-compiler", "libart-runtime-gtest", "libbase", ], } art_cc_test { name: "art_compiler_tests", defaults: [ "art_gtest_defaults", ], srcs: [ "compiled_method_test.cc", "debug/dwarf/dwarf_test.cc", "dex/dex_to_dex_decompiler_test.cc", "driver/compiled_method_storage_test.cc", "driver/compiler_driver_test.cc", "elf_writer_test.cc", "exception_test.cc", "image_test.cc", "image_write_read_test.cc", "jni/jni_compiler_test.cc", "linker/method_bss_mapping_encoder_test.cc", "linker/multi_oat_relative_patcher_test.cc", "linker/output_stream_test.cc", "oat_test.cc", "optimizing/bounds_check_elimination_test.cc", "optimizing/dominator_test.cc", "optimizing/find_loops_test.cc", "optimizing/graph_checker_test.cc", "optimizing/graph_test.cc", "optimizing/gvn_test.cc", "optimizing/induction_var_analysis_test.cc", "optimizing/induction_var_range_test.cc", "optimizing/licm_test.cc", "optimizing/live_interval_test.cc", "optimizing/loop_optimization_test.cc", "optimizing/nodes_test.cc", "optimizing/nodes_vector_test.cc", "optimizing/parallel_move_test.cc", "optimizing/pretty_printer_test.cc", "optimizing/reference_type_propagation_test.cc", "optimizing/side_effects_test.cc", "optimizing/ssa_liveness_analysis_test.cc", "optimizing/ssa_test.cc", "optimizing/stack_map_test.cc", "optimizing/suspend_check_test.cc", "utils/atomic_dex_ref_map_test.cc", "utils/dedupe_set_test.cc", "utils/intrusive_forward_list_test.cc", "utils/string_reference_test.cc", "utils/swap_space_test.cc", "utils/test_dex_file_builder_test.cc", "verifier_deps_test.cc", "jni/jni_cfi_test.cc", "optimizing/codegen_test.cc", "optimizing/load_store_analysis_test.cc", "optimizing/optimizing_cfi_test.cc", "optimizing/scheduler_test.cc", ], codegen: { arm: { srcs: [ "linker/arm/relative_patcher_thumb2_test.cc", "utils/arm/managed_register_arm_test.cc", ], }, arm64: { srcs: [ "linker/arm64/relative_patcher_arm64_test.cc", "utils/arm64/managed_register_arm64_test.cc", ], }, mips: { srcs: [ "linker/mips/relative_patcher_mips_test.cc", "linker/mips/relative_patcher_mips32r6_test.cc", ], }, mips64: { srcs: [ "linker/mips64/relative_patcher_mips64_test.cc", "utils/mips64/managed_register_mips64_test.cc", ], }, x86: { srcs: [ "linker/x86/relative_patcher_x86_test.cc", "utils/x86/managed_register_x86_test.cc", // These tests are testing architecture-independent // functionality, but happen to use x86 codegen as part of the // test. 
"optimizing/constant_folding_test.cc", "optimizing/dead_code_elimination_test.cc", "optimizing/linearize_test.cc", "optimizing/live_ranges_test.cc", "optimizing/liveness_test.cc", "optimizing/register_allocator_test.cc", ], }, x86_64: { srcs: [ "linker/x86_64/relative_patcher_x86_64_test.cc", ], }, }, shared_libs: [ "libartd-compiler", "libvixld-arm", "libvixld-arm64", "libbacktrace", "libnativeloader", ], target: { host: { shared_libs: [ "libartd-simulator", ], }, }, } art_cc_test { name: "art_compiler_host_tests", device_supported: false, defaults: [ "art_gtest_defaults", ], codegen: { arm: { srcs: [ "utils/assembler_thumb_test.cc", ], }, mips: { srcs: [ "optimizing/emit_swap_mips_test.cc", "utils/mips/assembler_mips_test.cc", "utils/mips/assembler_mips32r5_test.cc", "utils/mips/assembler_mips32r6_test.cc", ], }, mips64: { srcs: [ "utils/mips64/assembler_mips64_test.cc", ], }, x86: { srcs: [ "utils/x86/assembler_x86_test.cc", ], }, x86_64: { srcs: [ "utils/x86_64/assembler_x86_64_test.cc", ], }, }, shared_libs: [ "libartd-compiler", "libvixld-arm", "libvixld-arm64", ], } android-platform-art-8.1.0+r23/compiler/cfi_test.h000066400000000000000000000135431336577252300217560ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_CFI_TEST_H_ #define ART_COMPILER_CFI_TEST_H_ #include #include #include #include "arch/instruction_set.h" #include "base/enums.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/dwarf_test.h" #include "debug/dwarf/headers.h" #include "disassembler/disassembler.h" #include "gtest/gtest.h" #include "thread.h" namespace art { constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT; class CFITest : public dwarf::DwarfTest { public: void GenerateExpected(FILE* f, InstructionSet isa, const char* isa_str, const std::vector& actual_asm, const std::vector& actual_cfi) { std::vector lines; // Print the raw bytes. fprintf(f, "static constexpr uint8_t expected_asm_%s[] = {", isa_str); HexDump(f, actual_asm); fprintf(f, "\n};\n"); fprintf(f, "static constexpr uint8_t expected_cfi_%s[] = {", isa_str); HexDump(f, actual_cfi); fprintf(f, "\n};\n"); // Pretty-print CFI opcodes. constexpr bool is64bit = false; dwarf::DebugFrameOpCodeWriter<> initial_opcodes; dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_); std::vector debug_frame_patches; dwarf::WriteFDE(is64bit, 0, 0, 0, actual_asm.size(), ArrayRef(actual_cfi), kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); ReformatCfi(Objdump(false, "-W"), &lines); // Pretty-print assembly. const uint8_t* asm_base = actual_asm.data(); const uint8_t* asm_end = asm_base + actual_asm.size(); auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true, is64bit ? &Thread::DumpThreadOffset : &Thread::DumpThreadOffset); std::unique_ptr disasm(Disassembler::Create(isa, opts)); std::stringstream stream; const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 
1 : 0); disasm->Dump(stream, base, base + actual_asm.size()); ReformatAsm(&stream, &lines); // Print CFI and assembly interleaved. std::stable_sort(lines.begin(), lines.end(), CompareByAddress); for (const std::string& line : lines) { fprintf(f, "// %s\n", line.c_str()); } fprintf(f, "\n"); } private: // Helper - get offset just past the end of given string. static size_t FindEndOf(const std::string& str, const char* substr) { size_t pos = str.find(substr); CHECK_NE(std::string::npos, pos); return pos + strlen(substr); } // Spit to lines and remove raw instruction bytes. static void ReformatAsm(std::stringstream* stream, std::vector* output) { std::string line; while (std::getline(*stream, line)) { line = line.substr(0, FindEndOf(line, ": ")) + line.substr(FindEndOf(line, "\t")); size_t pos; while ((pos = line.find(" ")) != std::string::npos) { line = line.replace(pos, 2, " "); } while (!line.empty() && line.back() == ' ') { line.pop_back(); } output->push_back(line); } } // Find interesting parts of objdump output and prefix the lines with address. static void ReformatCfi(const std::vector& lines, std::vector* output) { std::string address; for (const std::string& line : lines) { if (line.find("DW_CFA_nop") != std::string::npos) { // Ignore. } else if (line.find("DW_CFA_advance_loc") != std::string::npos) { // The last 8 characters are the address. address = "0x" + line.substr(line.size() - 8); } else if (line.find("DW_CFA_") != std::string::npos) { std::string new_line(line); // "bad register" warning is caused by always using host (x86) objdump. const char* bad_reg = "bad register: "; size_t pos; if ((pos = new_line.find(bad_reg)) != std::string::npos) { new_line = new_line.replace(pos, strlen(bad_reg), ""); } // Remove register names in parentheses since they have x86 names. if ((pos = new_line.find(" (")) != std::string::npos) { new_line = new_line.replace(pos, FindEndOf(new_line, ")") - pos, ""); } // Use the .cfi_ prefix. new_line = ".cfi_" + new_line.substr(FindEndOf(new_line, "DW_CFA_")); output->push_back(address + ": " + new_line); } } } // Compare strings by the address prefix. static bool CompareByAddress(const std::string& lhs, const std::string& rhs) { EXPECT_EQ(lhs[10], ':'); EXPECT_EQ(rhs[10], ':'); return strncmp(lhs.c_str(), rhs.c_str(), 10) < 0; } // Pretty-print byte array. 12 bytes per line. static void HexDump(FILE* f, const std::vector& data) { for (size_t i = 0; i < data.size(); i++) { fprintf(f, i % 12 == 0 ? "\n " : " "); // Whitespace. fprintf(f, "0x%02X,", data[i]); } } }; } // namespace art #endif // ART_COMPILER_CFI_TEST_H_ android-platform-art-8.1.0+r23/compiler/common_compiler_test.cc000066400000000000000000000334631336577252300245400ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "common_compiler_test.h" #include "arch/instruction_set_features.h" #include "art_field-inl.h" #include "art_method-inl.h" #include "base/callee_save_type.h" #include "base/enums.h" #include "class_linker.h" #include "compiled_method.h" #include "dex/quick_compiler_callbacks.h" #include "dex/verification_results.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "interpreter/interpreter.h" #include "mirror/class_loader.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" #include "mirror/object-inl.h" #include "oat_quick_method_header.h" #include "scoped_thread_state_change-inl.h" #include "thread-current-inl.h" #include "utils.h" namespace art { CommonCompilerTest::CommonCompilerTest() {} CommonCompilerTest::~CommonCompilerTest() {} void CommonCompilerTest::MakeExecutable(ArtMethod* method) { CHECK(method != nullptr); const CompiledMethod* compiled_method = nullptr; if (!method->IsAbstract()) { mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache(); const DexFile& dex_file = *dex_cache->GetDexFile(); compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method->GetDexMethodIndex())); } // If the code size is 0 it means the method was skipped due to profile guided compilation. if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) { ArrayRef code = compiled_method->GetQuickCode(); const uint32_t code_size = code.size(); ArrayRef vmap_table = compiled_method->GetVmapTable(); const uint32_t vmap_table_offset = vmap_table.empty() ? 0u : sizeof(OatQuickMethodHeader) + vmap_table.size(); // The method info is directly before the vmap table. ArrayRef method_info = compiled_method->GetMethodInfo(); const uint32_t method_info_offset = method_info.empty() ? 0u : vmap_table_offset + method_info.size(); OatQuickMethodHeader method_header(vmap_table_offset, method_info_offset, compiled_method->GetFrameSizeInBytes(), compiled_method->GetCoreSpillMask(), compiled_method->GetFpSpillMask(), code_size); header_code_and_maps_chunks_.push_back(std::vector()); std::vector* chunk = &header_code_and_maps_chunks_.back(); const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet()); const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size; chunk->reserve(size + max_padding); chunk->resize(sizeof(method_header)); memcpy(&(*chunk)[0], &method_header, sizeof(method_header)); chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end()); chunk->insert(chunk->begin(), method_info.begin(), method_info.end()); chunk->insert(chunk->end(), code.begin(), code.end()); CHECK_EQ(chunk->size(), size); const void* unaligned_code_ptr = chunk->data() + (size - code_size); size_t offset = dchecked_integral_cast(reinterpret_cast(unaligned_code_ptr)); size_t padding = compiled_method->AlignCode(offset) - offset; // Make sure no resizing takes place. CHECK_GE(chunk->capacity(), chunk->size() + padding); chunk->insert(chunk->begin(), padding, 0); const void* code_ptr = reinterpret_cast(unaligned_code_ptr) + padding; CHECK_EQ(code_ptr, static_cast(chunk->data() + (chunk->size() - code_size))); MakeExecutable(code_ptr, code.size()); const void* method_code = CompiledMethod::CodePointer(code_ptr, compiled_method->GetInstructionSet()); LOG(INFO) << "MakeExecutable " << method->PrettyMethod() << " code=" << method_code; class_linker_->SetEntryPointsToCompiledCode(method, method_code); } else { // No code? 
You must mean to go into the interpreter. // Or the generic JNI... class_linker_->SetEntryPointsToInterpreter(method); } } void CommonCompilerTest::MakeExecutable(const void* code_start, size_t code_length) { CHECK(code_start != nullptr); CHECK_NE(code_length, 0U); uintptr_t data = reinterpret_cast(code_start); uintptr_t base = RoundDown(data, kPageSize); uintptr_t limit = RoundUp(data + code_length, kPageSize); uintptr_t len = limit - base; int result = mprotect(reinterpret_cast(base), len, PROT_READ | PROT_WRITE | PROT_EXEC); CHECK_EQ(result, 0); FlushInstructionCache(reinterpret_cast(base), reinterpret_cast(base + len)); } void CommonCompilerTest::MakeExecutable(ObjPtr class_loader, const char* class_name) { std::string class_descriptor(DotToDescriptor(class_name)); Thread* self = Thread::Current(); StackHandleScope<1> hs(self); Handle loader(hs.NewHandle(class_loader)); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); CHECK(klass != nullptr) << "Class not found " << class_name; PointerSize pointer_size = class_linker_->GetImagePointerSize(); for (auto& m : klass->GetMethods(pointer_size)) { MakeExecutable(&m); } } // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. std::unordered_set* CommonCompilerTest::GetImageClasses() { // Empty set: by default no classes are retained in the image. return new std::unordered_set(); } // Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. std::unordered_set* CommonCompilerTest::GetCompiledClasses() { // Null, no selection of compiled-classes. return nullptr; } // Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. std::unordered_set* CommonCompilerTest::GetCompiledMethods() { // Null, no selection of compiled-methods. return nullptr; } // Get ProfileCompilationInfo that should be passed to the driver. ProfileCompilationInfo* CommonCompilerTest::GetProfileCompilationInfo() { // Null, profile information will not be taken into account. return nullptr; } void CommonCompilerTest::SetUp() { CommonRuntimeTest::SetUp(); { ScopedObjectAccess soa(Thread::Current()); const InstructionSet instruction_set = kRuntimeISA; // Take the default set of instruction features from the build. 
instruction_set_features_ = InstructionSetFeatures::FromCppDefines(); runtime_->SetInstructionSet(instruction_set); for (uint32_t i = 0; i < static_cast(CalleeSaveType::kLastCalleeSaveType); ++i) { CalleeSaveType type = CalleeSaveType(i); if (!runtime_->HasCalleeSaveMethod(type)) { runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type); } } timer_.reset(new CumulativeLogger("Compilation times")); CreateCompilerDriver(compiler_kind_, instruction_set); } } void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads) { compiler_options_->boot_image_ = true; compiler_options_->SetCompilerFilter(GetCompilerFilter()); compiler_driver_.reset(new CompilerDriver(compiler_options_.get(), verification_results_.get(), kind, isa, instruction_set_features_.get(), GetImageClasses(), GetCompiledClasses(), GetCompiledMethods(), number_of_threads, /* dump_stats */ true, /* dump_passes */ true, timer_.get(), /* swap_fd */ -1, GetProfileCompilationInfo())); // We typically don't generate an image in unit tests, disable this optimization by default. compiler_driver_->SetSupportBootImageFixup(false); } void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) { CommonRuntimeTest::SetUpRuntimeOptions(options); compiler_options_.reset(new CompilerOptions); verification_results_.reset(new VerificationResults(compiler_options_.get())); QuickCompilerCallbacks* callbacks = new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp); callbacks->SetVerificationResults(verification_results_.get()); callbacks_.reset(callbacks); } Compiler::Kind CommonCompilerTest::GetCompilerKind() const { return compiler_kind_; } void CommonCompilerTest::SetCompilerKind(Compiler::Kind compiler_kind) { compiler_kind_ = compiler_kind; } InstructionSet CommonCompilerTest::GetInstructionSet() const { DCHECK(compiler_driver_.get() != nullptr); return compiler_driver_->GetInstructionSet(); } void CommonCompilerTest::TearDown() { timer_.reset(); compiler_driver_.reset(); callbacks_.reset(); verification_results_.reset(); compiler_options_.reset(); image_reservation_.reset(); CommonRuntimeTest::TearDown(); } void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const char* class_name) { std::string class_descriptor(DotToDescriptor(class_name)); Thread* self = Thread::Current(); StackHandleScope<1> hs(self); Handle loader(hs.NewHandle(class_loader)); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), loader); CHECK(klass != nullptr) << "Class not found " << class_name; auto pointer_size = class_linker_->GetImagePointerSize(); for (auto& m : klass->GetMethods(pointer_size)) { CompileMethod(&m); } } void CommonCompilerTest::CompileMethod(ArtMethod* method) { CHECK(method != nullptr); TimingLogger timings("CommonTest::CompileMethod", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); compiler_driver_->CompileOne(Thread::Current(), method, &timings); TimingLogger::ScopedTiming t2("MakeExecutable", &timings); MakeExecutable(method); } void CommonCompilerTest::CompileDirectMethod(Handle class_loader, const char* class_name, const char* method_name, const char* signature) { std::string class_descriptor(DotToDescriptor(class_name)); Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); CHECK(klass != nullptr) << "Class not found " << class_name; auto pointer_size = class_linker_->GetImagePointerSize(); ArtMethod* method = 
klass->FindClassMethod(method_name, signature, pointer_size); CHECK(method != nullptr && method->IsDirect()) << "Direct method not found: " << class_name << "." << method_name << signature; CompileMethod(method); } void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) { std::string class_descriptor(DotToDescriptor(class_name)); Thread* self = Thread::Current(); mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader); CHECK(klass != nullptr) << "Class not found " << class_name; auto pointer_size = class_linker_->GetImagePointerSize(); ArtMethod* method = klass->FindClassMethod(method_name, signature, pointer_size); CHECK(method != nullptr && !method->IsDirect()) << "Virtual method not found: " << class_name << "." << method_name << signature; CompileMethod(method); } void CommonCompilerTest::ReserveImageSpace() { // Reserve where the image will be loaded up front so that other parts of test set up don't // accidentally end up colliding with the fixed memory address when we need to load the image. std::string error_msg; MemMap::Init(); image_reservation_.reset(MemMap::MapAnonymous("image reservation", reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS), (size_t)120 * 1024 * 1024, // 120MB PROT_NONE, false /* no need for 4gb flag with fixed mmap */, false /* not reusing existing reservation */, &error_msg)); CHECK(image_reservation_.get() != nullptr) << error_msg; } void CommonCompilerTest::UnreserveImageSpace() { image_reservation_.reset(); } } // namespace art android-platform-art-8.1.0+r23/compiler/common_compiler_test.h000066400000000000000000000102201336577252300243640ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_ #define ART_COMPILER_COMMON_COMPILER_TEST_H_ #include <list> #include <unordered_set> #include <vector> #include "common_runtime_test.h" #include "compiler.h" #include "jit/profile_compilation_info.h" #include "oat_file.h" namespace art { namespace mirror { class ClassLoader; } // namespace mirror class CompilerDriver; class CompilerOptions; class CumulativeLogger; class VerificationResults; template <typename T> class Handle; class CommonCompilerTest : public CommonRuntimeTest { public: CommonCompilerTest(); ~CommonCompilerTest(); // Create an OatMethod based on pointers (for unit tests). OatFile::OatMethod CreateOatMethod(const void* code); void MakeExecutable(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); static void MakeExecutable(const void* code_start, size_t code_length); void MakeExecutable(ObjPtr<mirror::ClassLoader> class_loader, const char* class_name) REQUIRES_SHARED(Locks::mutator_lock_); protected: virtual void SetUp(); virtual void SetUpRuntimeOptions(RuntimeOptions* options); Compiler::Kind GetCompilerKind() const; void SetCompilerKind(Compiler::Kind compiler_kind); InstructionSet GetInstructionSet() const; // Get the set of image classes given to the compiler-driver in SetUp.
Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. virtual std::unordered_set<std::string>* GetImageClasses(); // Get the set of compiled classes given to the compiler-driver in SetUp. Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. virtual std::unordered_set<std::string>* GetCompiledClasses(); // Get the set of compiled methods given to the compiler-driver in SetUp. Note: the compiler // driver assumes ownership of the set, so the test should properly release the set. virtual std::unordered_set<std::string>* GetCompiledMethods(); virtual ProfileCompilationInfo* GetProfileCompilationInfo(); virtual CompilerFilter::Filter GetCompilerFilter() const { return CompilerFilter::kDefaultCompilerFilter; } virtual void TearDown(); void CompileClass(mirror::ClassLoader* class_loader, const char* class_name) REQUIRES_SHARED(Locks::mutator_lock_); void CompileMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) REQUIRES_SHARED(Locks::mutator_lock_); void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name, const char* method_name, const char* signature) REQUIRES_SHARED(Locks::mutator_lock_); void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U); void ReserveImageSpace(); void UnreserveImageSpace(); Compiler::Kind compiler_kind_ = Compiler::kOptimizing; std::unique_ptr<CompilerOptions> compiler_options_; std::unique_ptr<VerificationResults> verification_results_; std::unique_ptr<CompilerDriver> compiler_driver_; std::unique_ptr<CumulativeLogger> timer_; std::unique_ptr<const InstructionSetFeatures> instruction_set_features_; private: std::unique_ptr<MemMap> image_reservation_; // Chunks must not move their storage after being created - use the node-based std::list. std::list<std::vector<uint8_t>> header_code_and_maps_chunks_; }; } // namespace art #endif // ART_COMPILER_COMMON_COMPILER_TEST_H_
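// A minimal sketch of how this fixture is typically used. The test names and the
// "Main" dex/method below are illustrative assumptions, not part of this header;
// LoadDex() and ScopedObjectAccess come from the CommonRuntimeTest base class.
//
//   class ExampleCompilerTest : public CommonCompilerTest {
//    protected:
//     CompilerFilter::Filter GetCompilerFilter() const OVERRIDE {
//       return CompilerFilter::kSpeed;  // Force full compilation in this test.
//     }
//   };
//
//   TEST_F(ExampleCompilerTest, CompilesMain) {
//     ScopedObjectAccess soa(Thread::Current());
//     StackHandleScope<1> hs(soa.Self());
//     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
//         soa.Decode<mirror::ClassLoader>(LoadDex("Main"))));
//     CompileDirectMethod(class_loader, "Main", "main", "([Ljava/lang/String;)V");
//   }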
*/ #include "compiled_method.h" #include "driver/compiled_method_storage.h" #include "driver/compiler_driver.h" #include "utils/swap_space.h" namespace art { CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, const ArrayRef& quick_code) : compiler_driver_(compiler_driver), instruction_set_(instruction_set), quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)) { } CompiledCode::~CompiledCode() { compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_); } bool CompiledCode::operator==(const CompiledCode& rhs) const { if (quick_code_ != nullptr) { if (rhs.quick_code_ == nullptr) { return false; } else if (quick_code_->size() != rhs.quick_code_->size()) { return false; } else { return std::equal(quick_code_->begin(), quick_code_->end(), rhs.quick_code_->begin()); } } return (rhs.quick_code_ == nullptr); } size_t CompiledCode::AlignCode(size_t offset) const { return AlignCode(offset, instruction_set_); } size_t CompiledCode::AlignCode(size_t offset, InstructionSet instruction_set) { return RoundUp(offset, GetInstructionSetAlignment(instruction_set)); } size_t CompiledCode::CodeDelta() const { return CodeDelta(instruction_set_); } size_t CompiledCode::CodeDelta(InstructionSet instruction_set) { switch (instruction_set) { case kArm: case kArm64: case kMips: case kMips64: case kX86: case kX86_64: return 0; case kThumb2: { // +1 to set the low-order bit so a BLX will switch to Thumb mode return 1; } default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; return 0; } } const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet instruction_set) { switch (instruction_set) { case kArm: case kArm64: case kMips: case kMips64: case kX86: case kX86_64: return code_pointer; case kThumb2: { uintptr_t address = reinterpret_cast(code_pointer); // Set the low-order bit so a BLX will switch to Thumb mode address |= 0x1; return reinterpret_cast(address); } default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; return nullptr; } } CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const ArrayRef& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef& method_info, const ArrayRef& vmap_table, const ArrayRef& cfi_info, const ArrayRef& patches) : CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask), method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)), vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)), cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)), patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) { } CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( CompilerDriver* driver, InstructionSet instruction_set, const ArrayRef& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef& method_info, const ArrayRef& vmap_table, const ArrayRef& cfi_info, const ArrayRef& patches) { SwapAllocator alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator()); CompiledMethod* ret = alloc.allocate(1); alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask, fp_spill_mask, method_info, vmap_table, cfi_info, patches); return ret; } void 
void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) { SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator()); alloc.destroy(m); alloc.deallocate(m, 1); } CompiledMethod::~CompiledMethod() { CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage(); storage->ReleaseLinkerPatches(patches_); storage->ReleaseCFIInfo(cfi_info_); storage->ReleaseVMapTable(vmap_table_); storage->ReleaseMethodInfo(method_info_); } } // namespace art android-platform-art-8.1.0+r23/compiler/compiled_method.h000066400000000000000000000401561336577252300233120ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_COMPILED_METHOD_H_ #define ART_COMPILER_COMPILED_METHOD_H_ #include <iosfwd> #include <memory> #include <string> #include <vector> #include "arch/instruction_set.h" #include "base/array_ref.h" #include "base/bit_utils.h" #include "base/length_prefixed_array.h" #include "dex_file_types.h" #include "method_reference.h" namespace art { class CompilerDriver; class CompiledMethodStorage; class CompiledCode { public: // For Quick to supply a code blob CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set, const ArrayRef<const uint8_t>& quick_code); virtual ~CompiledCode(); InstructionSet GetInstructionSet() const { return instruction_set_; } ArrayRef<const uint8_t> GetQuickCode() const { return GetArray(quick_code_); } bool operator==(const CompiledCode& rhs) const; // To align an offset from a page-aligned value to make it suitable // for code storage. For example on ARM, to ensure that PC relative // value computations work out as expected. size_t AlignCode(size_t offset) const; static size_t AlignCode(size_t offset, InstructionSet instruction_set); // Returns the difference between the code address and a usable PC. // Mainly to cope with kThumb2 where the lower bit must be set. size_t CodeDelta() const; static size_t CodeDelta(InstructionSet instruction_set); // Returns a pointer suitable for invoking the code at the argument // code_pointer address. Mainly to cope with kThumb2 where the // lower bit must be set to indicate Thumb mode. static const void* CodePointer(const void* code_pointer, InstructionSet instruction_set);
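// Worked example for CodeDelta()/CodePointer() above, grounded in the Thumb2
// cases of compiled_method.cc: for Thumb2 code placed at address 0x1000,
//
//   CompiledCode::CodeDelta(kThumb2)          // == 1
//   CompiledCode::CodePointer(code, kThumb2)  // == 0x1001 (low bit set)
//
// so a BLX to the returned pointer switches the core into Thumb state, while
// every other instruction set returns the address unchanged with delta 0.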
protected: template <typename T> static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array) { if (array == nullptr) { return ArrayRef<const T>(); } DCHECK_NE(array->size(), 0u); return ArrayRef<const T>(&array->At(0), array->size()); } CompilerDriver* GetCompilerDriver() { return compiler_driver_; } private: CompilerDriver* const compiler_driver_; const InstructionSet instruction_set_; // Used to store the PIC code for Quick. const LengthPrefixedArray<uint8_t>* const quick_code_; }; class SrcMapElem { public: uint32_t from_; int32_t to_; }; inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) { if (lhs.from_ != rhs.from_) { return lhs.from_ < rhs.from_; } return lhs.to_ < rhs.to_; } inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) { return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_; } class LinkerPatch { public: // Note: We explicitly specify the underlying type of the enum because GCC // would otherwise select a bigger underlying type and then complain that // 'art::LinkerPatch::patch_type_' is too small to hold all // values of 'enum class art::LinkerPatch::Type' // which is ridiculous given we have only a handful of values here. If we // choose to squeeze the Type into fewer than 8 bits, we'll have to declare // patch_type_ as an uintN_t and do explicit static_cast<>s. enum class Type : uint8_t { kMethodRelative, // NOTE: Actual patching is instruction_set-dependent. kMethodBssEntry, // NOTE: Actual patching is instruction_set-dependent. kCall, kCallRelative, // NOTE: Actual patching is instruction_set-dependent. kTypeRelative, // NOTE: Actual patching is instruction_set-dependent. kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent. kStringRelative, // NOTE: Actual patching is instruction_set-dependent. kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent. kBakerReadBarrierBranch, // NOTE: Actual patching is instruction_set-dependent. }; static LinkerPatch RelativeMethodPatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_method_idx) { LinkerPatch patch(literal_offset, Type::kMethodRelative, target_dex_file); patch.method_idx_ = target_method_idx; patch.pc_insn_offset_ = pc_insn_offset; return patch; } static LinkerPatch MethodBssEntryPatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_method_idx) { LinkerPatch patch(literal_offset, Type::kMethodBssEntry, target_dex_file); patch.method_idx_ = target_method_idx; patch.pc_insn_offset_ = pc_insn_offset; return patch; } static LinkerPatch CodePatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t target_method_idx) { LinkerPatch patch(literal_offset, Type::kCall, target_dex_file); patch.method_idx_ = target_method_idx; return patch; } static LinkerPatch RelativeCodePatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t target_method_idx) { LinkerPatch patch(literal_offset, Type::kCallRelative, target_dex_file); patch.method_idx_ = target_method_idx; return patch; } static LinkerPatch RelativeTypePatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_type_idx) { LinkerPatch patch(literal_offset, Type::kTypeRelative, target_dex_file); patch.type_idx_ = target_type_idx; patch.pc_insn_offset_ = pc_insn_offset; return patch; } static LinkerPatch TypeBssEntryPatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_type_idx) { LinkerPatch patch(literal_offset, Type::kTypeBssEntry, target_dex_file); patch.type_idx_ = target_type_idx; patch.pc_insn_offset_ = pc_insn_offset; return patch; } static LinkerPatch RelativeStringPatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_string_idx) { LinkerPatch patch(literal_offset, Type::kStringRelative, target_dex_file); patch.string_idx_ = target_string_idx; patch.pc_insn_offset_ = pc_insn_offset;
return patch; } static LinkerPatch StringBssEntryPatch(size_t literal_offset, const DexFile* target_dex_file, uint32_t pc_insn_offset, uint32_t target_string_idx) { LinkerPatch patch(literal_offset, Type::kStringBssEntry, target_dex_file); patch.string_idx_ = target_string_idx; patch.pc_insn_offset_ = pc_insn_offset; return patch; } static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset, uint32_t custom_value1 = 0u, uint32_t custom_value2 = 0u) { LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, nullptr); patch.baker_custom_value1_ = custom_value1; patch.baker_custom_value2_ = custom_value2; return patch; } LinkerPatch(const LinkerPatch& other) = default; LinkerPatch& operator=(const LinkerPatch& other) = default; size_t LiteralOffset() const { return literal_offset_; } Type GetType() const { return patch_type_; } bool IsPcRelative() const { switch (GetType()) { case Type::kMethodRelative: case Type::kMethodBssEntry: case Type::kCallRelative: case Type::kTypeRelative: case Type::kTypeBssEntry: case Type::kStringRelative: case Type::kStringBssEntry: case Type::kBakerReadBarrierBranch: return true; default: return false; } } MethodReference TargetMethod() const { DCHECK(patch_type_ == Type::kMethodRelative || patch_type_ == Type::kMethodBssEntry || patch_type_ == Type::kCall || patch_type_ == Type::kCallRelative); return MethodReference(target_dex_file_, method_idx_); } const DexFile* TargetTypeDexFile() const { DCHECK(patch_type_ == Type::kTypeRelative || patch_type_ == Type::kTypeBssEntry); return target_dex_file_; } dex::TypeIndex TargetTypeIndex() const { DCHECK(patch_type_ == Type::kTypeRelative || patch_type_ == Type::kTypeBssEntry); return dex::TypeIndex(type_idx_); } const DexFile* TargetStringDexFile() const { DCHECK(patch_type_ == Type::kStringRelative || patch_type_ == Type::kStringBssEntry); return target_dex_file_; } dex::StringIndex TargetStringIndex() const { DCHECK(patch_type_ == Type::kStringRelative || patch_type_ == Type::kStringBssEntry); return dex::StringIndex(string_idx_); } uint32_t PcInsnOffset() const { DCHECK(patch_type_ == Type::kMethodRelative || patch_type_ == Type::kMethodBssEntry || patch_type_ == Type::kTypeRelative || patch_type_ == Type::kTypeBssEntry || patch_type_ == Type::kStringRelative || patch_type_ == Type::kStringBssEntry); return pc_insn_offset_; } uint32_t GetBakerCustomValue1() const { DCHECK(patch_type_ == Type::kBakerReadBarrierBranch); return baker_custom_value1_; } uint32_t GetBakerCustomValue2() const { DCHECK(patch_type_ == Type::kBakerReadBarrierBranch); return baker_custom_value2_; } private: LinkerPatch(size_t literal_offset, Type patch_type, const DexFile* target_dex_file) : target_dex_file_(target_dex_file), literal_offset_(literal_offset), patch_type_(patch_type) { cmp1_ = 0u; cmp2_ = 0u; // The compiler rejects methods that are too big, so the compiled code // of a single method really shouldn't be anywhere close to 16MiB. DCHECK(IsUint<24>(literal_offset)); } const DexFile* target_dex_file_; // TODO: Clean up naming. Some patched locations are literals but others are not. uint32_t literal_offset_ : 24; // Method code size up to 16MiB. Type patch_type_ : 8; union { uint32_t cmp1_; // Used for relational operators. uint32_t method_idx_; // Method index for Call/Method patches. uint32_t type_idx_; // Type index for Type patches. uint32_t string_idx_; // String index for String patches.
uint32_t baker_custom_value1_; static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators"); static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators"); static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators"); static_assert(sizeof(baker_custom_value1_) == sizeof(cmp1_), "needed by relational operators"); }; union { // Note: To avoid uninitialized padding on 64-bit systems, we use `size_t` for `cmp2_`. // This allows a hashing function to treat an array of linker patches as raw memory. size_t cmp2_; // Used for relational operators. // Literal offset of the insn loading PC (same as literal_offset if it's the same insn, // may be different if the PC-relative addressing needs multiple insns). uint32_t pc_insn_offset_; uint32_t baker_custom_value2_; static_assert(sizeof(pc_insn_offset_) <= sizeof(cmp2_), "needed by relational operators"); static_assert(sizeof(baker_custom_value2_) <= sizeof(cmp2_), "needed by relational operators"); }; friend bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs); friend bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs); }; std::ostream& operator<<(std::ostream& os, const LinkerPatch::Type& type); inline bool operator==(const LinkerPatch& lhs, const LinkerPatch& rhs) { return lhs.literal_offset_ == rhs.literal_offset_ && lhs.patch_type_ == rhs.patch_type_ && lhs.target_dex_file_ == rhs.target_dex_file_ && lhs.cmp1_ == rhs.cmp1_ && lhs.cmp2_ == rhs.cmp2_; } inline bool operator<(const LinkerPatch& lhs, const LinkerPatch& rhs) { return (lhs.literal_offset_ != rhs.literal_offset_) ? lhs.literal_offset_ < rhs.literal_offset_ : (lhs.patch_type_ != rhs.patch_type_) ? lhs.patch_type_ < rhs.patch_type_ : (lhs.target_dex_file_ != rhs.target_dex_file_) ? lhs.target_dex_file_ < rhs.target_dex_file_ : (lhs.cmp1_ != rhs.cmp1_) ? lhs.cmp1_ < rhs.cmp1_ : lhs.cmp2_ < rhs.cmp2_; } class CompiledMethod FINAL : public CompiledCode { public: // Constructs a CompiledMethod. // Note: Consider using the static allocation methods below that will allocate the CompiledMethod // in the swap space. CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const ArrayRef<const uint8_t>& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); virtual ~CompiledMethod(); static CompiledMethod* SwapAllocCompiledMethod( CompilerDriver* driver, InstructionSet instruction_set, const ArrayRef<const uint8_t>& quick_code, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m); size_t GetFrameSizeInBytes() const { return frame_size_in_bytes_; } uint32_t GetCoreSpillMask() const { return core_spill_mask_; } uint32_t GetFpSpillMask() const { return fp_spill_mask_; } ArrayRef<const uint8_t> GetMethodInfo() const { return GetArray(method_info_); } ArrayRef<const uint8_t> GetVmapTable() const { return GetArray(vmap_table_); } ArrayRef<const uint8_t> GetCFIInfo() const { return GetArray(cfi_info_); } ArrayRef<const LinkerPatch> GetPatches() const { return GetArray(patches_); } private: // For quick code, the size of the activation used by the code. const size_t frame_size_in_bytes_; // For quick code, a bit mask describing spilled GPR callee-save registers.
const uint32_t core_spill_mask_; // For quick code, a bit mask describing spilled FPR callee-save registers. const uint32_t fp_spill_mask_; // For quick code, method specific information that is not very dedupe friendly (method indices). const LengthPrefixedArray<uint8_t>* const method_info_; // For quick code, holds code infos which contain stack maps, inline information, etc. const LengthPrefixedArray<uint8_t>* const vmap_table_; // For quick code, a FDE entry for the debug_frame section. const LengthPrefixedArray<uint8_t>* const cfi_info_; // For quick code, linker patches needed by the method. const LengthPrefixedArray<LinkerPatch>* const patches_; }; } // namespace art #endif // ART_COMPILER_COMPILED_METHOD_H_ android-platform-art-8.1.0+r23/compiler/compiled_method_test.cc000066400000000000000000000243321336577252300245050ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "compiled_method.h" namespace art { TEST(CompiledMethod, SrcMapElemOperators) { SrcMapElem elems[] = { { 1u, -1 }, { 1u, 0 }, { 1u, 1 }, { 2u, -1 }, { 2u, 0 }, // Index 4. { 2u, 1 }, { 2u, 0u }, // Index 6: Arbitrarily add identical SrcMapElem with index 4. }; for (size_t i = 0; i != arraysize(elems); ++i) { for (size_t j = 0; j != arraysize(elems); ++j) { bool expected = (i != 6u ? i : 4u) == (j != 6u ? j : 4u); EXPECT_EQ(expected, elems[i] == elems[j]) << i << " " << j; } } for (size_t i = 0; i != arraysize(elems); ++i) { for (size_t j = 0; j != arraysize(elems); ++j) { bool expected = (i != 6u ? i : 4u) < (j != 6u ? j : 4u); EXPECT_EQ(expected, elems[i] < elems[j]) << i << " " << j; } } } TEST(CompiledMethod, LinkerPatchOperators) { const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1); const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2); LinkerPatch patches[] = { LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Index 3.
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::MethodBssEntryPatch(16u, dex_file1, 3001u, 1001u), LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::CodePatch(16u, dex_file1, 1000u), LinkerPatch::CodePatch(16u, dex_file1, 1001u), LinkerPatch::CodePatch(16u, dex_file2, 1000u), LinkerPatch::CodePatch(16u, dex_file2, 1001u), LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u), LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u), LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u), LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u), LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::RelativeTypePatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeTypePatch(16u, dex_file1, 3001u, 1001u), LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeTypePatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeTypePatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::TypeBssEntryPatch(16u, dex_file1, 3001u, 1001u), LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::TypeBssEntryPatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::RelativeStringPatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeStringPatch(16u, dex_file1, 3001u, 1001u), LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeStringPatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeStringPatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1000u), LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1000u), LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3000u, 1001u), LinkerPatch::StringBssEntryPatch(16u, dex_file1, 3001u, 1001u), LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1000u), LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1000u), LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3000u, 1001u), LinkerPatch::StringBssEntryPatch(16u, dex_file2, 3001u, 1001u), LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 0u), LinkerPatch::BakerReadBarrierBranchPatch(16u, 0u, 1u), LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u), LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 1u), LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1000u), 
LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeMethodPatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1000u), LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::MethodBssEntryPatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::CodePatch(32u, dex_file1, 1000u), LinkerPatch::CodePatch(32u, dex_file1, 1001u), LinkerPatch::CodePatch(32u, dex_file2, 1000u), LinkerPatch::CodePatch(32u, dex_file2, 1001u), LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u), LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u), LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u), LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u), LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1000u), LinkerPatch::RelativeTypePatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeTypePatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeTypePatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeTypePatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1000u), LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::TypeBssEntryPatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::TypeBssEntryPatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1000u), LinkerPatch::RelativeStringPatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::RelativeStringPatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::RelativeStringPatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::RelativeStringPatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1000u), LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1000u), LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3000u, 1001u), LinkerPatch::StringBssEntryPatch(32u, dex_file1, 3001u, 1001u), LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1000u), LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1000u), LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3000u, 1001u), LinkerPatch::StringBssEntryPatch(32u, dex_file2, 3001u, 1001u), LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 0u), LinkerPatch::BakerReadBarrierBranchPatch(32u, 0u, 1u), LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u), LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 1u), 
LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Same as patch at index 3. }; constexpr size_t last_index = arraysize(patches) - 1u; for (size_t i = 0; i != arraysize(patches); ++i) { for (size_t j = 0; j != arraysize(patches); ++j) { bool expected = (i != last_index ? i : 3u) == (j != last_index ? j : 3u); EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j; } } for (size_t i = 0; i != arraysize(patches); ++i) { for (size_t j = 0; j != arraysize(patches); ++j) { bool expected = (i != last_index ? i : 3u) < (j != last_index ? j : 3u); EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j; } } } } // namespace art android-platform-art-8.1.0+r23/compiler/compiler.cc000066400000000000000000000040661336577252300221260ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "compiler.h" #include "base/logging.h" #include "driver/compiler_driver.h" #include "optimizing/optimizing_compiler.h" #include "utils.h" namespace art { Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) { switch (kind) { case kQuick: // TODO: Remove Quick in options. case kOptimizing: return CreateOptimizingCompiler(driver); default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE(); } } bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item, uint32_t method_idx, const DexFile& dex_file) { /* * Skip compilation for pathologically large methods - either by instruction count or num vregs. * Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter * of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space. */ if (code_item.insns_size_in_code_units_ >= UINT16_MAX / 4) { LOG(INFO) << "Method exceeds compiler instruction limit: " << code_item.insns_size_in_code_units_ << " in " << dex_file.PrettyMethod(method_idx); return true; } if (code_item.registers_size_ >= UINT16_MAX / 4) { LOG(INFO) << "Method exceeds compiler virtual register limit: " << code_item.registers_size_ << " in " << dex_file.PrettyMethod(method_idx); return true; } return false; } } // namespace art android-platform-art-8.1.0+r23/compiler/compiler.h000066400000000000000000000100231336577252300217560ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_COMPILER_H_ #define ART_COMPILER_COMPILER_H_ #include "dex_file.h" #include "base/mutex.h" #include "os.h" namespace art { namespace jit { class JitCodeCache; class JitLogger; } // namespace jit namespace mirror { class ClassLoader; class DexCache; } // namespace mirror class ArtMethod; class CompilerDriver; class CompiledMethod; template <typename T> class Handle; class OatWriter; class Thread; class Compiler { public: enum Kind { kQuick, kOptimizing }; enum JniOptimizationFlags { kNone = 0x0, kFastNative = 0x1, kCriticalNative = 0x2, }; static Compiler* Create(CompilerDriver* driver, Kind kind); virtual void Init() = 0; virtual void UnInit() const = 0; virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0; virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, Handle<mirror::DexCache> dex_cache) const = 0; virtual CompiledMethod* JniCompile(uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file, JniOptimizationFlags optimization_flags) const = 0; virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED, jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED, bool osr ATTRIBUTE_UNUSED, jit::JitLogger* jit_logger ATTRIBUTE_UNUSED) REQUIRES_SHARED(Locks::mutator_lock_) { return false; } virtual uintptr_t GetEntryPointOf(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) = 0; uint64_t GetMaximumCompilationTimeBeforeWarning() const { return maximum_compilation_time_before_warning_; } virtual ~Compiler() {} /* * @brief Generate and return Dwarf CFI initialization, if supported by the * backend. * @param driver CompilerDriver for this compile. * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF * information. * @note This is used for backtrace information in generated code. */ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization( const CompilerDriver& driver ATTRIBUTE_UNUSED) const { return nullptr; } // Returns whether the method to compile is such a pathological case that // it's not worth compiling. static bool IsPathologicalCase(const DexFile::CodeItem& code_item, uint32_t method_idx, const DexFile& dex_file); protected: Compiler(CompilerDriver* driver, uint64_t warning) : driver_(driver), maximum_compilation_time_before_warning_(warning) { } CompilerDriver* GetCompilerDriver() const { return driver_; } private: CompilerDriver* const driver_; const uint64_t maximum_compilation_time_before_warning_; DISALLOW_COPY_AND_ASSIGN(Compiler); }; } // namespace art #endif // ART_COMPILER_COMPILER_H_
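// Worked example for IsPathologicalCase() (implemented in compiler.cc): the
// limit UINT16_MAX / 4 is 16383, so a method reaching 16383 16-bit code units
// (roughly 32 KiB of dex bytecode) or that many virtual registers is skipped.
// A hypothetical caller would bail out before invoking the backend:
//
//   if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
//     return nullptr;  // Leave the method to the interpreter.
//   }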
android-platform-art-8.1.0+r23/compiler/debug/000077500000000000000000000000001336577252300210655ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/debug/dwarf/000077500000000000000000000000001336577252300221705ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/debug/dwarf/debug_abbrev_writer.h000066400000000000000000000064441336577252300263520ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ #define ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ #include <cstdint> #include <type_traits> #include <unordered_map> #include "base/casts.h" #include "base/stl_util.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/writer.h" #include "leb128.h" namespace art { namespace dwarf { // Writer for the .debug_abbrev. // // Abbreviations specify the format of entries in .debug_info. // Each entry specifies abbreviation code, which in turn // determines all the attributes and their format. // It is possible to think of them as type definitions. template <typename Vector = std::vector<uint8_t>> class DebugAbbrevWriter FINAL : private Writer<Vector> { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); public: explicit DebugAbbrevWriter(Vector* buffer) : Writer<Vector>(buffer), current_abbrev_(buffer->get_allocator()) { this->PushUint8(0); // Add abbrev table terminator. } // Start abbreviation declaration. void StartAbbrev(Tag tag) { DCHECK(current_abbrev_.empty()); EncodeUnsignedLeb128(&current_abbrev_, tag); has_children_offset_ = current_abbrev_.size(); current_abbrev_.push_back(0); // Place-holder for DW_CHILDREN. } // Add attribute specification. void AddAbbrevAttribute(Attribute name, Form type) { EncodeUnsignedLeb128(&current_abbrev_, name); EncodeUnsignedLeb128(&current_abbrev_, type); } // End abbreviation declaration and return its code. // This will deduplicate abbreviations. uint32_t EndAbbrev(Children has_children) { DCHECK(!current_abbrev_.empty()); current_abbrev_[has_children_offset_] = has_children; auto it = abbrev_codes_.insert(std::make_pair(std::move(current_abbrev_), NextAbbrevCode())); uint32_t abbrev_code = it.first->second; if (UNLIKELY(it.second)) { // Inserted new entry. const Vector& abbrev = it.first->first; this->Pop(); // Remove abbrev table terminator. this->PushUleb128(abbrev_code); this->PushData(abbrev.data(), abbrev.size()); this->PushUint8(0); // Attribute list end. this->PushUint8(0); // Attribute list end. this->PushUint8(0); // Add abbrev table terminator. } current_abbrev_.clear(); return abbrev_code; } // Get the next free abbrev code. uint32_t NextAbbrevCode() { return dchecked_integral_cast<uint32_t>(1 + abbrev_codes_.size()); } private: Vector current_abbrev_; size_t has_children_offset_ = 0; std::unordered_map<Vector, uint32_t, FNVHash<Vector> > abbrev_codes_; };
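// A minimal usage sketch for the writer above (the Tag/Attribute/Form and
// DW_CHILDREN constants come from dwarf_constants.h; the buffer type matches
// the default template argument):
//
//   std::vector<uint8_t> debug_abbrev;
//   DebugAbbrevWriter<> abbrevs(&debug_abbrev);
//   abbrevs.StartAbbrev(DW_TAG_compile_unit);
//   abbrevs.AddAbbrevAttribute(DW_AT_producer, DW_FORM_strp);
//   uint32_t code = abbrevs.EndAbbrev(DW_CHILDREN_yes);
//   // Declaring the same tag/attribute/form sequence again returns the same
//   // code, thanks to the deduplication in EndAbbrev().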
} // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DEBUG_ABBREV_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/debug_frame_opcode_writer.h000066400000000000000000000247251336577252300275400ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ #define ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ #include "base/bit_utils.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/register.h" #include "debug/dwarf/writer.h" namespace art { namespace dwarf { // Writer for .debug_frame opcodes (DWARF-3). // See the DWARF specification for the precise meaning of the opcodes. // The writer is very light-weight, however it will do the following for you: // * Choose the most compact encoding of a given opcode. // * Keep track of current state and convert absolute values to deltas. // * Divide by header-defined factors as appropriate. template <typename Vector = std::vector<uint8_t>> class DebugFrameOpCodeWriter : private Writer<Vector> { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); public: // To save space, DWARF divides most offsets by header-defined factors. // They are used in integer divisions, so we make them constants. // We usually subtract from stack base pointer, so making the factor // negative makes the encoded values positive and thus easier to encode. static constexpr int kDataAlignmentFactor = -4; static constexpr int kCodeAlignmentFactor = 1; // Explicitly advance the program counter to a given location. void ALWAYS_INLINE AdvancePC(int absolute_pc) { DCHECK_GE(absolute_pc, current_pc_); if (UNLIKELY(enabled_)) { int delta = FactorCodeOffset(absolute_pc - current_pc_); if (delta != 0) { if (delta <= 0x3F) { this->PushUint8(DW_CFA_advance_loc | delta); } else if (delta <= UINT8_MAX) { this->PushUint8(DW_CFA_advance_loc1); this->PushUint8(delta); } else if (delta <= UINT16_MAX) { this->PushUint8(DW_CFA_advance_loc2); this->PushUint16(delta); } else { this->PushUint8(DW_CFA_advance_loc4); this->PushUint32(delta); } } current_pc_ = absolute_pc; } } // Override this method to automatically advance the PC before each opcode. virtual void ImplicitlyAdvancePC() { } // Common alias in assemblers - spill relative to current stack pointer. void ALWAYS_INLINE RelOffset(Reg reg, int offset) { Offset(reg, offset - current_cfa_offset_); } // Common alias in assemblers - increase stack frame size. void ALWAYS_INLINE AdjustCFAOffset(int delta) { DefCFAOffset(current_cfa_offset_ + delta); } // Custom alias - spill many registers based on bitmask. void ALWAYS_INLINE RelOffsetForMany(Reg reg_base, int offset, uint32_t reg_mask, int reg_size) { DCHECK(reg_size == 4 || reg_size == 8); if (UNLIKELY(enabled_)) { for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) { // Skip zero bits and go to the set bit. int num_zeros = CTZ(reg_mask); i += num_zeros; reg_mask >>= num_zeros; RelOffset(Reg(reg_base.num() + i), offset); offset += reg_size; } } } // Custom alias - unspill many registers based on bitmask. void ALWAYS_INLINE RestoreMany(Reg reg_base, uint32_t reg_mask) { if (UNLIKELY(enabled_)) { for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) { // Skip zero bits and go to the set bit. int num_zeros = CTZ(reg_mask); i += num_zeros; reg_mask >>= num_zeros; Restore(Reg(reg_base.num() + i)); } } } void ALWAYS_INLINE Nop() { if (UNLIKELY(enabled_)) { this->PushUint8(DW_CFA_nop); } }
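// A short prologue sketch using the helpers above (register numbers and
// offsets are illustrative only, not tied to any particular backend):
//
//   DebugFrameOpCodeWriter<> cfi;
//   cfi.AdvancePC(4);          // After "push {r4, lr}" at code offset 0.
//   cfi.AdjustCFAOffset(8);    // SP dropped by 8 bytes.
//   cfi.RelOffsetForMany(Reg(0), 0, (1u << 4) | (1u << 14), 4);
//                              // r4 saved at CFA-8, lr (r14) at CFA-4.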
void ALWAYS_INLINE Offset(Reg reg, int offset) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); int factored_offset = FactorDataOffset(offset); // May change sign. if (factored_offset >= 0) { if (0 <= reg.num() && reg.num() <= 0x3F) { this->PushUint8(DW_CFA_offset | reg.num()); this->PushUleb128(factored_offset); } else { this->PushUint8(DW_CFA_offset_extended); this->PushUleb128(reg.num()); this->PushUleb128(factored_offset); } } else { uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_offset_extended_sf); this->PushUleb128(reg.num()); this->PushSleb128(factored_offset); } } } void ALWAYS_INLINE Restore(Reg reg) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); if (0 <= reg.num() && reg.num() <= 0x3F) { this->PushUint8(DW_CFA_restore | reg.num()); } else { this->PushUint8(DW_CFA_restore_extended); this->PushUleb128(reg.num()); } } } void ALWAYS_INLINE Undefined(Reg reg) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_undefined); this->PushUleb128(reg.num()); } } void ALWAYS_INLINE SameValue(Reg reg) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_same_value); this->PushUleb128(reg.num()); } } // The previous value of "reg" is stored in register "new_reg". void ALWAYS_INLINE Register(Reg reg, Reg new_reg) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_register); this->PushUleb128(reg.num()); this->PushUleb128(new_reg.num()); } } void ALWAYS_INLINE RememberState() { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_remember_state); } } void ALWAYS_INLINE RestoreState() { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_restore_state); } } void ALWAYS_INLINE DefCFA(Reg reg, int offset) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); if (offset >= 0) { this->PushUint8(DW_CFA_def_cfa); this->PushUleb128(reg.num()); this->PushUleb128(offset); // Non-factored. } else { uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_def_cfa_sf); this->PushUleb128(reg.num()); this->PushSleb128(FactorDataOffset(offset)); } } current_cfa_offset_ = offset; } void ALWAYS_INLINE DefCFARegister(Reg reg) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); this->PushUint8(DW_CFA_def_cfa_register); this->PushUleb128(reg.num()); } } void ALWAYS_INLINE DefCFAOffset(int offset) { if (UNLIKELY(enabled_)) { if (current_cfa_offset_ != offset) { ImplicitlyAdvancePC(); if (offset >= 0) { this->PushUint8(DW_CFA_def_cfa_offset); this->PushUleb128(offset); // Non-factored. } else { uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_def_cfa_offset_sf); this->PushSleb128(FactorDataOffset(offset)); } } } // Unconditional so that the user can still get and check the value. current_cfa_offset_ = offset; } void ALWAYS_INLINE ValOffset(Reg reg, int offset) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); uses_dwarf3_features_ = true; int factored_offset = FactorDataOffset(offset); // May change sign.
if (factored_offset >= 0) { this->PushUint8(DW_CFA_val_offset); this->PushUleb128(reg.num()); this->PushUleb128(factored_offset); } else { this->PushUint8(DW_CFA_val_offset_sf); this->PushUleb128(reg.num()); this->PushSleb128(factored_offset); } } } void ALWAYS_INLINE DefCFAExpression(uint8_t* expr, int expr_size) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_def_cfa_expression); this->PushUleb128(expr_size); this->PushData(expr, expr_size); } } void ALWAYS_INLINE Expression(Reg reg, uint8_t* expr, int expr_size) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_expression); this->PushUleb128(reg.num()); this->PushUleb128(expr_size); this->PushData(expr, expr_size); } } void ALWAYS_INLINE ValExpression(Reg reg, uint8_t* expr, int expr_size) { if (UNLIKELY(enabled_)) { ImplicitlyAdvancePC(); uses_dwarf3_features_ = true; this->PushUint8(DW_CFA_val_expression); this->PushUleb128(reg.num()); this->PushUleb128(expr_size); this->PushData(expr, expr_size); } } bool IsEnabled() const { return enabled_; } void SetEnabled(bool value) { enabled_ = value; if (enabled_ && opcodes_.capacity() == 0u) { opcodes_.reserve(kDefaultCapacity); } } int GetCurrentPC() const { return current_pc_; } int GetCurrentCFAOffset() const { return current_cfa_offset_; } void SetCurrentCFAOffset(int offset) { current_cfa_offset_ = offset; } using Writer<Vector>::data; explicit DebugFrameOpCodeWriter(bool enabled = true, const typename Vector::allocator_type& alloc = typename Vector::allocator_type()) : Writer<Vector>(&opcodes_), enabled_(false), opcodes_(alloc), current_cfa_offset_(0), current_pc_(0), uses_dwarf3_features_(false) { SetEnabled(enabled); } virtual ~DebugFrameOpCodeWriter() { } protected: // Best guess based on couple of observed outputs. static constexpr size_t kDefaultCapacity = 32u; int FactorDataOffset(int offset) const { DCHECK_EQ(offset % kDataAlignmentFactor, 0); return offset / kDataAlignmentFactor; } int FactorCodeOffset(int offset) const { DCHECK_EQ(offset % kCodeAlignmentFactor, 0); return offset / kCodeAlignmentFactor; } bool enabled_; // If disabled all writes are no-ops. Vector opcodes_; int current_cfa_offset_; int current_pc_; bool uses_dwarf3_features_; private: DISALLOW_COPY_AND_ASSIGN(DebugFrameOpCodeWriter); }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/debug_info_entry_writer.h000066400000000000000000000161671336577252300272700ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/ #ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ #define ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ #include <cstdint> #include <unordered_map> #include "base/casts.h" #include "debug/dwarf/debug_abbrev_writer.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/expression.h" #include "debug/dwarf/writer.h" #include "leb128.h" namespace art { namespace dwarf { /* * Writer for debug information entries (DIE). * * Usage: * StartTag(DW_TAG_compile_unit); * WriteStrp(DW_AT_producer, "Compiler name", debug_str); * StartTag(DW_TAG_subprogram); * WriteStrp(DW_AT_name, "Foo", debug_str); * EndTag(); * EndTag(); */ template <typename Vector = std::vector<uint8_t>> class DebugInfoEntryWriter FINAL : private Writer<Vector> { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); public: static constexpr size_t kCompilationUnitHeaderSize = 11; // Start debugging information entry. // Returns offset of the entry in compilation unit. size_t StartTag(Tag tag) { if (inside_entry_) { // Write abbrev code for the previous entry. // Parent entry is finalized before any children are written. this->UpdateUleb128(abbrev_code_offset_, debug_abbrev_->EndAbbrev(DW_CHILDREN_yes)); inside_entry_ = false; } debug_abbrev_->StartAbbrev(tag); // Abbrev code placeholder of sufficient size. abbrev_code_offset_ = this->data()->size(); this->PushUleb128(debug_abbrev_->NextAbbrevCode()); depth_++; inside_entry_ = true; return abbrev_code_offset_ + kCompilationUnitHeaderSize; } // End debugging information entry. void EndTag() { DCHECK_GT(depth_, 0); if (inside_entry_) { // Write abbrev code for this entry. this->UpdateUleb128(abbrev_code_offset_, debug_abbrev_->EndAbbrev(DW_CHILDREN_no)); inside_entry_ = false; // This entry has no children and so there is no terminator. } else { // The entry has already been finalized, so it must be a parent entry // and we need to write the terminator required by DW_CHILDREN_yes.
this->PushUint8(0); } depth_--; } void WriteAddr(Attribute attrib, uint64_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_addr); patch_locations_.push_back(this->data()->size()); if (is64bit_) { this->PushUint64(value); } else { this->PushUint32(value); } } void WriteBlock(Attribute attrib, const uint8_t* ptr, size_t num_bytes) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_block); this->PushUleb128(num_bytes); this->PushData(ptr, num_bytes); } void WriteExprLoc(Attribute attrib, const Expression& expr) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_exprloc); this->PushUleb128(dchecked_integral_cast<uint32_t>(expr.size())); this->PushData(expr.data()); } void WriteData1(Attribute attrib, uint8_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data1); this->PushUint8(value); } void WriteData2(Attribute attrib, uint16_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data2); this->PushUint16(value); } void WriteData4(Attribute attrib, uint32_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data4); this->PushUint32(value); } void WriteData8(Attribute attrib, uint64_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_data8); this->PushUint64(value); } void WriteSecOffset(Attribute attrib, uint32_t offset) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_sec_offset); this->PushUint32(offset); } void WriteSdata(Attribute attrib, int value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_sdata); this->PushSleb128(value); } void WriteUdata(Attribute attrib, int value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_udata); this->PushUleb128(value); } void WriteUdata(Attribute attrib, uint32_t value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_udata); this->PushUleb128(value); } void WriteFlag(Attribute attrib, bool value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_flag); this->PushUint8(value ?
1 : 0); } void WriteFlagPresent(Attribute attrib) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_flag_present); } void WriteRef4(Attribute attrib, uint32_t cu_offset) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_ref4); this->PushUint32(cu_offset); } void WriteRef(Attribute attrib, uint32_t cu_offset) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_ref_udata); this->PushUleb128(cu_offset); } void WriteString(Attribute attrib, const char* value) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_string); this->PushString(value); } void WriteStrp(Attribute attrib, size_t debug_str_offset) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_strp); this->PushUint32(dchecked_integral_cast<uint32_t>(debug_str_offset)); } void WriteStrp(Attribute attrib, const char* str, size_t len, std::vector<uint8_t>* debug_str) { debug_abbrev_->AddAbbrevAttribute(attrib, DW_FORM_strp); this->PushUint32(debug_str->size()); debug_str->insert(debug_str->end(), str, str + len); debug_str->push_back(0); } void WriteStrp(Attribute attrib, const char* str, std::vector<uint8_t>* debug_str) { WriteStrp(attrib, str, strlen(str), debug_str); } bool Is64bit() const { return is64bit_; } const std::vector<uintptr_t>& GetPatchLocations() const { return patch_locations_; } int Depth() const { return depth_; } using Writer<Vector>::data; using Writer<Vector>::size; using Writer<Vector>::UpdateUint32; DebugInfoEntryWriter(bool is64bitArch, DebugAbbrevWriter<Vector>* debug_abbrev, const typename Vector::allocator_type& alloc = typename Vector::allocator_type()) : Writer<Vector>(&entries_), debug_abbrev_(debug_abbrev), entries_(alloc), is64bit_(is64bitArch) { } ~DebugInfoEntryWriter() { DCHECK(!inside_entry_); DCHECK_EQ(depth_, 0); } private: DebugAbbrevWriter<Vector>* debug_abbrev_; Vector entries_; bool is64bit_; int depth_ = 0; size_t abbrev_code_offset_ = 0; // Location to patch once we know the code. bool inside_entry_ = false; // Entry ends at first child (if any). std::vector<uintptr_t> patch_locations_; }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DEBUG_INFO_ENTRY_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/debug_line_opcode_writer.h000066400000000000000000000175541336577252300273730ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ #define ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ #include <cstdint> #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/writer.h" namespace art { namespace dwarf { // Writer for the .debug_line opcodes (DWARF-3). // The writer is very light-weight, however it will do the following for you: // * Choose the most compact encoding of a given opcode. // * Keep track of current state and convert absolute values to deltas. // * Divide by header-defined factors as appropriate.
template <typename Vector = std::vector<uint8_t>> class DebugLineOpCodeWriter FINAL : private Writer<Vector> { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); public: static constexpr int kOpcodeBase = 13; static constexpr bool kDefaultIsStmt = false; static constexpr int kLineBase = -5; static constexpr int kLineRange = 14; void AddRow() { this->PushUint8(DW_LNS_copy); } void AdvancePC(uint64_t absolute_address) { DCHECK_NE(current_address_, 0u); // Use SetAddress for the first advance. DCHECK_GE(absolute_address, current_address_); if (absolute_address != current_address_) { uint64_t delta = FactorCodeOffset(absolute_address - current_address_); if (delta <= INT32_MAX) { this->PushUint8(DW_LNS_advance_pc); this->PushUleb128(static_cast<int>(delta)); current_address_ = absolute_address; } else { SetAddress(absolute_address); } } } void AdvanceLine(int absolute_line) { int delta = absolute_line - current_line_; if (delta != 0) { this->PushUint8(DW_LNS_advance_line); this->PushSleb128(delta); current_line_ = absolute_line; } } void SetFile(int file) { if (current_file_ != file) { this->PushUint8(DW_LNS_set_file); this->PushUleb128(file); current_file_ = file; } } void SetColumn(int column) { this->PushUint8(DW_LNS_set_column); this->PushUleb128(column); } void SetIsStmt(bool is_stmt) { if (is_stmt_ != is_stmt) { this->PushUint8(DW_LNS_negate_stmt); is_stmt_ = is_stmt; } } void SetBasicBlock() { this->PushUint8(DW_LNS_set_basic_block); } void SetPrologueEnd() { uses_dwarf3_features_ = true; this->PushUint8(DW_LNS_set_prologue_end); } void SetEpilogueBegin() { uses_dwarf3_features_ = true; this->PushUint8(DW_LNS_set_epilogue_begin); } void SetISA(int isa) { uses_dwarf3_features_ = true; this->PushUint8(DW_LNS_set_isa); this->PushUleb128(isa); } void EndSequence() { this->PushUint8(0); this->PushUleb128(1); this->PushUint8(DW_LNE_end_sequence); current_address_ = 0; current_file_ = 1; current_line_ = 1; is_stmt_ = kDefaultIsStmt; } // Unconditionally set address using the long encoding. // This gives the linker the opportunity to relocate the address. void SetAddress(uint64_t absolute_address) { DCHECK_GE(absolute_address, current_address_); FactorCodeOffset(absolute_address); // Check if it is factorable. this->PushUint8(0); if (use_64bit_address_) { this->PushUleb128(1 + 8); this->PushUint8(DW_LNE_set_address); patch_locations_.push_back(this->data()->size()); this->PushUint64(absolute_address); } else { this->PushUleb128(1 + 4); this->PushUint8(DW_LNE_set_address); patch_locations_.push_back(this->data()->size()); this->PushUint32(absolute_address); } current_address_ = absolute_address; } void DefineFile(const char* filename, int directory_index, int modification_time, int file_size) { int size = 1 + strlen(filename) + 1 + UnsignedLeb128Size(directory_index) + UnsignedLeb128Size(modification_time) + UnsignedLeb128Size(file_size); this->PushUint8(0); this->PushUleb128(size); size_t start = data()->size(); this->PushUint8(DW_LNE_define_file); this->PushString(filename); this->PushUleb128(directory_index); this->PushUleb128(modification_time); this->PushUleb128(file_size); DCHECK_EQ(start + size, data()->size()); } // Compact address and line opcode. void AddRow(uint64_t absolute_address, int absolute_line) { DCHECK_GE(absolute_address, current_address_); // If the address is definitely too far, use the long encoding. uint64_t delta_address = FactorCodeOffset(absolute_address - current_address_); if (delta_address > UINT8_MAX) { AdvancePC(absolute_address); delta_address = 0; } // If the line is definitely too far, use the long encoding.
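// Note (illustrative): with kLineBase = -5 and kLineRange = 14, only line
// deltas in [-5, 8] fit into a special opcode, so anything outside that
// window is emitted as an explicit DW_LNS_advance_line first.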
int delta_line = absolute_line - current_line_; if (!(kLineBase <= delta_line && delta_line < kLineBase + kLineRange)) { AdvanceLine(absolute_line); delta_line = 0; } // Both address and line should be reasonable now. Use the short encoding. int opcode = kOpcodeBase + (delta_line - kLineBase) + (static_cast<int>(delta_address) * kLineRange); if (opcode > UINT8_MAX) { // If the address is still too far, try to increment it by const amount. int const_advance = (0xFF - kOpcodeBase) / kLineRange; opcode -= (kLineRange * const_advance); if (opcode <= UINT8_MAX) { this->PushUint8(DW_LNS_const_add_pc); } else { // Give up and use long encoding for address. AdvancePC(absolute_address); // Still use the opcode to do line advance and copy. opcode = kOpcodeBase + (delta_line - kLineBase); } } DCHECK(kOpcodeBase <= opcode && opcode <= 0xFF); this->PushUint8(opcode); // Special opcode. current_line_ = absolute_line; current_address_ = absolute_address; } int GetCodeFactorBits() const { return code_factor_bits_; } uint64_t CurrentAddress() const { return current_address_; } int CurrentFile() const { return current_file_; } int CurrentLine() const { return current_line_; } const std::vector<uintptr_t>& GetPatchLocations() const { return patch_locations_; } using Writer<Vector>::data; DebugLineOpCodeWriter(bool use64bitAddress, int codeFactorBits, const typename Vector::allocator_type& alloc = typename Vector::allocator_type()) : Writer<Vector>(&opcodes_), opcodes_(alloc), uses_dwarf3_features_(false), use_64bit_address_(use64bitAddress), code_factor_bits_(codeFactorBits), current_address_(0), current_file_(1), current_line_(1), is_stmt_(kDefaultIsStmt) { } private: uint64_t FactorCodeOffset(uint64_t offset) const { DCHECK_GE(code_factor_bits_, 0); DCHECK_EQ((offset >> code_factor_bits_) << code_factor_bits_, offset); return offset >> code_factor_bits_; } Vector opcodes_; bool uses_dwarf3_features_; bool use_64bit_address_; int code_factor_bits_; uint64_t current_address_; int current_file_; int current_line_; bool is_stmt_; std::vector<uintptr_t> patch_locations_; DISALLOW_COPY_AND_ASSIGN(DebugLineOpCodeWriter); }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DEBUG_LINE_OPCODE_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/dwarf_constants.h000066400000000000000000000433771336577252300255420ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ #define ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ namespace art { namespace dwarf { // Based on the Dwarf 4 specification at dwarfstd.org and issues marked // for inclusion in Dwarf 5 on same. Values not specified in the Dwarf 4 // standard might change or be removed in the future and may be different // than the values used currently by other implementations for the same trait, // use at your own risk.
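// As an informal example of how these constants combine (illustrative only):
// a local variable DIE would typically be a DW_TAG_variable whose abbreviation
// pairs DW_AT_name with DW_FORM_strp and DW_AT_location with DW_FORM_exprloc.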
enum Tag { DW_TAG_array_type = 0x01, DW_TAG_class_type = 0x02, DW_TAG_entry_point = 0x03, DW_TAG_enumeration_type = 0x04, DW_TAG_formal_parameter = 0x05, DW_TAG_imported_declaration = 0x08, DW_TAG_label = 0x0a, DW_TAG_lexical_block = 0x0b, DW_TAG_member = 0x0d, DW_TAG_pointer_type = 0x0f, DW_TAG_reference_type = 0x10, DW_TAG_compile_unit = 0x11, DW_TAG_string_type = 0x12, DW_TAG_structure_type = 0x13, DW_TAG_subroutine_type = 0x15, DW_TAG_typedef = 0x16, DW_TAG_union_type = 0x17, DW_TAG_unspecified_parameters = 0x18, DW_TAG_variant = 0x19, DW_TAG_common_block = 0x1a, DW_TAG_common_inclusion = 0x1b, DW_TAG_inheritance = 0x1c, DW_TAG_inlined_subroutine = 0x1d, DW_TAG_module = 0x1e, DW_TAG_ptr_to_member_type = 0x1f, DW_TAG_set_type = 0x20, DW_TAG_subrange_type = 0x21, DW_TAG_with_stmt = 0x22, DW_TAG_access_declaration = 0x23, DW_TAG_base_type = 0x24, DW_TAG_catch_block = 0x25, DW_TAG_const_type = 0x26, DW_TAG_constant = 0x27, DW_TAG_enumerator = 0x28, DW_TAG_file_type = 0x29, DW_TAG_friend = 0x2a, DW_TAG_namelist = 0x2b, DW_TAG_namelist_item = 0x2c, DW_TAG_packed_type = 0x2d, DW_TAG_subprogram = 0x2e, DW_TAG_template_type_parameter = 0x2f, DW_TAG_template_value_parameter = 0x30, DW_TAG_thrown_type = 0x31, DW_TAG_try_block = 0x32, DW_TAG_variant_part = 0x33, DW_TAG_variable = 0x34, DW_TAG_volatile_type = 0x35, DW_TAG_dwarf_procedure = 0x36, DW_TAG_restrict_type = 0x37, DW_TAG_interface_type = 0x38, DW_TAG_namespace = 0x39, DW_TAG_imported_module = 0x3a, DW_TAG_unspecified_type = 0x3b, DW_TAG_partial_unit = 0x3c, DW_TAG_imported_unit = 0x3d, DW_TAG_condition = 0x3f, DW_TAG_shared_type = 0x40, DW_TAG_type_unit = 0x41, DW_TAG_rvalue_reference_type = 0x42, DW_TAG_template_alias = 0x43, #ifdef INCLUDE_DWARF5_VALUES // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. 
DW_TAG_coarray_type = 0x44, DW_TAG_call_site = 0x45, DW_TAG_call_site_parameter = 0x46, DW_TAG_generic_subrange = 0x47, DW_TAG_atomic_type = 0x48, DW_TAG_dynamic_type = 0x49, DW_TAG_aligned_type = 0x50, #endif DW_TAG_lo_user = 0x4080, DW_TAG_hi_user = 0xffff }; enum Children : uint8_t { DW_CHILDREN_no = 0x00, DW_CHILDREN_yes = 0x01 }; enum Attribute { DW_AT_sibling = 0x01, DW_AT_location = 0x02, DW_AT_name = 0x03, DW_AT_ordering = 0x09, DW_AT_byte_size = 0x0b, DW_AT_bit_offset = 0x0c, DW_AT_bit_size = 0x0d, DW_AT_stmt_list = 0x10, DW_AT_low_pc = 0x11, DW_AT_high_pc = 0x12, DW_AT_language = 0x13, DW_AT_discr = 0x15, DW_AT_discr_value = 0x16, DW_AT_visibility = 0x17, DW_AT_import = 0x18, DW_AT_string_length = 0x19, DW_AT_common_reference = 0x1a, DW_AT_comp_dir = 0x1b, DW_AT_const_value = 0x1c, DW_AT_containing_type = 0x1d, DW_AT_default_value = 0x1e, DW_AT_inline = 0x20, DW_AT_is_optional = 0x21, DW_AT_lower_bound = 0x22, DW_AT_producer = 0x25, DW_AT_prototyped = 0x27, DW_AT_return_addr = 0x2a, DW_AT_start_scope = 0x2c, DW_AT_bit_stride = 0x2e, DW_AT_upper_bound = 0x2f, DW_AT_abstract_origin = 0x31, DW_AT_accessibility = 0x32, DW_AT_address_class = 0x33, DW_AT_artificial = 0x34, DW_AT_base_types = 0x35, DW_AT_calling_convention = 0x36, DW_AT_count = 0x37, DW_AT_data_member_location = 0x38, DW_AT_decl_column = 0x39, DW_AT_decl_file = 0x3a, DW_AT_decl_line = 0x3b, DW_AT_declaration = 0x3c, DW_AT_discr_list = 0x3d, DW_AT_encoding = 0x3e, DW_AT_external = 0x3f, DW_AT_frame_base = 0x40, DW_AT_friend = 0x41, DW_AT_identifier_case = 0x42, DW_AT_macro_info = 0x43, DW_AT_namelist_item = 0x44, DW_AT_priority = 0x45, DW_AT_segment = 0x46, DW_AT_specification = 0x47, DW_AT_static_link = 0x48, DW_AT_type = 0x49, DW_AT_use_location = 0x4a, DW_AT_variable_parameter = 0x4b, DW_AT_virtuality = 0x4c, DW_AT_vtable_elem_location = 0x4d, DW_AT_allocated = 0x4e, DW_AT_associated = 0x4f, DW_AT_data_location = 0x50, DW_AT_byte_stride = 0x51, DW_AT_entry_pc = 0x52, DW_AT_use_UTF8 = 0x53, DW_AT_extension = 0x54, DW_AT_ranges = 0x55, DW_AT_trampoline = 0x56, DW_AT_call_column = 0x57, DW_AT_call_file = 0x58, DW_AT_call_line = 0x59, DW_AT_description = 0x5a, DW_AT_binary_scale = 0x5b, DW_AT_decimal_scale = 0x5c, DW_AT_small = 0x5d, DW_AT_decimal_sign = 0x5e, DW_AT_digit_count = 0x5f, DW_AT_picture_string = 0x60, DW_AT_mutable = 0x61, DW_AT_threads_scaled = 0x62, DW_AT_explicit = 0x63, DW_AT_object_pointer = 0x64, DW_AT_endianity = 0x65, DW_AT_elemental = 0x66, DW_AT_pure = 0x67, DW_AT_recursive = 0x68, DW_AT_signature = 0x69, DW_AT_main_subprogram = 0x6a, DW_AT_data_bit_offset = 0x6b, DW_AT_const_expr = 0x6c, DW_AT_enum_class = 0x6d, DW_AT_linkage_name = 0x6e, #ifdef INCLUDE_DWARF5_VALUES // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. 
DW_AT_call_site_value = 0x6f, DW_AT_call_site_data_value = 0x70, DW_AT_call_site_target = 0x71, DW_AT_call_site_target_clobbered = 0x72, DW_AT_tail_call = 0x73, DW_AT_all_tail_call_sites = 0x74, DW_AT_all_call_sites = 0x75, DW_AT_all_source_call_sites = 0x76, DW_AT_call_site_parameter = 0x77, DW_AT_rank = 0x7c, DW_AT_string_bitsize = 0x7d, DW_AT_string_byte_size = 0x7e, DW_AT_reference = 0x7f, DW_AT_rvalue_reference = 0x80, DW_AT_noreturn = 0x81, DW_AT_alignment = 0x82, #endif DW_AT_lo_user = 0x2000, DW_AT_hi_user = 0xffff }; enum Form : uint8_t { DW_FORM_addr = 0x01, DW_FORM_block2 = 0x03, DW_FORM_block4 = 0x04, DW_FORM_data2 = 0x05, DW_FORM_data4 = 0x06, DW_FORM_data8 = 0x07, DW_FORM_string = 0x08, DW_FORM_block = 0x09, DW_FORM_block1 = 0x0a, DW_FORM_data1 = 0x0b, DW_FORM_flag = 0x0c, DW_FORM_sdata = 0x0d, DW_FORM_strp = 0x0e, DW_FORM_udata = 0x0f, DW_FORM_ref_addr = 0x10, DW_FORM_ref1 = 0x11, DW_FORM_ref2 = 0x12, DW_FORM_ref4 = 0x13, DW_FORM_ref8 = 0x14, DW_FORM_ref_udata = 0x15, DW_FORM_indirect = 0x16, DW_FORM_sec_offset = 0x17, DW_FORM_exprloc = 0x18, DW_FORM_flag_present = 0x19, DW_FORM_ref_sig8 = 0x20 }; enum Operation : uint16_t { DW_OP_addr = 0x03, DW_OP_deref = 0x06, DW_OP_const1u = 0x08, DW_OP_const1s = 0x09, DW_OP_const2u = 0x0a, DW_OP_const2s = 0x0b, DW_OP_const4u = 0x0c, DW_OP_const4s = 0x0d, DW_OP_const8u = 0x0e, DW_OP_const8s = 0x0f, DW_OP_constu = 0x10, DW_OP_consts = 0x11, DW_OP_dup = 0x12, DW_OP_drop = 0x13, DW_OP_over = 0x14, DW_OP_pick = 0x15, DW_OP_swap = 0x16, DW_OP_rot = 0x17, DW_OP_xderef = 0x18, DW_OP_abs = 0x19, DW_OP_and = 0x1a, DW_OP_div = 0x1b, DW_OP_minus = 0x1c, DW_OP_mod = 0x1d, DW_OP_mul = 0x1e, DW_OP_neg = 0x1f, DW_OP_not = 0x20, DW_OP_or = 0x21, DW_OP_plus = 0x22, DW_OP_plus_uconst = 0x23, DW_OP_shl = 0x24, DW_OP_shr = 0x25, DW_OP_shra = 0x26, DW_OP_xor = 0x27, DW_OP_skip = 0x2f, DW_OP_bra = 0x28, DW_OP_eq = 0x29, DW_OP_ge = 0x2a, DW_OP_gt = 0x2b, DW_OP_le = 0x2c, DW_OP_lt = 0x2d, DW_OP_ne = 0x2e, DW_OP_lit0 = 0x30, DW_OP_lit1 = 0x31, DW_OP_lit2 = 0x32, DW_OP_lit3 = 0x33, DW_OP_lit4 = 0x34, DW_OP_lit5 = 0x35, DW_OP_lit6 = 0x36, DW_OP_lit7 = 0x37, DW_OP_lit8 = 0x38, DW_OP_lit9 = 0x39, DW_OP_lit10 = 0x3a, DW_OP_lit11 = 0x3b, DW_OP_lit12 = 0x3c, DW_OP_lit13 = 0x3d, DW_OP_lit14 = 0x3e, DW_OP_lit15 = 0x3f, DW_OP_lit16 = 0x40, DW_OP_lit17 = 0x41, DW_OP_lit18 = 0x42, DW_OP_lit19 = 0x43, DW_OP_lit20 = 0x44, DW_OP_lit21 = 0x45, DW_OP_lit22 = 0x46, DW_OP_lit23 = 0x47, DW_OP_lit24 = 0x48, DW_OP_lit25 = 0x49, DW_OP_lit26 = 0x4a, DW_OP_lit27 = 0x4b, DW_OP_lit28 = 0x4c, DW_OP_lit29 = 0x4d, DW_OP_lit30 = 0x4e, DW_OP_lit31 = 0x4f, DW_OP_reg0 = 0x50, DW_OP_reg1 = 0x51, DW_OP_reg2 = 0x52, DW_OP_reg3 = 0x53, DW_OP_reg4 = 0x54, DW_OP_reg5 = 0x55, DW_OP_reg6 = 0x56, DW_OP_reg7 = 0x57, DW_OP_reg8 = 0x58, DW_OP_reg9 = 0x59, DW_OP_reg10 = 0x5a, DW_OP_reg11 = 0x5b, DW_OP_reg12 = 0x5c, DW_OP_reg13 = 0x5d, DW_OP_reg14 = 0x5e, DW_OP_reg15 = 0x5f, DW_OP_reg16 = 0x60, DW_OP_reg17 = 0x61, DW_OP_reg18 = 0x62, DW_OP_reg19 = 0x63, DW_OP_reg20 = 0x64, DW_OP_reg21 = 0x65, DW_OP_reg22 = 0x66, DW_OP_reg23 = 0x67, DW_OP_reg24 = 0x68, DW_OP_reg25 = 0x69, DW_OP_reg26 = 0x6a, DW_OP_reg27 = 0x6b, DW_OP_reg28 = 0x6c, DW_OP_reg29 = 0x6d, DW_OP_reg30 = 0x6e, DW_OP_reg31 = 0x6f, DW_OP_breg0 = 0x70, DW_OP_breg1 = 0x71, DW_OP_breg2 = 0x72, DW_OP_breg3 = 0x73, DW_OP_breg4 = 0x74, DW_OP_breg5 = 0x75, DW_OP_breg6 = 0x76, DW_OP_breg7 = 0x77, DW_OP_breg8 = 0x78, DW_OP_breg9 = 0x79,
DW_OP_breg10 = 0x7a, DW_OP_breg11 = 0x7b, DW_OP_breg12 = 0x7c, DW_OP_breg13 = 0x7d, DW_OP_breg14 = 0x7e, DW_OP_breg15 = 0x7f, DW_OP_breg16 = 0x80, DW_OP_breg17 = 0x81, DW_OP_breg18 = 0x82, DW_OP_breg19 = 0x83, DW_OP_breg20 = 0x84, DW_OP_breg21 = 0x85, DW_OP_breg22 = 0x86, DW_OP_breg23 = 0x87, DW_OP_breg24 = 0x88, DW_OP_breg25 = 0x89, DW_OP_breg26 = 0x8a, DW_OP_breg27 = 0x8b, DW_OP_breg28 = 0x8c, DW_OP_breg29 = 0x8d, DW_OP_breg30 = 0x8e, DW_OP_breg31 = 0x8f, DW_OP_regx = 0x90, DW_OP_fbreg = 0x91, DW_OP_bregx = 0x92, DW_OP_piece = 0x93, DW_OP_deref_size = 0x94, DW_OP_xderef_size = 0x95, DW_OP_nop = 0x96, DW_OP_push_object_address = 0x97, DW_OP_call2 = 0x98, DW_OP_call4 = 0x99, DW_OP_call_ref = 0x9a, DW_OP_form_tls_address = 0x9b, DW_OP_call_frame_cfa = 0x9c, DW_OP_bit_piece = 0x9d, DW_OP_implicit_value = 0x9e, DW_OP_stack_value = 0x9f, #ifdef INCLUDE_DWARF5_VALUES // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. DW_OP_entry_value = 0xa0, DW_OP_const_type = 0xa1, DW_OP_regval_type = 0xa2, DW_OP_deref_type = 0xa3, DW_OP_xderef_type = 0xa4, DW_OP_convert = 0xa5, DW_OP_reinterpret = 0xa6, #endif DW_OP_lo_user = 0xe0, DW_OP_hi_user = 0xff }; enum BaseTypeEncoding : uint8_t { DW_ATE_address = 0x01, DW_ATE_boolean = 0x02, DW_ATE_complex_float = 0x03, DW_ATE_float = 0x04, DW_ATE_signed = 0x05, DW_ATE_signed_char = 0x06, DW_ATE_unsigned = 0x07, DW_ATE_unsigned_char = 0x08, DW_ATE_imaginary_float = 0x09, DW_ATE_packed_decimal = 0x0a, DW_ATE_numeric_string = 0x0b, DW_ATE_edited = 0x0c, DW_ATE_signed_fixed = 0x0d, DW_ATE_unsigned_fixed = 0x0e, DW_ATE_decimal_float = 0x0f, DW_ATE_UTF = 0x10, DW_ATE_lo_user = 0x80, DW_ATE_hi_user = 0xff }; enum DecimalSign : uint8_t { DW_DS_unsigned = 0x01, DW_DS_leading_overpunch = 0x02, DW_DS_trailing_overpunch = 0x03, DW_DS_leading_separate = 0x04, DW_DS_trailing_separate = 0x05 }; enum Endianity : uint8_t { DW_END_default = 0x00, DW_END_big = 0x01, DW_END_little = 0x02, DW_END_lo_user = 0x40, DW_END_hi_user = 0xff }; enum Accessibility : uint8_t { DW_ACCESS_public = 0x01, DW_ACCESS_protected = 0x02, DW_ACCESS_private = 0x03 }; enum Visibility : uint8_t { DW_VIS_local = 0x01, DW_VIS_exported = 0x02, DW_VIS_qualified = 0x03 }; enum Virtuality : uint8_t { DW_VIRTUALITY_none = 0x00, DW_VIRTUALITY_virtual = 0x01, DW_VIRTUALITY_pure_virtual = 0x02 }; enum Language { DW_LANG_C89 = 0x01, DW_LANG_C = 0x02, DW_LANG_Ada83 = 0x03, DW_LANG_C_plus_plus = 0x04, DW_LANG_Cobol74 = 0x05, DW_LANG_Cobol85 = 0x06, DW_LANG_Fortran77 = 0x07, DW_LANG_Fortran90 = 0x08, DW_LANG_Pascal83 = 0x09, DW_LANG_Modula2 = 0x0a, DW_LANG_Java = 0x0b, DW_LANG_C99 = 0x0c, DW_LANG_Ada95 = 0x0d, DW_LANG_Fortran95 = 0x0e, DW_LANG_PLI = 0x0f, DW_LANG_ObjC = 0x10, DW_LANG_ObjC_plus_plus = 0x11, DW_LANG_UPC = 0x12, DW_LANG_D = 0x13, DW_LANG_Python = 0x14, #ifdef INCLUDE_DWARF5_VALUES // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. 
DW_LANG_OpenCL = 0x15, DW_LANG_Go = 0x16, DW_LANG_Modula3 = 0x17, DW_LANG_Haskell = 0x18, DW_LANG_C_plus_plus_03 = 0x19, DW_LANG_C_plus_plus_11 = 0x1a, DW_LANG_OCaml = 0x1b, DW_LANG_Rust = 0x1c, DW_LANG_C11 = 0x1d, DW_LANG_Swift = 0x1e, DW_LANG_Julia = 0x1f, #endif DW_LANG_lo_user = 0x8000, DW_LANG_hi_user = 0xffff }; enum Identifier : uint8_t { DW_ID_case_sensitive = 0x00, DW_ID_up_case = 0x01, DW_ID_down_case = 0x02, DW_ID_case_insensitive = 0x03 }; enum CallingConvention : uint8_t { DW_CC_normal = 0x01, DW_CC_program = 0x02, DW_CC_nocall = 0x03, DW_CC_lo_user = 0x40, DW_CC_hi_user = 0xff }; enum Inline : uint8_t { DW_INL_not_inlined = 0x00, DW_INL_inlined = 0x01, DW_INL_declared_not_inlined = 0x02, DW_INL_declared_inlined = 0x03 }; enum ArrayOrdering : uint8_t { DW_ORD_row_major = 0x00, DW_ORD_col_major = 0x01 }; enum DiscriminantList : uint8_t { DW_DSC_label = 0x00, DW_DSC_range = 0x01 }; enum LineNumberOpcode : uint8_t { DW_LNS_copy = 0x01, DW_LNS_advance_pc = 0x02, DW_LNS_advance_line = 0x03, DW_LNS_set_file = 0x04, DW_LNS_set_column = 0x05, DW_LNS_negate_stmt = 0x06, DW_LNS_set_basic_block = 0x07, DW_LNS_const_add_pc = 0x08, DW_LNS_fixed_advance_pc = 0x09, DW_LNS_set_prologue_end = 0x0a, DW_LNS_set_epilogue_begin = 0x0b, DW_LNS_set_isa = 0x0c }; enum LineNumberExtendedOpcode : uint8_t { DW_LNE_end_sequence = 0x01, DW_LNE_set_address = 0x02, DW_LNE_define_file = 0x03, DW_LNE_set_discriminator = 0x04, DW_LNE_lo_user = 0x80, DW_LNE_hi_user = 0xff }; #ifdef INCLUDE_DWARF5_VALUES enum LineNumberFormat : uint8_t { // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. // DW_LNF_path = 0x1, DW_LNF_include_index = 0x2, DW_LNF_timestamp = 0x3, DW_LNF_size = 0x4, DW_LNF_MD5 = 0x5, DW_LNF_lo_user = 0x2000, DW_LNF_hi_user = 0x3fff }; #endif enum MacroInfo : uint8_t { DW_MACINFO_define = 0x01, DW_MACINFO_undef = 0x02, DW_MACINFO_start_file = 0x03, DW_MACINFO_end_file = 0x04, DW_MACINFO_vendor_ext = 0xff }; #ifdef INCLUDE_DWARF5_VALUES enum Macro : uint8_t { // Values to be added in Dwarf 5. Final value not yet specified. Values listed // may be different than other implementations. Use with caution. // TODO Update these values when Dwarf 5 is released. 
DW_MACRO_define = 0x01, DW_MACRO_undef = 0x02, DW_MACRO_start_file = 0x03, DW_MACRO_end_file = 0x04, DW_MACRO_define_indirect = 0x05, DW_MACRO_undef_indirect = 0x06, DW_MACRO_transparent_include = 0x07, DW_MACRO_define_indirectx = 0x0b, DW_MACRO_undef_indirectx = 0x0c, DW_MACRO_lo_user = 0xe0, DW_MACRO_hi_user = 0xff }; #endif const uint32_t CIE_ID_32 = 0xffffffff; const uint64_t CIE_ID_64 = 0xffffffffffffffff; enum CallFrameInstruction : uint8_t { DW_CFA_advance_loc = 0x40, DW_CFA_offset = 0x80, DW_CFA_restore = 0xc0, DW_CFA_nop = 0x00, DW_CFA_set_loc = 0x01, DW_CFA_advance_loc1 = 0x02, DW_CFA_advance_loc2 = 0x03, DW_CFA_advance_loc4 = 0x04, DW_CFA_offset_extended = 0x05, DW_CFA_restore_extended = 0x06, DW_CFA_undefined = 0x07, DW_CFA_same_value = 0x08, DW_CFA_register = 0x09, DW_CFA_remember_state = 0x0a, DW_CFA_restore_state = 0x0b, DW_CFA_def_cfa = 0x0c, DW_CFA_def_cfa_register = 0x0d, DW_CFA_def_cfa_offset = 0x0e, DW_CFA_def_cfa_expression = 0x0f, DW_CFA_expression = 0x10, DW_CFA_offset_extended_sf = 0x11, DW_CFA_def_cfa_sf = 0x12, DW_CFA_def_cfa_offset_sf = 0x13, DW_CFA_val_offset = 0x14, DW_CFA_val_offset_sf = 0x15, DW_CFA_val_expression = 0x16, DW_CFA_lo_user = 0x1c, DW_CFA_hi_user = 0x3f }; enum ExceptionHeaderValueFormat : uint8_t { DW_EH_PE_native = 0x00, DW_EH_PE_uleb128 = 0x01, DW_EH_PE_udata2 = 0x02, DW_EH_PE_udata4 = 0x03, DW_EH_PE_udata8 = 0x04, DW_EH_PE_sleb128 = 0x09, DW_EH_PE_sdata2 = 0x0A, DW_EH_PE_sdata4 = 0x0B, DW_EH_PE_sdata8 = 0x0C, DW_EH_PE_omit = 0xFF, }; enum ExceptionHeaderValueApplication : uint8_t { DW_EH_PE_absptr = 0x00, DW_EH_PE_pcrel = 0x10, DW_EH_PE_textrel = 0x20, DW_EH_PE_datarel = 0x30, DW_EH_PE_funcrel = 0x40, DW_EH_PE_aligned = 0x50, }; enum CFIFormat : uint8_t { // This is the original format as defined by the specification. // It is used for the .debug_frame section. DW_DEBUG_FRAME_FORMAT, // Slightly modified format used for the .eh_frame section. DW_EH_FRAME_FORMAT }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DWARF_CONSTANTS_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/dwarf_test.cc000066400000000000000000000331631336577252300246470ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dwarf_test.h" #include "debug/dwarf/debug_frame_opcode_writer.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/dwarf/debug_line_opcode_writer.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/headers.h" #include "gtest/gtest.h" namespace art { namespace dwarf { // Run the tests only on host since we need objdump. #ifndef ART_TARGET_ANDROID constexpr CFIFormat kCFIFormat = DW_DEBUG_FRAME_FORMAT; TEST_F(DwarfTest, DebugFrame) { const bool is64bit = false; // Pick offset value which would catch Uleb vs Sleb errors. 
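// Illustrative arithmetic behind the chosen constant: 40000 / 4 = 10000 =
// 0x2710 needs 14 significant bits, which fits in two unsigned LEB128 bytes
// (7 payload bits each), but signed LEB128 must also carry a sign bit,
// pushing it to three bytes, which is exactly what the ASSERTs below check.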
const int offset = 40000; ASSERT_EQ(UnsignedLeb128Size(offset / 4), 2u); ASSERT_EQ(SignedLeb128Size(offset / 4), 3u); DW_CHECK("Data alignment factor: -4"); const Reg reg(6); // Test the opcodes in the order mentioned in the spec. // There are usually several encoding variations of each opcode. DebugFrameOpCodeWriter<> opcodes; DW_CHECK("FDE"); int pc = 0; for (int i : {0, 1, 0x3F, 0x40, 0xFF, 0x100, 0xFFFF, 0x10000}) { pc += i; opcodes.AdvancePC(pc); } DW_CHECK_NEXT("DW_CFA_advance_loc: 1 to 01000001"); DW_CHECK_NEXT("DW_CFA_advance_loc: 63 to 01000040"); DW_CHECK_NEXT("DW_CFA_advance_loc1: 64 to 01000080"); DW_CHECK_NEXT("DW_CFA_advance_loc1: 255 to 0100017f"); DW_CHECK_NEXT("DW_CFA_advance_loc2: 256 to 0100027f"); DW_CHECK_NEXT("DW_CFA_advance_loc2: 65535 to 0101027e"); DW_CHECK_NEXT("DW_CFA_advance_loc4: 65536 to 0102027e"); opcodes.DefCFA(reg, offset); DW_CHECK_NEXT("DW_CFA_def_cfa: r6 (esi) ofs 40000"); opcodes.DefCFA(reg, -offset); DW_CHECK_NEXT("DW_CFA_def_cfa_sf: r6 (esi) ofs -40000"); opcodes.DefCFARegister(reg); DW_CHECK_NEXT("DW_CFA_def_cfa_register: r6 (esi)"); opcodes.DefCFAOffset(offset); DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 40000"); opcodes.DefCFAOffset(-offset); DW_CHECK_NEXT("DW_CFA_def_cfa_offset_sf: -40000"); uint8_t expr[] = { 0 }; opcodes.DefCFAExpression(expr, arraysize(expr)); DW_CHECK_NEXT("DW_CFA_def_cfa_expression"); opcodes.Undefined(reg); DW_CHECK_NEXT("DW_CFA_undefined: r6 (esi)"); opcodes.SameValue(reg); DW_CHECK_NEXT("DW_CFA_same_value: r6 (esi)"); opcodes.Offset(Reg(0x3F), -offset); // Bad register likely means that it does not exist on x86, // but we want to test high register numbers anyway. DW_CHECK_NEXT("DW_CFA_offset: bad register: r63 at cfa-40000"); opcodes.Offset(Reg(0x40), -offset); DW_CHECK_NEXT("DW_CFA_offset_extended: bad register: r64 at cfa-40000"); opcodes.Offset(Reg(0x40), offset); DW_CHECK_NEXT("DW_CFA_offset_extended_sf: bad register: r64 at cfa+40000"); opcodes.ValOffset(reg, -offset); DW_CHECK_NEXT("DW_CFA_val_offset: r6 (esi) at cfa-40000"); opcodes.ValOffset(reg, offset); DW_CHECK_NEXT("DW_CFA_val_offset_sf: r6 (esi) at cfa+40000"); opcodes.Register(reg, Reg(1)); DW_CHECK_NEXT("DW_CFA_register: r6 (esi) in r1 (ecx)"); opcodes.Expression(reg, expr, arraysize(expr)); DW_CHECK_NEXT("DW_CFA_expression: r6 (esi)"); opcodes.ValExpression(reg, expr, arraysize(expr)); DW_CHECK_NEXT("DW_CFA_val_expression: r6 (esi)"); opcodes.Restore(Reg(0x3F)); DW_CHECK_NEXT("DW_CFA_restore: bad register: r63"); opcodes.Restore(Reg(0x40)); DW_CHECK_NEXT("DW_CFA_restore_extended: bad register: r64"); opcodes.Restore(reg); DW_CHECK_NEXT("DW_CFA_restore: r6 (esi)"); opcodes.RememberState(); DW_CHECK_NEXT("DW_CFA_remember_state"); opcodes.RestoreState(); DW_CHECK_NEXT("DW_CFA_restore_state"); opcodes.Nop(); DW_CHECK_NEXT("DW_CFA_nop"); // Also test helpers. 
opcodes.DefCFA(Reg(4), 100); // ESP DW_CHECK_NEXT("DW_CFA_def_cfa: r4 (esp) ofs 100"); opcodes.AdjustCFAOffset(8); DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 108"); opcodes.RelOffset(Reg(0), 0); // push R0 DW_CHECK_NEXT("DW_CFA_offset: r0 (eax) at cfa-108"); opcodes.RelOffset(Reg(1), 4); // push R1 DW_CHECK_NEXT("DW_CFA_offset: r1 (ecx) at cfa-104"); opcodes.RelOffsetForMany(Reg(2), 8, 1 | (1 << 3), 4); // push R2 and R5 DW_CHECK_NEXT("DW_CFA_offset: r2 (edx) at cfa-100"); DW_CHECK_NEXT("DW_CFA_offset: r5 (ebp) at cfa-96"); opcodes.RestoreMany(Reg(2), 1 | (1 << 3)); // pop R2 and R5 DW_CHECK_NEXT("DW_CFA_restore: r2 (edx)"); DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)"); DebugFrameOpCodeWriter<> initial_opcodes; WriteCIE(is64bit, Reg(is64bit ? 16 : 8), initial_opcodes, kCFIFormat, &debug_frame_data_); std::vector<uintptr_t> debug_frame_patches; std::vector<uintptr_t> expected_patches { 28 }; // NOLINT WriteFDE(is64bit, 0, 0, 0x01000000, 0x01000000, ArrayRef<const uint8_t>(*opcodes.data()), kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); EXPECT_EQ(expected_patches, debug_frame_patches); CheckObjdumpOutput(is64bit, "-W"); } TEST_F(DwarfTest, DebugFrame64) { constexpr bool is64bit = true; DebugFrameOpCodeWriter<> initial_opcodes; WriteCIE(is64bit, Reg(16), initial_opcodes, kCFIFormat, &debug_frame_data_); DebugFrameOpCodeWriter<> opcodes; std::vector<uintptr_t> debug_frame_patches; std::vector<uintptr_t> expected_patches { 32 }; // NOLINT WriteFDE(is64bit, 0, 0, 0x0100000000000000, 0x0200000000000000, ArrayRef<const uint8_t>(*opcodes.data()), kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000"); EXPECT_EQ(expected_patches, debug_frame_patches); CheckObjdumpOutput(is64bit, "-W"); } // Test x86_64 register mapping. It is the only non-trivial architecture. // ARM, X86, and Mips have: dwarf_reg = art_reg + constant.
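// For reference, the non-trivial x86_64 core mapping exercised below is (see
// Reg::X86_64Core in register.h): art 0..7 -> dwarf {0, 2, 1, 3, 7, 6, 4, 5},
// i.e. rcx/rdx swap and rsp/rbp/rsi/rdi are shuffled, while r8-r15 map 1:1.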
TEST_F(DwarfTest, x86_64_RegisterMapping) { constexpr bool is64bit = true; DebugFrameOpCodeWriter<> opcodes; for (int i = 0; i < 16; i++) { opcodes.RelOffset(Reg::X86_64Core(i), 0); } DW_CHECK("FDE"); DW_CHECK_NEXT("DW_CFA_offset: r0 (rax)"); DW_CHECK_NEXT("DW_CFA_offset: r2 (rcx)"); DW_CHECK_NEXT("DW_CFA_offset: r1 (rdx)"); DW_CHECK_NEXT("DW_CFA_offset: r3 (rbx)"); DW_CHECK_NEXT("DW_CFA_offset: r7 (rsp)"); DW_CHECK_NEXT("DW_CFA_offset: r6 (rbp)"); DW_CHECK_NEXT("DW_CFA_offset: r4 (rsi)"); DW_CHECK_NEXT("DW_CFA_offset: r5 (rdi)"); DW_CHECK_NEXT("DW_CFA_offset: r8 (r8)"); DW_CHECK_NEXT("DW_CFA_offset: r9 (r9)"); DW_CHECK_NEXT("DW_CFA_offset: r10 (r10)"); DW_CHECK_NEXT("DW_CFA_offset: r11 (r11)"); DW_CHECK_NEXT("DW_CFA_offset: r12 (r12)"); DW_CHECK_NEXT("DW_CFA_offset: r13 (r13)"); DW_CHECK_NEXT("DW_CFA_offset: r14 (r14)"); DW_CHECK_NEXT("DW_CFA_offset: r15 (r15)"); DebugFrameOpCodeWriter<> initial_opcodes; WriteCIE(is64bit, Reg(16), initial_opcodes, kCFIFormat, &debug_frame_data_); std::vector debug_frame_patches; WriteFDE(is64bit, 0, 0, 0x0100000000000000, 0x0200000000000000, ArrayRef(*opcodes.data()), kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); CheckObjdumpOutput(is64bit, "-W"); } TEST_F(DwarfTest, DebugLine) { const bool is64bit = false; const int code_factor_bits = 1; DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); std::vector include_directories; include_directories.push_back("/path/to/source"); DW_CHECK("/path/to/source"); std::vector files { { "file0.c", 0, 1000, 2000 }, { "file1.c", 1, 1000, 2000 }, { "file2.c", 1, 1000, 2000 }, }; DW_CHECK("1\t0\t1000\t2000\tfile0.c"); DW_CHECK_NEXT("2\t1\t1000\t2000\tfile1.c"); DW_CHECK_NEXT("3\t1\t1000\t2000\tfile2.c"); DW_CHECK("Line Number Statements"); opcodes.SetAddress(0x01000000); DW_CHECK_NEXT("Extended opcode 2: set Address to 0x1000000"); opcodes.AddRow(); DW_CHECK_NEXT("Copy"); opcodes.AdvancePC(0x01000100); DW_CHECK_NEXT("Advance PC by 256 to 0x1000100"); opcodes.SetFile(2); DW_CHECK_NEXT("Set File Name to entry 2 in the File Name Table"); opcodes.AdvanceLine(3); DW_CHECK_NEXT("Advance Line by 2 to 3"); opcodes.SetColumn(4); DW_CHECK_NEXT("Set column to 4"); opcodes.SetIsStmt(true); DW_CHECK_NEXT("Set is_stmt to 1"); opcodes.SetIsStmt(false); DW_CHECK_NEXT("Set is_stmt to 0"); opcodes.SetBasicBlock(); DW_CHECK_NEXT("Set basic block"); opcodes.SetPrologueEnd(); DW_CHECK_NEXT("Set prologue_end to true"); opcodes.SetEpilogueBegin(); DW_CHECK_NEXT("Set epilogue_begin to true"); opcodes.SetISA(5); DW_CHECK_NEXT("Set ISA to 5"); opcodes.EndSequence(); DW_CHECK_NEXT("Extended opcode 1: End of Sequence"); opcodes.DefineFile("file.c", 0, 1000, 2000); DW_CHECK_NEXT("Extended opcode 3: define new File Table entry"); DW_CHECK_NEXT("Entry\tDir\tTime\tSize\tName"); DW_CHECK_NEXT("1\t0\t1000\t2000\tfile.c"); std::vector debug_line_patches; std::vector expected_patches { 87 }; // NOLINT WriteDebugLineTable(include_directories, files, opcodes, 0, &debug_line_data_, &debug_line_patches); EXPECT_EQ(expected_patches, debug_line_patches); CheckObjdumpOutput(is64bit, "-W"); } // DWARF has special one byte codes which advance PC and line at the same time. 
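// Worked example (illustrative): a special opcode encodes
// opcode = kOpcodeBase + (line_delta - kLineBase) + addr_delta * kLineRange,
// so advancing the (factored) address by 1 and the line by +2 yields
// 13 + (2 - (-5)) + 1 * 14 = 34, a single byte instead of three opcodes.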
TEST_F(DwarfTest, DebugLineSpecialOpcodes) { const bool is64bit = false; const int code_factor_bits = 1; uint32_t pc = 0x01000000; int line = 1; DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits); opcodes.SetAddress(pc); size_t num_rows = 0; DW_CHECK("Line Number Statements:"); DW_CHECK("Special opcode"); DW_CHECK("Advance PC by constant"); DW_CHECK("Decoded dump of debug contents of section .debug_line:"); DW_CHECK("Line number Starting address"); for (int addr_delta = 0; addr_delta < 80; addr_delta += 2) { for (int line_delta = 16; line_delta >= -16; --line_delta) { pc += addr_delta; line += line_delta; opcodes.AddRow(pc, line); num_rows++; ASSERT_EQ(opcodes.CurrentAddress(), pc); ASSERT_EQ(opcodes.CurrentLine(), line); char expected[1024]; sprintf(expected, "%i 0x%x", line, pc); DW_CHECK_NEXT(expected); } } EXPECT_LT(opcodes.data()->size(), num_rows * 3); std::vector directories; std::vector files { { "file.c", 0, 1000, 2000 } }; // NOLINT std::vector debug_line_patches; WriteDebugLineTable(directories, files, opcodes, 0, &debug_line_data_, &debug_line_patches); CheckObjdumpOutput(is64bit, "-W -WL"); } TEST_F(DwarfTest, DebugInfo) { constexpr bool is64bit = false; DebugAbbrevWriter<> debug_abbrev(&debug_abbrev_data_); DebugInfoEntryWriter<> info(is64bit, &debug_abbrev); DW_CHECK("Contents of the .debug_info section:"); info.StartTag(dwarf::DW_TAG_compile_unit); DW_CHECK("Abbrev Number: 1 (DW_TAG_compile_unit)"); info.WriteStrp(dwarf::DW_AT_producer, "Compiler name", &debug_str_data_); DW_CHECK_NEXT("DW_AT_producer : (indirect string, offset: 0x0): Compiler name"); info.WriteAddr(dwarf::DW_AT_low_pc, 0x01000000); DW_CHECK_NEXT("DW_AT_low_pc : 0x1000000"); info.WriteAddr(dwarf::DW_AT_high_pc, 0x02000000); DW_CHECK_NEXT("DW_AT_high_pc : 0x2000000"); info.StartTag(dwarf::DW_TAG_subprogram); DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); info.WriteStrp(dwarf::DW_AT_name, "Foo", &debug_str_data_); DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0xe): Foo"); info.WriteAddr(dwarf::DW_AT_low_pc, 0x01010000); DW_CHECK_NEXT("DW_AT_low_pc : 0x1010000"); info.WriteAddr(dwarf::DW_AT_high_pc, 0x01020000); DW_CHECK_NEXT("DW_AT_high_pc : 0x1020000"); info.EndTag(); // DW_TAG_subprogram info.StartTag(dwarf::DW_TAG_subprogram); DW_CHECK("Abbrev Number: 2 (DW_TAG_subprogram)"); info.WriteStrp(dwarf::DW_AT_name, "Bar", &debug_str_data_); DW_CHECK_NEXT("DW_AT_name : (indirect string, offset: 0x12): Bar"); info.WriteAddr(dwarf::DW_AT_low_pc, 0x01020000); DW_CHECK_NEXT("DW_AT_low_pc : 0x1020000"); info.WriteAddr(dwarf::DW_AT_high_pc, 0x01030000); DW_CHECK_NEXT("DW_AT_high_pc : 0x1030000"); info.EndTag(); // DW_TAG_subprogram info.EndTag(); // DW_TAG_compile_unit // Test that previous list was properly terminated and empty children. info.StartTag(dwarf::DW_TAG_compile_unit); info.EndTag(); // DW_TAG_compile_unit // The abbrev table is just side product, but check it as well. 
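// (Both subprograms above share abbrev 2; the childless compile unit cannot
// reuse abbrev 1 because its children flag differs, so it gets abbrev 3.)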
DW_CHECK("Abbrev Number: 3 (DW_TAG_compile_unit)"); DW_CHECK("Contents of the .debug_abbrev section:"); DW_CHECK("1 DW_TAG_compile_unit [has children]"); DW_CHECK_NEXT("DW_AT_producer DW_FORM_strp"); DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); DW_CHECK("2 DW_TAG_subprogram [no children]"); DW_CHECK_NEXT("DW_AT_name DW_FORM_strp"); DW_CHECK_NEXT("DW_AT_low_pc DW_FORM_addr"); DW_CHECK_NEXT("DW_AT_high_pc DW_FORM_addr"); DW_CHECK("3 DW_TAG_compile_unit [no children]"); std::vector debug_info_patches; std::vector expected_patches { 16, 20, 29, 33, 42, 46 }; // NOLINT dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info, 0, &debug_info_data_, &debug_info_patches); EXPECT_EQ(expected_patches, debug_info_patches); CheckObjdumpOutput(is64bit, "-W"); } #endif // ART_TARGET_ANDROID } // namespace dwarf } // namespace art android-platform-art-8.1.0+r23/compiler/debug/dwarf/dwarf_test.h000066400000000000000000000133271336577252300245110ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ #define ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ #include #include #include #include #include #include #include #include "base/unix_file/fd_file.h" #include "common_runtime_test.h" #include "elf_builder.h" #include "gtest/gtest.h" #include "linker/file_output_stream.h" #include "os.h" namespace art { namespace dwarf { #define DW_CHECK(substring) Check(substring, false, __FILE__, __LINE__) #define DW_CHECK_NEXT(substring) Check(substring, true, __FILE__, __LINE__) class DwarfTest : public CommonRuntimeTest { public: static constexpr bool kPrintObjdumpOutput = false; // debugging. struct ExpectedLine { std::string substring; bool next; const char* at_file; int at_line; }; // Check that the objdump output contains given output. // If next is true, it must be the next line. Otherwise lines are skipped. void Check(const char* substr, bool next, const char* at_file, int at_line) { expected_lines_.push_back(ExpectedLine {substr, next, at_file, at_line}); } // Pretty-print the generated DWARF data using objdump. template std::vector Objdump(const char* args) { // Write simple elf file with just the DWARF sections. InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? kX86_64 : kX86; ScratchFile file; FileOutputStream output_stream(file.GetFile()); ElfBuilder builder(isa, nullptr, &output_stream); builder.Start(); if (!debug_info_data_.empty()) { builder.WriteSection(".debug_info", &debug_info_data_); } if (!debug_abbrev_data_.empty()) { builder.WriteSection(".debug_abbrev", &debug_abbrev_data_); } if (!debug_str_data_.empty()) { builder.WriteSection(".debug_str", &debug_str_data_); } if (!debug_line_data_.empty()) { builder.WriteSection(".debug_line", &debug_line_data_); } if (!debug_frame_data_.empty()) { builder.WriteSection(".debug_frame", &debug_frame_data_); } builder.End(); EXPECT_TRUE(builder.Good()); // Read the elf file back using objdump. 
std::vector lines; std::string cmd = GetAndroidHostToolsDir(); cmd = cmd + "objdump " + args + " " + file.GetFilename() + " 2>&1"; FILE* output = popen(cmd.data(), "r"); char buffer[1024]; const char* line; while ((line = fgets(buffer, sizeof(buffer), output)) != nullptr) { if (kPrintObjdumpOutput) { printf("%s", line); } if (line[0] != '\0' && line[0] != '\n') { EXPECT_TRUE(strstr(line, "objdump: Error:") == nullptr) << line; EXPECT_TRUE(strstr(line, "objdump: Warning:") == nullptr) << line; std::string str(line); if (str.back() == '\n') { str.pop_back(); } lines.push_back(str); } } pclose(output); return lines; } std::vector Objdump(bool is64bit, const char* args) { if (is64bit) { return Objdump(args); } else { return Objdump(args); } } // Compare objdump output to the recorded checks. void CheckObjdumpOutput(bool is64bit, const char* args) { std::vector actual_lines = Objdump(is64bit, args); auto actual_line = actual_lines.begin(); for (const ExpectedLine& expected_line : expected_lines_) { const std::string& substring = expected_line.substring; if (actual_line == actual_lines.end()) { ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << "Expected '" << substring << "'.\n" << "Seen end of output."; } else if (expected_line.next) { if (actual_line->find(substring) == std::string::npos) { ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << "Expected '" << substring << "'.\n" << "Seen '" << actual_line->data() << "'."; } else { // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); } actual_line++; } else { bool found = false; for (auto it = actual_line; it < actual_lines.end(); it++) { if (it->find(substring) != std::string::npos) { actual_line = it; found = true; break; } } if (!found) { ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) << "Expected '" << substring << "'.\n" << "Not found anywhere in the rest of the output."; } else { // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data()); actual_line++; } } } } // Buffers which are going to assembled into ELF file and passed to objdump. std::vector debug_frame_data_; std::vector debug_info_data_; std::vector debug_abbrev_data_; std::vector debug_str_data_; std::vector debug_line_data_; // The expected output of objdump. std::vector expected_lines_; }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_DWARF_TEST_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/expression.h000066400000000000000000000070551336577252300245470ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_ #define ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_ #include #include #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/writer.h" namespace art { namespace dwarf { // Writer for DWARF expressions which are used in .debug_info and .debug_loc sections. // See the DWARF specification for the precise meaning of the opcodes. 
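// For example (illustrative), a value kept in DWARF register 5 is just the
// single byte DW_OP_reg5, while a slot spilled at frame offset -16 would be
// emitted as DW_OP_fbreg followed by the SLEB128-encoded offset.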
// If multiple equivalent encodings are possible, it will choose the most compact one. // The writer is not exhaustive - it only implements opcodes we have needed so far. class Expression : private Writer<> { public: using Writer<>::data; using Writer<>::size; // Push signed integer on the stack. void WriteOpConsts(int32_t value) { if (0 <= value && value < 32) { PushUint8(DW_OP_lit0 + value); } else { PushUint8(DW_OP_consts); PushSleb128(value); } } // Push unsigned integer on the stack. void WriteOpConstu(uint32_t value) { if (value < 32) { PushUint8(DW_OP_lit0 + value); } else { PushUint8(DW_OP_constu); PushUleb128(value); } } // Variable is stored in given register. void WriteOpReg(uint32_t dwarf_reg_num) { if (dwarf_reg_num < 32) { PushUint8(DW_OP_reg0 + dwarf_reg_num); } else { PushUint8(DW_OP_regx); PushUleb128(dwarf_reg_num); } } // Variable is stored on stack. Also see DW_AT_frame_base. void WriteOpFbreg(int32_t stack_offset) { PushUint8(DW_OP_fbreg); PushSleb128(stack_offset); } // The variable is stored in multiple locations (pieces). void WriteOpPiece(uint32_t num_bytes) { PushUint8(DW_OP_piece); PushUleb128(num_bytes); } // Loads 32-bit or 64-bit value depending on architecture. void WriteOpDeref() { PushUint8(DW_OP_deref); } // Loads value of given byte size. void WriteOpDerefSize(uint8_t num_bytes) { PushUint8(DW_OP_deref_size); PushUint8(num_bytes); } // Pop two values and push their sum. void WriteOpPlus() { PushUint8(DW_OP_plus); } // Add constant value to value on top of stack. void WriteOpPlusUconst(uint32_t offset) { PushUint8(DW_OP_plus_uconst); PushUleb128(offset); } // Negate top of stack. void WriteOpNeg() { PushUint8(DW_OP_neg); } // Pop two values and push their bitwise-AND. void WriteOpAnd() { PushUint8(DW_OP_and); } // Push stack base pointer as determined from .debug_frame. void WriteOpCallFrameCfa() { PushUint8(DW_OP_call_frame_cfa); } // Push address of the variable we are working with. void WriteOpPushObjectAddress() { PushUint8(DW_OP_push_object_address); } // Return the top of stack as the value of the variable. // Otherwise, the top of stack is the variable's location. void WriteOpStackValue() { PushUint8(DW_OP_stack_value); } explicit Expression(std::vector<uint8_t>* buffer) : Writer<>(buffer) { buffer->clear(); } }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_EXPRESSION_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/headers.h000066400000000000000000000211011336577252300237570ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/ #ifndef ART_COMPILER_DEBUG_DWARF_HEADERS_H_ #define ART_COMPILER_DEBUG_DWARF_HEADERS_H_ #include <vector> #include "base/array_ref.h" #include "debug/dwarf/debug_frame_opcode_writer.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/dwarf/debug_line_opcode_writer.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/register.h" #include "debug/dwarf/writer.h" namespace art { namespace dwarf { // Note that all headers start with 32-bit length. // DWARF also supports 64-bit lengths, but we never use that. // It is intended to support very large debug sections (>4GB), // and compilers are expected *not* to use it by default. // In particular, it is not related to machine architecture. // Write common information entry (CIE) to .debug_frame or .eh_frame section. template <typename Vector> void WriteCIE(bool is64bit, Reg return_address_register, const DebugFrameOpCodeWriter<Vector>& opcodes, CFIFormat format, std::vector<uint8_t>* buffer) { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); Writer<> writer(buffer); size_t cie_header_start_ = writer.data()->size(); writer.PushUint32(0); // Length placeholder. writer.PushUint32((format == DW_EH_FRAME_FORMAT) ? 0 : 0xFFFFFFFF); // CIE id. writer.PushUint8(1); // Version. writer.PushString("zR"); writer.PushUleb128(DebugFrameOpCodeWriter<Vector>::kCodeAlignmentFactor); writer.PushSleb128(DebugFrameOpCodeWriter<Vector>::kDataAlignmentFactor); writer.PushUleb128(return_address_register.num()); // ubyte in DWARF2. writer.PushUleb128(1); // z: Augmentation data size. if (is64bit) { if (format == DW_EH_FRAME_FORMAT) { writer.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata8); // R: Pointer encoding. } else { DCHECK(format == DW_DEBUG_FRAME_FORMAT); writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata8); // R: Pointer encoding. } } else { if (format == DW_EH_FRAME_FORMAT) { writer.PushUint8(DW_EH_PE_pcrel | DW_EH_PE_sdata4); // R: Pointer encoding. } else { DCHECK(format == DW_DEBUG_FRAME_FORMAT); writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata4); // R: Pointer encoding. } } writer.PushData(opcodes.data()); writer.Pad(is64bit ? 8 : 4); writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4); } // Write frame description entry (FDE) to .debug_frame or .eh_frame section. inline void WriteFDE(bool is64bit, uint64_t section_address, // Absolute address of the section. uint64_t cie_address, // Absolute address of last CIE. uint64_t code_address, uint64_t code_size, const ArrayRef<const uint8_t>& opcodes, CFIFormat format, uint64_t buffer_address, // Address of buffer in linked application. std::vector<uint8_t>* buffer, std::vector<uintptr_t>* patch_locations) { CHECK_GE(cie_address, section_address); CHECK_GE(buffer_address, section_address); Writer<> writer(buffer); size_t fde_header_start = writer.data()->size(); writer.PushUint32(0); // Length placeholder. if (format == DW_EH_FRAME_FORMAT) { uint32_t cie_pointer = (buffer_address + buffer->size()) - cie_address; writer.PushUint32(cie_pointer); } else { DCHECK(format == DW_DEBUG_FRAME_FORMAT); uint32_t cie_pointer = cie_address - section_address; writer.PushUint32(cie_pointer); } if (format == DW_EH_FRAME_FORMAT) { // .eh_frame encodes the location as relative address. code_address -= buffer_address + buffer->size(); } else { DCHECK(format == DW_DEBUG_FRAME_FORMAT); // Relocate code_address if it has absolute value.
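// The offset recorded below is where the absolute code address is about to be
// written (the current end of the buffer), expressed relative to the start of
// the section so the ELF writer can patch it once final addresses are known.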
patch_locations->push_back(buffer_address + buffer->size() - section_address); } if (is64bit) { writer.PushUint64(code_address); writer.PushUint64(code_size); } else { writer.PushUint32(code_address); writer.PushUint32(code_size); } writer.PushUleb128(0); // Augmentation data size. writer.PushData(opcodes.data(), opcodes.size()); writer.Pad(is64bit ? 8 : 4); writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4); } // Write compilation unit (CU) to .debug_info section. template <typename Vector> void WriteDebugInfoCU(uint32_t debug_abbrev_offset, const DebugInfoEntryWriter<Vector>& entries, size_t debug_info_offset, // offset from start of .debug_info. std::vector<uint8_t>* debug_info, std::vector<uintptr_t>* debug_info_patches) { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); Writer<> writer(debug_info); size_t start = writer.data()->size(); writer.PushUint32(0); // Length placeholder. writer.PushUint16(4); // Version. writer.PushUint32(debug_abbrev_offset); writer.PushUint8(entries.Is64bit() ? 8 : 4); size_t entries_offset = writer.data()->size(); DCHECK_EQ(entries_offset, DebugInfoEntryWriter<Vector>::kCompilationUnitHeaderSize); writer.PushData(entries.data()); writer.UpdateUint32(start, writer.data()->size() - start - 4); // Copy patch locations and make them relative to .debug_info section. for (uintptr_t patch_location : entries.GetPatchLocations()) { debug_info_patches->push_back(debug_info_offset + entries_offset + patch_location); } } struct FileEntry { std::string file_name; int directory_index; int modification_time; int file_size; }; // Write line table to .debug_line section. template <typename Vector> void WriteDebugLineTable(const std::vector<std::string>& include_directories, const std::vector<FileEntry>& files, const DebugLineOpCodeWriter<Vector>& opcodes, size_t debug_line_offset, // offset from start of .debug_line. std::vector<uint8_t>* debug_line, std::vector<uintptr_t>* debug_line_patches) { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); Writer<> writer(debug_line); size_t header_start = writer.data()->size(); writer.PushUint32(0); // Section-length placeholder. writer.PushUint16(3); // .debug_line version. size_t header_length_pos = writer.data()->size(); writer.PushUint32(0); // Header-length placeholder. writer.PushUint8(1 << opcodes.GetCodeFactorBits()); writer.PushUint8(DebugLineOpCodeWriter<Vector>::kDefaultIsStmt ? 1 : 0); writer.PushInt8(DebugLineOpCodeWriter<Vector>::kLineBase); writer.PushUint8(DebugLineOpCodeWriter<Vector>::kLineRange); writer.PushUint8(DebugLineOpCodeWriter<Vector>::kOpcodeBase); static const int opcode_lengths[DebugLineOpCodeWriter<Vector>::kOpcodeBase] = { 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 }; for (int i = 1; i < DebugLineOpCodeWriter<Vector>::kOpcodeBase; i++) { writer.PushUint8(opcode_lengths[i]); } for (const std::string& directory : include_directories) { writer.PushData(directory.data(), directory.size() + 1); } writer.PushUint8(0); // Terminate include_directories list. for (const FileEntry& file : files) { writer.PushData(file.file_name.data(), file.file_name.size() + 1); writer.PushUleb128(file.directory_index); writer.PushUleb128(file.modification_time); writer.PushUleb128(file.file_size); } writer.PushUint8(0); // Terminate file list. writer.UpdateUint32(header_length_pos, writer.data()->size() - header_length_pos - 4); size_t opcodes_offset = writer.data()->size(); writer.PushData(opcodes.data()); writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4); // Copy patch locations and make them relative to .debug_line section.
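// Each recorded location is an offset into the opcode stream (see
// DW_LNE_set_address in DebugLineOpCodeWriter::SetAddress); the sum below
// rebases it by the table's offset within .debug_line plus the header bytes
// that precede the opcodes.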
for (uintptr_t patch_location : opcodes.GetPatchLocations()) { debug_line_patches->push_back(debug_line_offset + opcodes_offset + patch_location); } } } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_HEADERS_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/register.h000066400000000000000000000044621336577252300241730ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_DWARF_REGISTER_H_ #define ART_COMPILER_DEBUG_DWARF_REGISTER_H_ namespace art { namespace dwarf { // Represents DWARF register. class Reg { public: explicit Reg(int reg_num) : num_(reg_num) { } int num() const { return num_; } // TODO: Arm S0–S31 register mapping is obsolescent. // We should use VFP-v3/Neon D0-D31 mapping instead. // However, D0 is aliased to pair of S0 and S1, so using that // mapping we cannot easily say S0 is spilled and S1 is not. // There are ways around this in DWARF but they are complex. // It would be much simpler to always spill whole D registers. // Arm64 mapping is correct since we already do this there. // libunwind might struggle with the new mapping as well. static Reg ArmCore(int num) { return Reg(num); } // R0-R15. static Reg ArmFp(int num) { return Reg(64 + num); } // S0–S31. static Reg ArmDp(int num) { return Reg(256 + num); } // D0–D31. static Reg Arm64Core(int num) { return Reg(num); } // X0-X31. static Reg Arm64Fp(int num) { return Reg(64 + num); } // V0-V31. static Reg MipsCore(int num) { return Reg(num); } static Reg Mips64Core(int num) { return Reg(num); } static Reg MipsFp(int num) { return Reg(32 + num); } static Reg Mips64Fp(int num) { return Reg(32 + num); } static Reg X86Core(int num) { return Reg(num); } static Reg X86Fp(int num) { return Reg(21 + num); } static Reg X86_64Core(int num) { static const int map[8] = {0, 2, 1, 3, 7, 6, 4, 5}; return Reg(num < 8 ? map[num] : num); } static Reg X86_64Fp(int num) { return Reg(17 + num); } private: int num_; }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_REGISTER_H_ android-platform-art-8.1.0+r23/compiler/debug/dwarf/writer.h000066400000000000000000000114521336577252300236600ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_DEBUG_DWARF_WRITER_H_ #define ART_COMPILER_DEBUG_DWARF_WRITER_H_ #include <type_traits> #include <vector> #include "base/bit_utils.h" #include "base/logging.h" #include "leb128.h" namespace art { namespace dwarf { // The base class for all DWARF writers. template <typename Vector = std::vector<uint8_t>> class Writer { static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type"); public: void PushUint8(int value) { DCHECK_GE(value, 0); DCHECK_LE(value, UINT8_MAX); data_->push_back(value & 0xff); } void PushUint16(int value) { DCHECK_GE(value, 0); DCHECK_LE(value, UINT16_MAX); data_->push_back((value >> 0) & 0xff); data_->push_back((value >> 8) & 0xff); } void PushUint32(uint32_t value) { data_->push_back((value >> 0) & 0xff); data_->push_back((value >> 8) & 0xff); data_->push_back((value >> 16) & 0xff); data_->push_back((value >> 24) & 0xff); } void PushUint32(int value) { DCHECK_GE(value, 0); PushUint32(static_cast<uint32_t>(value)); } void PushUint32(uint64_t value) { DCHECK_LE(value, UINT32_MAX); PushUint32(static_cast<uint32_t>(value)); } void PushUint64(uint64_t value) { data_->push_back((value >> 0) & 0xff); data_->push_back((value >> 8) & 0xff); data_->push_back((value >> 16) & 0xff); data_->push_back((value >> 24) & 0xff); data_->push_back((value >> 32) & 0xff); data_->push_back((value >> 40) & 0xff); data_->push_back((value >> 48) & 0xff); data_->push_back((value >> 56) & 0xff); } void PushInt8(int value) { DCHECK_GE(value, INT8_MIN); DCHECK_LE(value, INT8_MAX); PushUint8(static_cast<uint8_t>(value)); } void PushInt16(int value) { DCHECK_GE(value, INT16_MIN); DCHECK_LE(value, INT16_MAX); PushUint16(static_cast<uint16_t>(value)); } void PushInt32(int value) { PushUint32(static_cast<uint32_t>(value)); } void PushInt64(int64_t value) { PushUint64(static_cast<uint64_t>(value)); } // Variable-length encoders. void PushUleb128(uint32_t value) { EncodeUnsignedLeb128(data_, value); } void PushUleb128(int value) { DCHECK_GE(value, 0); EncodeUnsignedLeb128(data_, value); } void PushSleb128(int value) { EncodeSignedLeb128(data_, value); } // Miscellaneous functions.
// Miscellaneous functions. void PushString(const char* value) { data_->insert(data_->end(), value, value + strlen(value) + 1); } void PushData(const uint8_t* ptr, size_t num_bytes) { data_->insert(data_->end(), ptr, ptr + num_bytes); } void PushData(const char* ptr, size_t num_bytes) { data_->insert(data_->end(), ptr, ptr + num_bytes); } void PushData(const Vector* buffer) { data_->insert(data_->end(), buffer->begin(), buffer->end()); } void UpdateUint32(size_t offset, uint32_t value) { DCHECK_LT(offset + 3, data_->size()); (*data_)[offset + 0] = (value >> 0) & 0xFF; (*data_)[offset + 1] = (value >> 8) & 0xFF; (*data_)[offset + 2] = (value >> 16) & 0xFF; (*data_)[offset + 3] = (value >> 24) & 0xFF; } void UpdateUint64(size_t offset, uint64_t value) { DCHECK_LT(offset + 7, data_->size()); (*data_)[offset + 0] = (value >> 0) & 0xFF; (*data_)[offset + 1] = (value >> 8) & 0xFF; (*data_)[offset + 2] = (value >> 16) & 0xFF; (*data_)[offset + 3] = (value >> 24) & 0xFF; (*data_)[offset + 4] = (value >> 32) & 0xFF; (*data_)[offset + 5] = (value >> 40) & 0xFF; (*data_)[offset + 6] = (value >> 48) & 0xFF; (*data_)[offset + 7] = (value >> 56) & 0xFF; } void UpdateUleb128(size_t offset, uint32_t value) { DCHECK_LE(offset + UnsignedLeb128Size(value), data_->size()); UpdateUnsignedLeb128(data_->data() + offset, value); } void Pop() { return data_->pop_back(); } void Pad(int alignment) { DCHECK_NE(alignment, 0); data_->resize(RoundUp(data_->size(), alignment), 0); } const Vector* data() const { return data_; } size_t size() const { return data_->size(); } explicit Writer(Vector* buffer) : data_(buffer) { } private: Vector* const data_; DISALLOW_COPY_AND_ASSIGN(Writer); }; } // namespace dwarf } // namespace art #endif // ART_COMPILER_DEBUG_DWARF_WRITER_H_
android-platform-art-8.1.0+r23/compiler/debug/elf_compilation_unit.h000066400000000000000000000022671336577252300254470ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ #define ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_ #include <vector> #include "debug/method_debug_info.h" namespace art { namespace debug { struct ElfCompilationUnit { std::vector<const MethodDebugInfo*> methods; size_t debug_line_offset = 0; bool is_code_address_text_relative; // Is the address offset from start of .text section? uint64_t code_address = std::numeric_limits<uint64_t>::max(); uint64_t code_end = 0; }; } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_COMPILATION_UNIT_H_
android-platform-art-8.1.0+r23/compiler/debug/elf_debug_frame_writer.h000066400000000000000000000244051336577252300257250ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_ #include #include "arch/instruction_set.h" #include "debug/dwarf/debug_frame_opcode_writer.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/dwarf/headers.h" #include "debug/method_debug_info.h" #include "elf_builder.h" namespace art { namespace debug { static void WriteCIE(InstructionSet isa, dwarf::CFIFormat format, std::vector* buffer) { using Reg = dwarf::Reg; // Scratch registers should be marked as undefined. This tells the // debugger that its value in the previous frame is not recoverable. bool is64bit = Is64BitInstructionSet(isa); switch (isa) { case kArm: case kThumb2: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::ArmCore(13), 0); // R13(SP). // core registers. for (int reg = 0; reg < 13; reg++) { if (reg < 4 || reg == 12) { opcodes.Undefined(Reg::ArmCore(reg)); } else { opcodes.SameValue(Reg::ArmCore(reg)); } } // fp registers. for (int reg = 0; reg < 32; reg++) { if (reg < 16) { opcodes.Undefined(Reg::ArmFp(reg)); } else { opcodes.SameValue(Reg::ArmFp(reg)); } } auto return_reg = Reg::ArmCore(14); // R14(LR). WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } case kArm64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::Arm64Core(31), 0); // R31(SP). // core registers. for (int reg = 0; reg < 30; reg++) { if (reg < 8 || reg == 16 || reg == 17) { opcodes.Undefined(Reg::Arm64Core(reg)); } else { opcodes.SameValue(Reg::Arm64Core(reg)); } } // fp registers. for (int reg = 0; reg < 32; reg++) { if (reg < 8 || reg >= 16) { opcodes.Undefined(Reg::Arm64Fp(reg)); } else { opcodes.SameValue(Reg::Arm64Fp(reg)); } } auto return_reg = Reg::Arm64Core(30); // R30(LR). WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } case kMips: case kMips64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::MipsCore(29), 0); // R29(SP). // core registers. for (int reg = 1; reg < 26; reg++) { if (reg < 16 || reg == 24 || reg == 25) { // AT, V*, A*, T*. opcodes.Undefined(Reg::MipsCore(reg)); } else { opcodes.SameValue(Reg::MipsCore(reg)); } } // fp registers. for (int reg = 0; reg < 32; reg++) { if (reg < 24) { opcodes.Undefined(Reg::Mips64Fp(reg)); } else { opcodes.SameValue(Reg::Mips64Fp(reg)); } } auto return_reg = Reg::MipsCore(31); // R31(RA). WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } case kX86: { // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296 constexpr bool generate_opcodes_for_x86_fp = false; dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::X86Core(4), 4); // R4(ESP). opcodes.Offset(Reg::X86Core(8), -4); // R8(EIP). // core registers. for (int reg = 0; reg < 8; reg++) { if (reg <= 3) { opcodes.Undefined(Reg::X86Core(reg)); } else if (reg == 4) { // Stack pointer. } else { opcodes.SameValue(Reg::X86Core(reg)); } } // fp registers. if (generate_opcodes_for_x86_fp) { for (int reg = 0; reg < 8; reg++) { opcodes.Undefined(Reg::X86Fp(reg)); } } auto return_reg = Reg::X86Core(8); // R8(EIP). 
WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } case kX86_64: { dwarf::DebugFrameOpCodeWriter<> opcodes; opcodes.DefCFA(Reg::X86_64Core(4), 8); // R4(RSP). opcodes.Offset(Reg::X86_64Core(16), -8); // R16(RIP). // core registers. for (int reg = 0; reg < 16; reg++) { if (reg == 4) { // Stack pointer. } else if (reg < 12 && reg != 3 && reg != 5) { // except EBX and EBP. opcodes.Undefined(Reg::X86_64Core(reg)); } else { opcodes.SameValue(Reg::X86_64Core(reg)); } } // fp registers. for (int reg = 0; reg < 16; reg++) { if (reg < 12) { opcodes.Undefined(Reg::X86_64Fp(reg)); } else { opcodes.SameValue(Reg::X86_64Fp(reg)); } } auto return_reg = Reg::X86_64Core(16); // R16(RIP). WriteCIE(is64bit, return_reg, opcodes, format, buffer); return; } case kNone: break; } LOG(FATAL) << "Cannot write CIE frame for ISA " << isa; UNREACHABLE(); } template void WriteCFISection(ElfBuilder* builder, const ArrayRef& method_infos, dwarf::CFIFormat format, bool write_oat_patches) { CHECK(format == dwarf::DW_DEBUG_FRAME_FORMAT || format == dwarf::DW_EH_FRAME_FORMAT); typedef typename ElfTypes::Addr Elf_Addr; // The methods can be written in any order. // Let's therefore sort them in the lexicographical order of the opcodes. // This has no effect on its own. However, if the final .debug_frame section is // compressed it reduces the size since similar opcodes sequences are grouped. std::vector sorted_method_infos; sorted_method_infos.reserve(method_infos.size()); for (size_t i = 0; i < method_infos.size(); i++) { if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) { sorted_method_infos.push_back(&method_infos[i]); } } if (sorted_method_infos.empty()) { return; } std::stable_sort( sorted_method_infos.begin(), sorted_method_infos.end(), [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) { ArrayRef l = lhs->cfi; ArrayRef r = rhs->cfi; return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end()); }); std::vector binary_search_table; std::vector patch_locations; if (format == dwarf::DW_EH_FRAME_FORMAT) { binary_search_table.reserve(2 * sorted_method_infos.size()); } else { patch_locations.reserve(sorted_method_infos.size()); } // Write .eh_frame/.debug_frame section. auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT ? builder->GetDebugFrame() : builder->GetEhFrame()); { cfi_section->Start(); const bool is64bit = Is64BitInstructionSet(builder->GetIsa()); const Elf_Addr cfi_address = cfi_section->GetAddress(); const Elf_Addr cie_address = cfi_address; Elf_Addr buffer_address = cfi_address; std::vector buffer; // Small temporary buffer. WriteCIE(builder->GetIsa(), format, &buffer); cfi_section->WriteFully(buffer.data(), buffer.size()); buffer_address += buffer.size(); buffer.clear(); for (const MethodDebugInfo* mi : sorted_method_infos) { DCHECK(!mi->deduped); DCHECK(!mi->cfi.empty()); const Elf_Addr code_address = mi->code_address + (mi->is_code_address_text_relative ? 
builder->GetText()->GetAddress() : 0); if (format == dwarf::DW_EH_FRAME_FORMAT) { binary_search_table.push_back(dchecked_integral_cast<uint32_t>(code_address)); binary_search_table.push_back(dchecked_integral_cast<uint32_t>(buffer_address)); } WriteFDE(is64bit, cfi_address, cie_address, code_address, mi->code_size, mi->cfi, format, buffer_address, &buffer, &patch_locations); cfi_section->WriteFully(buffer.data(), buffer.size()); buffer_address += buffer.size(); buffer.clear(); } cfi_section->End(); } if (format == dwarf::DW_EH_FRAME_FORMAT) { auto* header_section = builder->GetEhFrameHdr(); header_section->Start(); uint32_t header_address = dchecked_integral_cast<uint32_t>(header_section->GetAddress()); // Write .eh_frame_hdr section. std::vector<uint8_t> buffer; dwarf::Writer<> header(&buffer); header.PushUint8(1); // Version. // Encoding of .eh_frame pointer - libunwind does not honor datarel here, // so we have to use pcrel which means relative to the pointer's location. header.PushUint8(dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4); // Encoding of binary search table size. header.PushUint8(dwarf::DW_EH_PE_udata4); // Encoding of binary search table addresses - libunwind supports only this // specific combination, which means relative to the start of .eh_frame_hdr. header.PushUint8(dwarf::DW_EH_PE_datarel | dwarf::DW_EH_PE_sdata4); // .eh_frame pointer header.PushInt32(cfi_section->GetAddress() - (header_address + 4u)); // Binary search table size (number of entries). header.PushUint32(dchecked_integral_cast<uint32_t>(binary_search_table.size() / 2)); header_section->WriteFully(buffer.data(), buffer.size()); // Binary search table. for (size_t i = 0; i < binary_search_table.size(); i++) { // Make addresses section-relative since we know the header address now. binary_search_table[i] -= header_address; } header_section->WriteFully(binary_search_table.data(), binary_search_table.size() * sizeof(binary_search_table[0])); header_section->End(); } else { if (write_oat_patches) { builder->WritePatches(".debug_frame.oat_patches", ArrayRef<const uintptr_t>(patch_locations)); } } } } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_DEBUG_FRAME_WRITER_H_
android-platform-art-8.1.0+r23/compiler/debug/elf_debug_info_writer.h000066400000000000000000000646321336577252300255720ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_ #include <map> #include <unordered_set> #include <vector> #include "art_field-inl.h" #include "debug/dwarf/debug_abbrev_writer.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/elf_compilation_unit.h" #include "debug/elf_debug_loc_writer.h" #include "debug/method_debug_info.h" #include "dex_file-inl.h" #include "dex_file.h" #include "elf_builder.h" #include "linear_alloc.h" #include "mirror/array.h" #include "mirror/class-inl.h" #include "mirror/class.h" namespace art { namespace debug { typedef std::vector<DexFile::LocalInfo> LocalInfos; static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) { static_cast<LocalInfos*>(ctx)->push_back(entry); } static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) { std::vector<const char*> names; if (mi->code_item != nullptr) { DCHECK(mi->dex_file != nullptr); const uint8_t* stream = mi->dex_file->GetDebugInfoStream(mi->code_item); if (stream != nullptr) { DecodeUnsignedLeb128(&stream); // line. uint32_t parameters_size = DecodeUnsignedLeb128(&stream); for (uint32_t i = 0; i < parameters_size; ++i) { uint32_t id = DecodeUnsignedLeb128P1(&stream); names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id))); } } } return names; } // Helper class to write .debug_info and its supporting sections. template <typename ElfTypes> class ElfDebugInfoWriter { using Elf_Addr = typename ElfTypes::Addr; public: explicit ElfDebugInfoWriter(ElfBuilder<ElfTypes>* builder) : builder_(builder), debug_abbrev_(&debug_abbrev_buffer_) { } void Start() { builder_->GetDebugInfo()->Start(); } void End(bool write_oat_patches) { builder_->GetDebugInfo()->End(); if (write_oat_patches) { builder_->WritePatches(".debug_info.oat_patches", ArrayRef<const uintptr_t>(debug_info_patches_)); } builder_->WriteSection(".debug_abbrev", &debug_abbrev_buffer_); if (!debug_loc_.empty()) { builder_->WriteSection(".debug_loc", &debug_loc_); } if (!debug_ranges_.empty()) { builder_->WriteSection(".debug_ranges", &debug_ranges_); } } private: ElfBuilder<ElfTypes>* builder_; std::vector<uintptr_t> debug_info_patches_; std::vector<uint8_t> debug_abbrev_buffer_; dwarf::DebugAbbrevWriter<> debug_abbrev_; std::vector<uint8_t> debug_loc_; std::vector<uint8_t> debug_ranges_; std::unordered_set<const char*> defined_dex_classes_; // For CHECKs only. template <typename ElfTypes2> friend class ElfCompilationUnitWriter; }; // Helper class to write one compilation unit. // It holds helper methods and temporary state. template <typename ElfTypes> class ElfCompilationUnitWriter { using Elf_Addr = typename ElfTypes::Addr; public: explicit ElfCompilationUnitWriter(ElfDebugInfoWriter<ElfTypes>* owner) : owner_(owner), info_(Is64BitInstructionSet(owner_->builder_->GetIsa()), &owner->debug_abbrev_) { } void Write(const ElfCompilationUnit& compilation_unit) { CHECK(!compilation_unit.methods.empty()); const Elf_Addr base_address = compilation_unit.is_code_address_text_relative ? owner_->builder_->GetText()->GetAddress() : 0; const uint64_t cu_size = compilation_unit.code_end - compilation_unit.code_address; using namespace dwarf; // NOLINT. For easy access to DWARF constants.
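// Illustrative sketch (not from the original source; the example method is
// hypothetical) of the DIE tree emitted below for a compilation unit that
// contains java.lang.String.charAt:
//
//   DW_TAG_compile_unit
//     DW_TAG_namespace "java"
//       DW_TAG_namespace "lang"
//         DW_TAG_class_type "String"
//           DW_TAG_subprogram "charAt"
//             DW_TAG_formal_parameter "this"
//             DW_TAG_formal_parameter  (the index argument)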
info_.StartTag(DW_TAG_compile_unit); info_.WriteString(DW_AT_producer, "Android dex2oat"); info_.WriteData1(DW_AT_language, DW_LANG_Java); info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT"); info_.WriteAddr(DW_AT_low_pc, base_address + compilation_unit.code_address); info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast(cu_size)); info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset); const char* last_dex_class_desc = nullptr; for (auto mi : compilation_unit.methods) { DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; const DexFile::CodeItem* dex_code = mi->code_item; const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index); const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method); const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto); const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method); const bool is_static = (mi->access_flags & kAccStatic) != 0; // Enclose the method in correct class definition. if (last_dex_class_desc != dex_class_desc) { if (last_dex_class_desc != nullptr) { EndClassTag(); } // Write reference tag for the class we are about to declare. size_t reference_tag_offset = info_.StartTag(DW_TAG_reference_type); type_cache_.emplace(std::string(dex_class_desc), reference_tag_offset); size_t type_attrib_offset = info_.size(); info_.WriteRef4(DW_AT_type, 0); info_.EndTag(); // Declare the class that owns this method. size_t class_offset = StartClassTag(dex_class_desc); info_.UpdateUint32(type_attrib_offset, class_offset); info_.WriteFlagPresent(DW_AT_declaration); // Check that each class is defined only once. bool unique = owner_->defined_dex_classes_.insert(dex_class_desc).second; CHECK(unique) << "Redefinition of " << dex_class_desc; last_dex_class_desc = dex_class_desc; } int start_depth = info_.Depth(); info_.StartTag(DW_TAG_subprogram); WriteName(dex->GetMethodName(dex_method)); info_.WriteAddr(DW_AT_low_pc, base_address + mi->code_address); info_.WriteUdata(DW_AT_high_pc, mi->code_size); std::vector expr_buffer; Expression expr(&expr_buffer); expr.WriteOpCallFrameCfa(); info_.WriteExprLoc(DW_AT_frame_base, expr); WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto)); // Decode dex register locations for all stack maps. // It might be expensive, so do it just once and reuse the result. std::vector dex_reg_maps; if (mi->code_info != nullptr) { const CodeInfo code_info(mi->code_info); CodeInfoEncoding encoding = code_info.ExtractEncoding(); for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) { const StackMap& stack_map = code_info.GetStackMapAt(s, encoding); dex_reg_maps.push_back(code_info.GetDexRegisterMapOf( stack_map, encoding, dex_code->registers_size_)); } } // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not // guarantee order or uniqueness so it is safer to iterate over them manually. // DecodeDebugLocalInfo might not also be available if there is no debug info. std::vector param_names = GetParamNames(mi); uint32_t arg_reg = 0; if (!is_static) { info_.StartTag(DW_TAG_formal_parameter); WriteName("this"); info_.WriteFlagPresent(DW_AT_artificial); WriteLazyType(dex_class_desc); if (dex_code != nullptr) { // Write the stack location of the parameter. 
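// The dex calling convention passes incoming arguments in the last
// ins_size_ registers of the registers_size_-register frame, so the first
// argument lives in vreg registers_size_ - ins_size_; e.g. with
// registers_size_ = 5 and ins_size_ = 2, the arguments occupy v3 and v4.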
const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; const bool is64bitValue = false; WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } arg_reg++; info_.EndTag(); } if (dex_params != nullptr) { for (uint32_t i = 0; i < dex_params->Size(); ++i) { info_.StartTag(DW_TAG_formal_parameter); // Parameter names may not be always available. if (i < param_names.size()) { WriteName(param_names[i]); } // Write the type. const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_); WriteLazyType(type_desc); const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J'; if (dex_code != nullptr) { // Write the stack location of the parameter. const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } arg_reg += is64bitValue ? 2 : 1; info_.EndTag(); } if (dex_code != nullptr) { DCHECK_EQ(arg_reg, dex_code->ins_size_); } } // Write local variables. LocalInfos local_infos; if (dex->DecodeDebugLocalInfo(dex_code, is_static, mi->dex_method_index, LocalInfoCallback, &local_infos)) { for (const DexFile::LocalInfo& var : local_infos) { if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) { info_.StartTag(DW_TAG_variable); WriteName(var.name_); WriteLazyType(var.descriptor_); bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J'; WriteRegLocation(mi, dex_reg_maps, var.reg_, is64bitValue, compilation_unit.code_address, var.start_address_, var.end_address_); info_.EndTag(); } } } info_.EndTag(); CHECK_EQ(info_.Depth(), start_depth); // Balanced start/end. } if (last_dex_class_desc != nullptr) { EndClassTag(); } FinishLazyTypes(); CloseNamespacesAboveDepth(0); info_.EndTag(); // DW_TAG_compile_unit CHECK_EQ(info_.Depth(), 0); std::vector buffer; buffer.reserve(info_.data()->size() + KB); const size_t offset = owner_->builder_->GetDebugInfo()->GetSize(); // All compilation units share single table which is at the start of .debug_abbrev. const size_t debug_abbrev_offset = 0; WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_); owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size()); } void Write(const ArrayRef& types) REQUIRES_SHARED(Locks::mutator_lock_) { using namespace dwarf; // NOLINT. For easy access to DWARF constants. info_.StartTag(DW_TAG_compile_unit); info_.WriteString(DW_AT_producer, "Android dex2oat"); info_.WriteData1(DW_AT_language, DW_LANG_Java); // Base class references to be patched at the end. std::map base_class_references; // Already written declarations or definitions. std::map class_declarations; std::vector expr_buffer; for (mirror::Class* type : types) { if (type->IsPrimitive()) { // For primitive types the definition and the declaration is the same. if (type->GetPrimitiveType() != Primitive::kPrimVoid) { WriteTypeDeclaration(type->GetDescriptor(nullptr)); } } else if (type->IsArrayClass()) { mirror::Class* element_type = type->GetComponentType(); uint32_t component_size = type->GetComponentSize(); uint32_t data_offset = mirror::Array::DataOffset(component_size).Uint32Value(); uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value(); CloseNamespacesAboveDepth(0); // Declare in root namespace. 
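// The element count of an array is not a compile-time constant, so the
// DW_AT_count attribute written below is a DWARF expression which reads the
// length field out of the array object at debug time.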
info_.StartTag(DW_TAG_array_type); std::string descriptor_string; WriteLazyType(element_type->GetDescriptor(&descriptor_string)); WriteLinkageName(type); info_.WriteUdata(DW_AT_data_member_location, data_offset); info_.StartTag(DW_TAG_subrange_type); Expression count_expr(&expr_buffer); count_expr.WriteOpPushObjectAddress(); count_expr.WriteOpPlusUconst(length_offset); count_expr.WriteOpDerefSize(4); // Array length is always 32-bit wide. info_.WriteExprLoc(DW_AT_count, count_expr); info_.EndTag(); // DW_TAG_subrange_type. info_.EndTag(); // DW_TAG_array_type. } else if (type->IsInterface()) { // Skip. Variables cannot have an interface as a dynamic type. // We do not expose the interface information to the debugger in any way. } else { std::string descriptor_string; const char* desc = type->GetDescriptor(&descriptor_string); size_t class_offset = StartClassTag(desc); class_declarations.emplace(type, class_offset); if (!type->IsVariableSize()) { info_.WriteUdata(DW_AT_byte_size, type->GetObjectSize()); } WriteLinkageName(type); if (type->IsObjectClass()) { // Generate artificial member which is used to get the dynamic type of variable. // The run-time value of this field will correspond to linkage name of some type. // We need to do it only once in j.l.Object since all other types inherit it. info_.StartTag(DW_TAG_member); WriteName(".dynamic_type"); WriteLazyType(sizeof(uintptr_t) == 8 ? "J" : "I"); info_.WriteFlagPresent(DW_AT_artificial); // Create DWARF expression to get the value of the methods_ field. Expression expr(&expr_buffer); // The address of the object has been implicitly pushed on the stack. // Dereference the klass_ field of Object (32-bit; possibly poisoned). DCHECK_EQ(type->ClassOffset().Uint32Value(), 0u); DCHECK_EQ(sizeof(mirror::HeapReference), 4u); expr.WriteOpDerefSize(4); if (kPoisonHeapReferences) { expr.WriteOpNeg(); // DWARF stack is pointer sized. Ensure that the high bits are clear. expr.WriteOpConstu(0xFFFFFFFF); expr.WriteOpAnd(); } // Add offset to the methods_ field. expr.WriteOpPlusUconst(mirror::Class::MethodsOffset().Uint32Value()); // Top of stack holds the location of the field now. info_.WriteExprLoc(DW_AT_data_member_location, expr); info_.EndTag(); // DW_TAG_member. } // Base class. mirror::Class* base_class = type->GetSuperClass(); if (base_class != nullptr) { info_.StartTag(DW_TAG_inheritance); base_class_references.emplace(info_.size(), base_class); info_.WriteRef4(DW_AT_type, 0); info_.WriteUdata(DW_AT_data_member_location, 0); info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public); info_.EndTag(); // DW_TAG_inheritance. } // Member variables. for (uint32_t i = 0, count = type->NumInstanceFields(); i < count; ++i) { ArtField* field = type->GetInstanceField(i); info_.StartTag(DW_TAG_member); WriteName(field->GetName()); WriteLazyType(field->GetTypeDescriptor()); info_.WriteUdata(DW_AT_data_member_location, field->GetOffset().Uint32Value()); uint32_t access_flags = field->GetAccessFlags(); if (access_flags & kAccPublic) { info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public); } else if (access_flags & kAccProtected) { info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_protected); } else if (access_flags & kAccPrivate) { info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private); } info_.EndTag(); // DW_TAG_member. } if (type->IsStringClass()) { // Emit debug info about an artifical class member for java.lang.String which represents // the first element of the data stored in a string instance. 
Consumers of the debug // info will be able to read the content of java.lang.String based on the count (real // field) and based on the location of this data member. info_.StartTag(DW_TAG_member); WriteName("value"); // We don't support fields with C like array types so we just say its type is java char. WriteLazyType("C"); // char. info_.WriteUdata(DW_AT_data_member_location, mirror::String::ValueOffset().Uint32Value()); info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private); info_.EndTag(); // DW_TAG_member. } EndClassTag(); } } // Write base class declarations. for (const auto& base_class_reference : base_class_references) { size_t reference_offset = base_class_reference.first; mirror::Class* base_class = base_class_reference.second; const auto it = class_declarations.find(base_class); if (it != class_declarations.end()) { info_.UpdateUint32(reference_offset, it->second); } else { // Declare base class. We can not use the standard WriteLazyType // since we want to avoid the DW_TAG_reference_tag wrapping. std::string tmp_storage; const char* base_class_desc = base_class->GetDescriptor(&tmp_storage); size_t base_class_declaration_offset = StartClassTag(base_class_desc); info_.WriteFlagPresent(DW_AT_declaration); WriteLinkageName(base_class); EndClassTag(); class_declarations.emplace(base_class, base_class_declaration_offset); info_.UpdateUint32(reference_offset, base_class_declaration_offset); } } FinishLazyTypes(); CloseNamespacesAboveDepth(0); info_.EndTag(); // DW_TAG_compile_unit. CHECK_EQ(info_.Depth(), 0); std::vector buffer; buffer.reserve(info_.data()->size() + KB); const size_t offset = owner_->builder_->GetDebugInfo()->GetSize(); // All compilation units share single table which is at the start of .debug_abbrev. const size_t debug_abbrev_offset = 0; WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_); owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size()); } // Write table into .debug_loc which describes location of dex register. // The dex register might be valid only at some points and it might // move between machine registers and stack. void WriteRegLocation(const MethodDebugInfo* method_info, const std::vector& dex_register_maps, uint16_t vreg, bool is64bitValue, uint64_t compilation_unit_code_address, uint32_t dex_pc_low = 0, uint32_t dex_pc_high = 0xFFFFFFFF) { WriteDebugLocEntry(method_info, dex_register_maps, vreg, is64bitValue, compilation_unit_code_address, dex_pc_low, dex_pc_high, owner_->builder_->GetIsa(), &info_, &owner_->debug_loc_, &owner_->debug_ranges_); } // Linkage name uniquely identifies type. // It is used to determine the dynamic type of objects. // We use the methods_ field of class since it is unique and it is not moved by the GC. void WriteLinkageName(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) { auto* methods_ptr = type->GetMethodsPtr(); if (methods_ptr == nullptr) { // Some types might have no methods. Allocate empty array instead. LinearAlloc* allocator = Runtime::Current()->GetLinearAlloc(); void* storage = allocator->Alloc(Thread::Current(), sizeof(LengthPrefixedArray)); methods_ptr = new (storage) LengthPrefixedArray(0); type->SetMethodsPtr(methods_ptr, 0, 0); DCHECK(type->GetMethodsPtr() != nullptr); } char name[32]; snprintf(name, sizeof(name), "0x%" PRIXPTR, reinterpret_cast(methods_ptr)); info_.WriteString(dwarf::DW_AT_linkage_name, name); } // Some types are difficult to define as we go since they need // to be enclosed in the right set of namespaces. 
Therefore we // just define all types lazily at the end of compilation unit. void WriteLazyType(const char* type_descriptor) { if (type_descriptor != nullptr && type_descriptor[0] != 'V') { lazy_types_.emplace(std::string(type_descriptor), info_.size()); info_.WriteRef4(dwarf::DW_AT_type, 0); } } void FinishLazyTypes() { for (const auto& lazy_type : lazy_types_) { info_.UpdateUint32(lazy_type.second, WriteTypeDeclaration(lazy_type.first)); } lazy_types_.clear(); } private: void WriteName(const char* name) { if (name != nullptr) { info_.WriteString(dwarf::DW_AT_name, name); } } // Convert dex type descriptor to DWARF. // Returns offset in the compilation unit. size_t WriteTypeDeclaration(const std::string& desc) { using namespace dwarf; // NOLINT. For easy access to DWARF constants. DCHECK(!desc.empty()); const auto it = type_cache_.find(desc); if (it != type_cache_.end()) { return it->second; } size_t offset; if (desc[0] == 'L') { // Class type. For example: Lpackage/name; size_t class_offset = StartClassTag(desc.c_str()); info_.WriteFlagPresent(DW_AT_declaration); EndClassTag(); // Reference to the class type. offset = info_.StartTag(DW_TAG_reference_type); info_.WriteRef(DW_AT_type, class_offset); info_.EndTag(); } else if (desc[0] == '[') { // Array type. size_t element_type = WriteTypeDeclaration(desc.substr(1)); CloseNamespacesAboveDepth(0); // Declare in root namespace. size_t array_type = info_.StartTag(DW_TAG_array_type); info_.WriteFlagPresent(DW_AT_declaration); info_.WriteRef(DW_AT_type, element_type); info_.EndTag(); offset = info_.StartTag(DW_TAG_reference_type); info_.WriteRef4(DW_AT_type, array_type); info_.EndTag(); } else { // Primitive types. DCHECK_EQ(desc.size(), 1u); const char* name; uint32_t encoding; uint32_t byte_size; switch (desc[0]) { case 'B': name = "byte"; encoding = DW_ATE_signed; byte_size = 1; break; case 'C': name = "char"; encoding = DW_ATE_UTF; byte_size = 2; break; case 'D': name = "double"; encoding = DW_ATE_float; byte_size = 8; break; case 'F': name = "float"; encoding = DW_ATE_float; byte_size = 4; break; case 'I': name = "int"; encoding = DW_ATE_signed; byte_size = 4; break; case 'J': name = "long"; encoding = DW_ATE_signed; byte_size = 8; break; case 'S': name = "short"; encoding = DW_ATE_signed; byte_size = 2; break; case 'Z': name = "boolean"; encoding = DW_ATE_boolean; byte_size = 1; break; case 'V': LOG(FATAL) << "Void type should not be encoded"; UNREACHABLE(); default: LOG(FATAL) << "Unknown dex type descriptor: \"" << desc << "\""; UNREACHABLE(); } CloseNamespacesAboveDepth(0); // Declare in root namespace. offset = info_.StartTag(DW_TAG_base_type); WriteName(name); info_.WriteData1(DW_AT_encoding, encoding); info_.WriteData1(DW_AT_byte_size, byte_size); info_.EndTag(); } type_cache_.emplace(desc, offset); return offset; } // Start DW_TAG_class_type tag nested in DW_TAG_namespace tags. // Returns offset of the class tag in the compilation unit. size_t StartClassTag(const char* desc) { std::string name = SetNamespaceForClass(desc); size_t offset = info_.StartTag(dwarf::DW_TAG_class_type); WriteName(name.c_str()); return offset; } void EndClassTag() { info_.EndTag(); } // Set the current namespace nesting to one required by the given class. // Returns the class name with namespaces, 'L', and ';' stripped. std::string SetNamespaceForClass(const char* desc) { DCHECK(desc != nullptr && desc[0] == 'L'); desc++; // Skip the initial 'L'. 
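// For example, desc = "Ljava/lang/String;" opens (or reuses) the namespace
// tags "java" and "lang" and returns "String".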
size_t depth = 0; for (const char* end; (end = strchr(desc, '/')) != nullptr; desc = end + 1, ++depth) { // Check whether the name at this depth is already what we need. if (depth < current_namespace_.size()) { const std::string& name = current_namespace_[depth]; if (name.compare(0, name.size(), desc, end - desc) == 0) { continue; } } // Otherwise we need to open a new namespace tag at this depth. CloseNamespacesAboveDepth(depth); info_.StartTag(dwarf::DW_TAG_namespace); std::string name(desc, end - desc); WriteName(name.c_str()); current_namespace_.push_back(std::move(name)); } CloseNamespacesAboveDepth(depth); return std::string(desc, strchr(desc, ';') - desc); } // Close namespace tags to reach the given nesting depth. void CloseNamespacesAboveDepth(size_t depth) { DCHECK_LE(depth, current_namespace_.size()); while (current_namespace_.size() > depth) { info_.EndTag(); current_namespace_.pop_back(); } } // For access to the ELF sections. ElfDebugInfoWriter* owner_; // Temporary buffer to create and store the entries. dwarf::DebugInfoEntryWriter<> info_; // Cache of already translated type descriptors. std::map type_cache_; // type_desc -> definition_offset. // 32-bit references which need to be resolved to a type later. // Given type may be used multiple times. Therefore we need a multimap. std::multimap lazy_types_; // type_desc -> patch_offset. // The current set of open namespace tags which are active and not closed yet. std::vector current_namespace_; }; } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_DEBUG_INFO_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/elf_debug_line_writer.h000066400000000000000000000266101336577252300255620ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ #include #include #include "compiled_method.h" #include "debug/dwarf/debug_line_opcode_writer.h" #include "debug/dwarf/headers.h" #include "debug/elf_compilation_unit.h" #include "dex_file-inl.h" #include "elf_builder.h" #include "stack_map.h" namespace art { namespace debug { typedef std::vector PositionInfos; static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) { static_cast(ctx)->push_back(entry); return false; } template class ElfDebugLineWriter { using Elf_Addr = typename ElfTypes::Addr; public: explicit ElfDebugLineWriter(ElfBuilder* builder) : builder_(builder) { } void Start() { builder_->GetDebugLine()->Start(); } // Write line table for given set of methods. // Returns the number of bytes written. size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) { const InstructionSet isa = builder_->GetIsa(); const bool is64bit = Is64BitInstructionSet(isa); const Elf_Addr base_address = compilation_unit.is_code_address_text_relative ? 
builder_->GetText()->GetAddress() : 0; compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetSize(); std::vector<dwarf::FileEntry> files; std::unordered_map<std::string, size_t> files_map; std::vector<std::string> directories; std::unordered_map<std::string, size_t> directories_map; int code_factor_bits_ = 0; int dwarf_isa = -1; switch (isa) { case kArm: // arm actually means thumb2. case kThumb2: code_factor_bits_ = 1; // 16-bit instructions dwarf_isa = 1; // DW_ISA_ARM_thumb. break; case kArm64: case kMips: case kMips64: code_factor_bits_ = 2; // 32-bit instructions break; case kNone: case kX86: case kX86_64: break; } std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size()); dwarf::DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_); for (const MethodDebugInfo* mi : compilation_unit.methods) { // Ignore the function if we have already generated a line table for the same address. // It would confuse the debugger and the DWARF specification forbids it. // We allow the line table for a method to be replicated in different compilation units. // This ensures that each compilation unit contains line tables for all its methods. if (!seen_addresses.insert(mi->code_address).second) { continue; } uint32_t prologue_end = std::numeric_limits<uint32_t>::max(); std::vector<SrcMapElem> pc2dex_map; if (mi->code_info != nullptr) { // Use stack maps to create mapping table from pc to dex. const CodeInfo code_info(mi->code_info); const CodeInfoEncoding encoding = code_info.ExtractEncoding(); pc2dex_map.reserve(code_info.GetNumberOfStackMaps(encoding)); for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) { StackMap stack_map = code_info.GetStackMapAt(s, encoding); DCHECK(stack_map.IsValid()); const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa); const int32_t dex = stack_map.GetDexPc(encoding.stack_map.encoding); pc2dex_map.push_back({pc, dex}); if (stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) { // Guess that the first map with local variables is the end of prologue. prologue_end = std::min(prologue_end, pc); } } std::sort(pc2dex_map.begin(), pc2dex_map.end()); } if (pc2dex_map.empty()) { continue; } // Compensate for compiler's off-by-one-instruction error. // // The compiler generates stackmap with PC *after* the branch instruction // (because this is the PC which is easier to obtain when unwinding). // // However, the debugger is more clever and it will ask us for line-number // mapping at the location of the branch instruction (since the following // instruction could belong to another line, this is the correct thing to do). // // So we really want to just decrement the PC by one instruction so that the // branch instruction is covered as well. However, we do not know the size // of the previous instruction, and we can not subtract just a fixed amount // (the debugger would trust us that the PC is valid; it might try to set // breakpoint there at some point, and setting a breakpoint in mid-instruction // would make the process crash in a spectacular way). // // Therefore, we say that the PC which the compiler gave us for the stackmap // is the end of its associated address range, and we use the PC from the // previous stack map as the start of the range. This ensures that the PC is // valid and that the branch instruction is covered. // // This ensures we have correct line number mapping at call sites (which is // important for backtraces), but there is nothing we can do for non-call // sites (so stepping through optimized code in debugger is not possible).
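// For example (illustrative): if the recorded stackmap PCs are {8, 16, 24},
// the adjustment below emits their line entries at {0, 8, 16} instead, so
// each entry's range ends at its own recorded PC and thus covers the
// preceding branch instruction.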
// // We do not adjust the stackmaps if the code was compiled as debuggable. // In that case, the stackmaps should accurately cover all instructions. if (!mi->is_native_debuggable) { for (size_t i = pc2dex_map.size() - 1; i > 0; --i) { pc2dex_map[i].from_ = pc2dex_map[i - 1].from_; } pc2dex_map[0].from_ = 0; } Elf_Addr method_address = base_address + mi->code_address; PositionInfos dex2line_map; DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; if (!dex->DecodeDebugPositionInfo(mi->code_item, PositionInfoCallback, &dex2line_map)) { continue; } if (dex2line_map.empty()) { continue; } opcodes.SetAddress(method_address); if (dwarf_isa != -1) { opcodes.SetISA(dwarf_isa); } // Get and deduplicate directory and filename. int file_index = 0; // 0 - primary source file of the compilation. auto& dex_class_def = dex->GetClassDef(mi->class_def_index); const char* source_file = dex->GetSourceFile(dex_class_def); if (source_file != nullptr) { std::string file_name(source_file); size_t file_name_slash = file_name.find_last_of('/'); std::string class_name(dex->GetClassDescriptor(dex_class_def)); size_t class_name_slash = class_name.find_last_of('/'); std::string full_path(file_name); // Guess directory from package name. int directory_index = 0; // 0 - current directory of the compilation. if (file_name_slash == std::string::npos && // Just filename. class_name.front() == 'L' && // Type descriptor for a class. class_name_slash != std::string::npos) { // Has package name. std::string package_name = class_name.substr(1, class_name_slash - 1); auto it = directories_map.find(package_name); if (it == directories_map.end()) { directory_index = 1 + directories.size(); directories_map.emplace(package_name, directory_index); directories.push_back(package_name); } else { directory_index = it->second; } full_path = package_name + "/" + file_name; } // Add file entry. auto it2 = files_map.find(full_path); if (it2 == files_map.end()) { file_index = 1 + files.size(); files_map.emplace(full_path, file_index); files.push_back(dwarf::FileEntry { file_name, directory_index, 0, // Modification time - NA. 0, // File size - NA. }); } else { file_index = it2->second; } } opcodes.SetFile(file_index); // Generate mapping opcodes from PC to Java lines. if (file_index != 0) { // If the method was not compiled as native-debuggable, we still generate all available // lines, but we try to prevent the debugger from stepping and setting breakpoints since // the information is too inaccurate for that (breakpoints would be set after the calls). const bool default_is_stmt = mi->is_native_debuggable; bool first = true; for (SrcMapElem pc2dex : pc2dex_map) { uint32_t pc = pc2dex.from_; int dex_pc = pc2dex.to_; // Find mapping with address with is greater than our dex pc; then go back one step. auto dex2line = std::upper_bound( dex2line_map.begin(), dex2line_map.end(), dex_pc, [](uint32_t address, const DexFile::PositionInfo& entry) { return address < entry.address_; }); // Look for first valid mapping after the prologue. if (dex2line != dex2line_map.begin() && pc >= prologue_end) { int line = (--dex2line)->line_; if (first) { first = false; if (pc > 0) { // Assume that any preceding code is prologue. int first_line = dex2line_map.front().line_; // Prologue is not a sensible place for a breakpoint. 
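// Rows emitted with is_stmt=0 remain usable for backtraces but are not
// recommended breakpoint locations, which is exactly what we want for the
// prologue.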
opcodes.SetIsStmt(false); opcodes.AddRow(method_address, first_line); opcodes.SetPrologueEnd(); } opcodes.SetIsStmt(default_is_stmt); opcodes.AddRow(method_address + pc, line); } else if (line != opcodes.CurrentLine()) { opcodes.SetIsStmt(default_is_stmt); opcodes.AddRow(method_address + pc, line); } } } } else { // line 0 - instruction cannot be attributed to any source line. opcodes.AddRow(method_address, 0); } opcodes.AdvancePC(method_address + mi->code_size); opcodes.EndSequence(); } std::vector buffer; buffer.reserve(opcodes.data()->size() + KB); size_t offset = builder_->GetDebugLine()->GetSize(); WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches_); builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size()); return buffer.size(); } void End(bool write_oat_patches) { builder_->GetDebugLine()->End(); if (write_oat_patches) { builder_->WritePatches(".debug_line.oat_patches", ArrayRef(debug_line_patches_)); } } private: ElfBuilder* builder_; std::vector debug_line_patches_; }; } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/elf_debug_loc_writer.h000066400000000000000000000320621336577252300254060ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ #include #include #include "arch/instruction_set.h" #include "compiled_method.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/dwarf/register.h" #include "debug/method_debug_info.h" #include "stack_map.h" namespace art { namespace debug { using Reg = dwarf::Reg; static Reg GetDwarfCoreReg(InstructionSet isa, int machine_reg) { switch (isa) { case kArm: case kThumb2: return Reg::ArmCore(machine_reg); case kArm64: return Reg::Arm64Core(machine_reg); case kX86: return Reg::X86Core(machine_reg); case kX86_64: return Reg::X86_64Core(machine_reg); case kMips: return Reg::MipsCore(machine_reg); case kMips64: return Reg::Mips64Core(machine_reg); case kNone: LOG(FATAL) << "No instruction set"; } UNREACHABLE(); } static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) { switch (isa) { case kArm: case kThumb2: return Reg::ArmFp(machine_reg); case kArm64: return Reg::Arm64Fp(machine_reg); case kX86: return Reg::X86Fp(machine_reg); case kX86_64: return Reg::X86_64Fp(machine_reg); case kMips: return Reg::MipsFp(machine_reg); case kMips64: return Reg::Mips64Fp(machine_reg); case kNone: LOG(FATAL) << "No instruction set"; } UNREACHABLE(); } struct VariableLocation { uint32_t low_pc; // Relative to compilation unit. uint32_t high_pc; // Relative to compilation unit. DexRegisterLocation reg_lo; // May be None if the location is unknown. DexRegisterLocation reg_hi; // Most significant bits of 64-bit value. }; // Get the location of given dex register (e.g. stack or machine register). 
// Note that the location might be different based on the current pc. // The result will cover all ranges where the variable is in scope. // PCs corresponding to stackmap with dex register map are accurate, // all other PCs are best-effort only. static std::vector GetVariableLocations( const MethodDebugInfo* method_info, const std::vector& dex_register_maps, uint16_t vreg, bool is64bitValue, uint64_t compilation_unit_code_address, uint32_t dex_pc_low, uint32_t dex_pc_high, InstructionSet isa) { std::vector variable_locations; // Get stack maps sorted by pc (they might not be sorted internally). // TODO(dsrbecky) Remove this once stackmaps get sorted by pc. const CodeInfo code_info(method_info->code_info); const CodeInfoEncoding encoding = code_info.ExtractEncoding(); std::map stack_maps; // low_pc -> stack_map_index. for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) { StackMap stack_map = code_info.GetStackMapAt(s, encoding); DCHECK(stack_map.IsValid()); if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) { // The compiler creates stackmaps without register maps at the start of // basic blocks in order to keep instruction-accurate line number mapping. // However, we never stop at those (breakpoint locations always have map). // Therefore, for the purpose of local variables, we ignore them. // The main reason for this is to save space by avoiding undefined gaps. continue; } const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map.encoding, isa); DCHECK_LE(pc_offset, method_info->code_size); DCHECK_LE(compilation_unit_code_address, method_info->code_address); const uint32_t low_pc = dchecked_integral_cast( method_info->code_address + pc_offset - compilation_unit_code_address); stack_maps.emplace(low_pc, s); } // Create entries for the requested register based on stack map data. for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) { const uint32_t low_pc = it->first; const uint32_t stack_map_index = it->second; const StackMap& stack_map = code_info.GetStackMapAt(stack_map_index, encoding); auto next_it = it; next_it++; const uint32_t high_pc = next_it != stack_maps.end() ? next_it->first : method_info->code_address + method_info->code_size - compilation_unit_code_address; DCHECK_LE(low_pc, high_pc); if (low_pc == high_pc) { continue; // Ignore if the address range is empty. } // Check that the stack map is in the requested range. uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding); if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) { // The variable is not in scope at this PC. Therefore omit the entry. // Note that this is different to None() entry which means in scope, but unknown location. continue; } // Find the location of the dex register. DexRegisterLocation reg_lo = DexRegisterLocation::None(); DexRegisterLocation reg_hi = DexRegisterLocation::None(); DCHECK_LT(stack_map_index, dex_register_maps.size()); DexRegisterMap dex_register_map = dex_register_maps[stack_map_index]; DCHECK(dex_register_map.IsValid()); reg_lo = dex_register_map.GetDexRegisterLocation( vreg, method_info->code_item->registers_size_, code_info, encoding); if (is64bitValue) { reg_hi = dex_register_map.GetDexRegisterLocation( vreg + 1, method_info->code_item->registers_size_, code_info, encoding); } // Add location entry for this address range. 
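// Entries which have the same location as their predecessor and start where
// it ended are merged below by extending the previous range, so a value
// that stays put across many stackmaps yields a single .debug_loc entry.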
if (!variable_locations.empty() && variable_locations.back().reg_lo == reg_lo && variable_locations.back().reg_hi == reg_hi && variable_locations.back().high_pc == low_pc) { // Merge with the previous entry (extend its range). variable_locations.back().high_pc = high_pc; } else { variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi}); } } return variable_locations; } // Write table into .debug_loc which describes location of dex register. // The dex register might be valid only at some points and it might // move between machine registers and stack. static void WriteDebugLocEntry(const MethodDebugInfo* method_info, const std::vector& dex_register_maps, uint16_t vreg, bool is64bitValue, uint64_t compilation_unit_code_address, uint32_t dex_pc_low, uint32_t dex_pc_high, InstructionSet isa, dwarf::DebugInfoEntryWriter<>* debug_info, std::vector* debug_loc_buffer, std::vector* debug_ranges_buffer) { using Kind = DexRegisterLocation::Kind; if (method_info->code_info == nullptr || dex_register_maps.empty()) { return; } std::vector variable_locations = GetVariableLocations( method_info, dex_register_maps, vreg, is64bitValue, compilation_unit_code_address, dex_pc_low, dex_pc_high, isa); // Write .debug_loc entries. dwarf::Writer<> debug_loc(debug_loc_buffer); const size_t debug_loc_offset = debug_loc.size(); const bool is64bit = Is64BitInstructionSet(isa); std::vector expr_buffer; for (const VariableLocation& variable_location : variable_locations) { // Translate dex register location to DWARF expression. // Note that 64-bit value might be split to two distinct locations. // (for example, two 32-bit machine registers, or even stack and register) dwarf::Expression expr(&expr_buffer); DexRegisterLocation reg_lo = variable_location.reg_lo; DexRegisterLocation reg_hi = variable_location.reg_hi; for (int piece = 0; piece < (is64bitValue ? 2 : 1); piece++) { DexRegisterLocation reg_loc = (piece == 0 ? reg_lo : reg_hi); const Kind kind = reg_loc.GetKind(); const int32_t value = reg_loc.GetValue(); if (kind == Kind::kInStack) { // The stack offset is relative to SP. Make it relative to CFA. expr.WriteOpFbreg(value - method_info->frame_size_in_bytes); if (piece == 0 && reg_hi.GetKind() == Kind::kInStack && reg_hi.GetValue() == value + 4) { break; // the high word is correctly implied by the low word. } } else if (kind == Kind::kInRegister) { expr.WriteOpReg(GetDwarfCoreReg(isa, value).num()); if (piece == 0 && reg_hi.GetKind() == Kind::kInRegisterHigh && reg_hi.GetValue() == value) { break; // the high word is correctly implied by the low word. } } else if (kind == Kind::kInFpuRegister) { if ((isa == kArm || isa == kThumb2) && piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegister && reg_hi.GetValue() == value + 1 && value % 2 == 0) { // Translate S register pair to D register (e.g. S4+S5 to D2). expr.WriteOpReg(Reg::ArmDp(value / 2).num()); break; } expr.WriteOpReg(GetDwarfFpReg(isa, value).num()); if (piece == 0 && reg_hi.GetKind() == Kind::kInFpuRegisterHigh && reg_hi.GetValue() == reg_lo.GetValue()) { break; // the high word is correctly implied by the low word. } } else if (kind == Kind::kConstant) { expr.WriteOpConsts(value); expr.WriteOpStackValue(); } else if (kind == Kind::kNone) { break; } else { // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind(). // kInRegisterHigh and kInFpuRegisterHigh should be handled by // the special cases above and they should not occur alone. 
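// Reaching this branch therefore indicates inconsistent location data;
// we log it and stop emitting pieces for this entry rather than aborting
// the whole write.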
LOG(ERROR) << "Unexpected register location kind: " << kind; break; } if (is64bitValue) { // Write the marker which is needed by split 64-bit values. // This code is skipped by the special cases. expr.WriteOpPiece(4); } } if (expr.size() > 0) { if (is64bit) { debug_loc.PushUint64(variable_location.low_pc); debug_loc.PushUint64(variable_location.high_pc); } else { debug_loc.PushUint32(variable_location.low_pc); debug_loc.PushUint32(variable_location.high_pc); } // Write the expression. debug_loc.PushUint16(expr.size()); debug_loc.PushData(expr.data()); } else { // Do not generate .debug_loc if the location is not known. } } // Write end-of-list entry. if (is64bit) { debug_loc.PushUint64(0); debug_loc.PushUint64(0); } else { debug_loc.PushUint32(0); debug_loc.PushUint32(0); } // Write .debug_ranges entries. // This includes ranges where the variable is in scope but the location is not known. dwarf::Writer<> debug_ranges(debug_ranges_buffer); size_t debug_ranges_offset = debug_ranges.size(); for (size_t i = 0; i < variable_locations.size(); i++) { uint32_t low_pc = variable_locations[i].low_pc; uint32_t high_pc = variable_locations[i].high_pc; while (i + 1 < variable_locations.size() && variable_locations[i+1].low_pc == high_pc) { // Merge address range with the next entry. high_pc = variable_locations[++i].high_pc; } if (is64bit) { debug_ranges.PushUint64(low_pc); debug_ranges.PushUint64(high_pc); } else { debug_ranges.PushUint32(low_pc); debug_ranges.PushUint32(high_pc); } } // Write end-of-list entry. if (is64bit) { debug_ranges.PushUint64(0); debug_ranges.PushUint64(0); } else { debug_ranges.PushUint32(0); debug_ranges.PushUint32(0); } // Simple de-duplication - check whether this entry is same as the last one (or tail of it). size_t debug_ranges_entry_size = debug_ranges.size() - debug_ranges_offset; if (debug_ranges_offset >= debug_ranges_entry_size) { size_t previous_offset = debug_ranges_offset - debug_ranges_entry_size; if (memcmp(debug_ranges_buffer->data() + previous_offset, debug_ranges_buffer->data() + debug_ranges_offset, debug_ranges_entry_size) == 0) { // Remove what we have just written and use the last entry instead. debug_ranges_buffer->resize(debug_ranges_offset); debug_ranges_offset = previous_offset; } } // Write attributes to .debug_info. debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc_offset); debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges_offset); } } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/elf_debug_writer.cc000066400000000000000000000204361336577252300247110ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "elf_debug_writer.h" #include #include "base/array_ref.h" #include "debug/dwarf/dwarf_constants.h" #include "debug/elf_compilation_unit.h" #include "debug/elf_debug_frame_writer.h" #include "debug/elf_debug_info_writer.h" #include "debug/elf_debug_line_writer.h" #include "debug/elf_debug_loc_writer.h" #include "debug/elf_gnu_debugdata_writer.h" #include "debug/elf_symtab_writer.h" #include "debug/method_debug_info.h" #include "elf_builder.h" #include "linker/vector_output_stream.h" #include "oat.h" namespace art { namespace debug { template void WriteDebugInfo(ElfBuilder* builder, const ArrayRef& method_infos, dwarf::CFIFormat cfi_format, bool write_oat_patches) { // Write .strtab and .symtab. WriteDebugSymbols(builder, method_infos, true /* with_signature */); // Write .debug_frame. WriteCFISection(builder, method_infos, cfi_format, write_oat_patches); // Group the methods into compilation units based on source file. std::vector compilation_units; const char* last_source_file = nullptr; for (const MethodDebugInfo& mi : method_infos) { if (mi.dex_file != nullptr) { auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index); const char* source_file = mi.dex_file->GetSourceFile(dex_class_def); if (compilation_units.empty() || source_file != last_source_file) { compilation_units.push_back(ElfCompilationUnit()); } ElfCompilationUnit& cu = compilation_units.back(); cu.methods.push_back(&mi); // All methods must have the same addressing mode otherwise the min/max below does not work. DCHECK_EQ(cu.methods.front()->is_code_address_text_relative, mi.is_code_address_text_relative); cu.is_code_address_text_relative = mi.is_code_address_text_relative; cu.code_address = std::min(cu.code_address, mi.code_address); cu.code_end = std::max(cu.code_end, mi.code_address + mi.code_size); last_source_file = source_file; } } // Write .debug_line section. if (!compilation_units.empty()) { ElfDebugLineWriter line_writer(builder); line_writer.Start(); for (auto& compilation_unit : compilation_units) { line_writer.WriteCompilationUnit(compilation_unit); } line_writer.End(write_oat_patches); } // Write .debug_info section. if (!compilation_units.empty()) { ElfDebugInfoWriter info_writer(builder); info_writer.Start(); for (const auto& compilation_unit : compilation_units) { ElfCompilationUnitWriter cu_writer(&info_writer); cu_writer.Write(compilation_unit); } info_writer.End(write_oat_patches); } } std::vector MakeMiniDebugInfo( InstructionSet isa, const InstructionSetFeatures* features, size_t rodata_size, size_t text_size, const ArrayRef& method_infos) { if (Is64BitInstructionSet(isa)) { return MakeMiniDebugInfoInternal(isa, features, rodata_size, text_size, method_infos); } else { return MakeMiniDebugInfoInternal(isa, features, rodata_size, text_size, method_infos); } } template static std::vector WriteDebugElfFileForMethodsInternal( InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& method_infos) { std::vector buffer; buffer.reserve(KB); VectorOutputStream out("Debug ELF file", &buffer); std::unique_ptr> builder(new ElfBuilder(isa, features, &out)); // No program headers since the ELF file is not linked and has no allocated sections. 
builder->Start(false /* write_program_headers */); WriteDebugInfo(builder.get(), method_infos, dwarf::DW_DEBUG_FRAME_FORMAT, false /* write_oat_patches */); builder->End(); CHECK(builder->Good()); return buffer; } std::vector WriteDebugElfFileForMethods( InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& method_infos) { if (Is64BitInstructionSet(isa)) { return WriteDebugElfFileForMethodsInternal(isa, features, method_infos); } else { return WriteDebugElfFileForMethodsInternal(isa, features, method_infos); } } template static std::vector WriteDebugElfFileForClassesInternal( InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& types) REQUIRES_SHARED(Locks::mutator_lock_) { std::vector buffer; buffer.reserve(KB); VectorOutputStream out("Debug ELF file", &buffer); std::unique_ptr> builder(new ElfBuilder(isa, features, &out)); // No program headers since the ELF file is not linked and has no allocated sections. builder->Start(false /* write_program_headers */); ElfDebugInfoWriter info_writer(builder.get()); info_writer.Start(); ElfCompilationUnitWriter cu_writer(&info_writer); cu_writer.Write(types); info_writer.End(false /* write_oat_patches */); builder->End(); CHECK(builder->Good()); return buffer; } std::vector WriteDebugElfFileForClasses(InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& types) { if (Is64BitInstructionSet(isa)) { return WriteDebugElfFileForClassesInternal(isa, features, types); } else { return WriteDebugElfFileForClassesInternal(isa, features, types); } } std::vector MakeTrampolineInfos(const OatHeader& header) { std::map trampolines = { { "interpreterToInterpreterBridge", header.GetInterpreterToInterpreterBridgeOffset() }, { "interpreterToCompiledCodeBridge", header.GetInterpreterToCompiledCodeBridgeOffset() }, { "jniDlsymLookup", header.GetJniDlsymLookupOffset() }, { "quickGenericJniTrampoline", header.GetQuickGenericJniTrampolineOffset() }, { "quickImtConflictTrampoline", header.GetQuickImtConflictTrampolineOffset() }, { "quickResolutionTrampoline", header.GetQuickResolutionTrampolineOffset() }, { "quickToInterpreterBridge", header.GetQuickToInterpreterBridgeOffset() }, }; std::vector result; for (const auto& it : trampolines) { if (it.second != 0) { MethodDebugInfo info = MethodDebugInfo(); info.trampoline_name = it.first; info.isa = header.GetInstructionSet(); info.is_code_address_text_relative = true; info.code_address = it.second - header.GetExecutableOffset(); info.code_size = 0; // The symbol lasts until the next symbol. result.push_back(std::move(info)); } } return result; } // Explicit instantiations template void WriteDebugInfo( ElfBuilder* builder, const ArrayRef& method_infos, dwarf::CFIFormat cfi_format, bool write_oat_patches); template void WriteDebugInfo( ElfBuilder* builder, const ArrayRef& method_infos, dwarf::CFIFormat cfi_format, bool write_oat_patches); } // namespace debug } // namespace art android-platform-art-8.1.0+r23/compiler/debug/elf_debug_writer.h000066400000000000000000000037171336577252300245560ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ #include #include "base/array_ref.h" #include "base/macros.h" #include "base/mutex.h" #include "debug/dwarf/dwarf_constants.h" #include "elf_builder.h" namespace art { class OatHeader; namespace mirror { class Class; } // namespace mirror namespace debug { struct MethodDebugInfo; template void WriteDebugInfo( ElfBuilder* builder, const ArrayRef& method_infos, dwarf::CFIFormat cfi_format, bool write_oat_patches); std::vector MakeMiniDebugInfo( InstructionSet isa, const InstructionSetFeatures* features, size_t rodata_section_size, size_t text_section_size, const ArrayRef& method_infos); std::vector WriteDebugElfFileForMethods( InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& method_infos); std::vector WriteDebugElfFileForClasses( InstructionSet isa, const InstructionSetFeatures* features, const ArrayRef& types) REQUIRES_SHARED(Locks::mutator_lock_); std::vector MakeTrampolineInfos(const OatHeader& oat_header); } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/elf_gnu_debugdata_writer.h000066400000000000000000000076431336577252300262630ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_ #include #include "arch/instruction_set.h" #include "elf_builder.h" #include "linker/vector_output_stream.h" // liblzma. #include "7zCrc.h" #include "XzCrc64.h" #include "XzEnc.h" namespace art { namespace debug { static void XzCompress(const std::vector* src, std::vector* dst) { // Configure the compression library. CrcGenerateTable(); Crc64GenerateTable(); CLzma2EncProps lzma2Props; Lzma2EncProps_Init(&lzma2Props); lzma2Props.lzmaProps.level = 1; // Fast compression. Lzma2EncProps_Normalize(&lzma2Props); CXzProps props; XzProps_Init(&props); props.lzma2Props = &lzma2Props; // Implement the required interface for communication (written in C so no virtual methods). 
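  // Sketch of the wiring (as used below): liblzma's ISeqInStream, ISeqOutStream and
  // ICompressProgress are plain C structs holding function pointers, so a single
  // XzCallbacks object inheriting all three can be passed to Xz_Encode once per role,
  // and each static *Impl callback recovers the object from the interface pointer it
  // receives.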
  struct XzCallbacks : public ISeqInStream, public ISeqOutStream, public ICompressProgress {
    static SRes ReadImpl(void* p, void* buf, size_t* size) {
      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqInStream*>(p));
      *size = std::min(*size, ctx->src_->size() - ctx->src_pos_);
      memcpy(buf, ctx->src_->data() + ctx->src_pos_, *size);
      ctx->src_pos_ += *size;
      return SZ_OK;
    }
    static size_t WriteImpl(void* p, const void* buf, size_t size) {
      auto* ctx = static_cast<XzCallbacks*>(reinterpret_cast<ISeqOutStream*>(p));
      const uint8_t* buffer = reinterpret_cast<const uint8_t*>(buf);
      ctx->dst_->insert(ctx->dst_->end(), buffer, buffer + size);
      return size;
    }
    static SRes ProgressImpl(void* , UInt64, UInt64) {
      return SZ_OK;
    }
    size_t src_pos_;
    const std::vector<uint8_t>* src_;
    std::vector<uint8_t>* dst_;
  };
  XzCallbacks callbacks;
  callbacks.Read = XzCallbacks::ReadImpl;
  callbacks.Write = XzCallbacks::WriteImpl;
  callbacks.Progress = XzCallbacks::ProgressImpl;
  callbacks.src_pos_ = 0;
  callbacks.src_ = src;
  callbacks.dst_ = dst;
  // Compress.
  SRes res = Xz_Encode(&callbacks, &callbacks, &props, &callbacks);
  CHECK_EQ(res, SZ_OK);
}

template <typename ElfTypes>
static std::vector<uint8_t> MakeMiniDebugInfoInternal(
    InstructionSet isa,
    const InstructionSetFeatures* features,
    size_t rodata_section_size,
    size_t text_section_size,
    const ArrayRef<const MethodDebugInfo>& method_infos) {
  std::vector<uint8_t> buffer;
  buffer.reserve(KB);
  VectorOutputStream out("Mini-debug-info ELF file", &buffer);
  std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out));
  builder->Start();
  // Mirror .rodata and .text as NOBITS sections.
  // This is needed to detect relocations after compression.
  builder->GetRoData()->WriteNoBitsSection(rodata_section_size);
  builder->GetText()->WriteNoBitsSection(text_section_size);
  WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
  WriteCFISection(builder.get(),
                  method_infos,
                  dwarf::DW_DEBUG_FRAME_FORMAT,
                  false /* write_oat_patches */);
  builder->End();
  CHECK(builder->Good());
  std::vector<uint8_t> compressed_buffer;
  compressed_buffer.reserve(buffer.size() / 4);
  XzCompress(&buffer, &compressed_buffer);
  return compressed_buffer;
}

}  // namespace debug
}  // namespace art

#endif  // ART_COMPILER_DEBUG_ELF_GNU_DEBUGDATA_WRITER_H_
android-platform-art-8.1.0+r23/compiler/debug/elf_symtab_writer.h000066400000000000000000000103311336577252300247570ustar00rootroot00000000000000/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_

#include <unordered_set>

#include "debug/method_debug_info.h"
#include "elf_builder.h"
#include "utils.h"

namespace art {
namespace debug {

// The ARM specification defines three special mapping symbols
// $a, $t and $d which mark ARM, Thumb and data ranges respectively.
// These symbols can be used by tools, for example, to pretty
// print instructions correctly. Objdump will use them if they
// exist, but it will still work well without them.
// However, these extra symbols take space, so let's just generate
// one symbol which marks the whole .text section as code.
constexpr bool kGenerateSingleArmMappingSymbol = true; template static void WriteDebugSymbols(ElfBuilder* builder, const ArrayRef& method_infos, bool with_signature) { uint64_t mapping_symbol_address = std::numeric_limits::max(); auto* strtab = builder->GetStrTab(); auto* symtab = builder->GetSymTab(); if (method_infos.empty()) { return; } // Find all addresses which contain deduped methods. // The first instance of method is not marked deduped_, but the rest is. std::unordered_set deduped_addresses; for (const MethodDebugInfo& info : method_infos) { if (info.deduped) { deduped_addresses.insert(info.code_address); } } strtab->Start(); strtab->Write(""); // strtab should start with empty string. std::string last_name; size_t last_name_offset = 0; for (const MethodDebugInfo& info : method_infos) { if (info.deduped) { continue; // Add symbol only for the first instance. } size_t name_offset; if (info.trampoline_name != nullptr) { name_offset = strtab->Write(info.trampoline_name); } else { DCHECK(info.dex_file != nullptr); std::string name = info.dex_file->PrettyMethod(info.dex_method_index, with_signature); if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) { name += " [DEDUPED]"; } // If we write method names without signature, we might see the same name multiple times. name_offset = (name == last_name ? last_name_offset : strtab->Write(name)); last_name = std::move(name); last_name_offset = name_offset; } const auto* text = info.is_code_address_text_relative ? builder->GetText() : nullptr; uint64_t address = info.code_address + (text != nullptr ? text->GetAddress() : 0); // Add in code delta, e.g., thumb bit 0 for Thumb2 code. address += CompiledMethod::CodeDelta(info.isa); symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC); // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2 // instructions, so that disassembler tools can correctly disassemble. // Note that even if we generate just a single mapping symbol, ARM's Streamline // requires it to match function symbol. Just address 0 does not work. if (info.isa == kThumb2) { if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) { symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE); mapping_symbol_address = address; } } } strtab->End(); // Symbols are buffered and written after names (because they are smaller). // We could also do two passes in this function to avoid the buffering. symtab->Start(); symtab->Write(); symtab->End(); } } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_ android-platform-art-8.1.0+r23/compiler/debug/method_debug_info.h000066400000000000000000000027101336577252300246770ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ #define ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ #include "compiled_method.h" #include "dex_file.h" namespace art { namespace debug { struct MethodDebugInfo { const char* trampoline_name; const DexFile* dex_file; // Native methods (trampolines) do not reference dex file. size_t class_def_index; uint32_t dex_method_index; uint32_t access_flags; const DexFile::CodeItem* code_item; InstructionSet isa; bool deduped; bool is_native_debuggable; bool is_optimized; bool is_code_address_text_relative; // Is the address offset from start of .text section? uint64_t code_address; uint32_t code_size; uint32_t frame_size_in_bytes; const void* code_info; ArrayRef cfi; }; } // namespace debug } // namespace art #endif // ART_COMPILER_DEBUG_METHOD_DEBUG_INFO_H_ android-platform-art-8.1.0+r23/compiler/dex/000077500000000000000000000000001336577252300205575ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/dex/dex_to_dex_compiler.cc000066400000000000000000000367321336577252300251150ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dex_to_dex_compiler.h" #include "android-base/stringprintf.h" #include "art_field-inl.h" #include "art_method-inl.h" #include "base/logging.h" #include "base/mutex.h" #include "bytecode_utils.h" #include "compiled_method.h" #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "driver/compiler_driver.h" #include "driver/dex_compilation_unit.h" #include "mirror/dex_cache.h" #include "quicken_info.h" #include "thread-current-inl.h" namespace art { namespace optimizer { using android::base::StringPrintf; // Controls quickening activation. const bool kEnableQuickening = true; // Control check-cast elision. const bool kEnableCheckCastEllision = true; struct QuickenedInfo { QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {} uint32_t dex_pc; uint16_t dex_member_index; }; class DexCompiler { public: DexCompiler(art::CompilerDriver& compiler, const DexCompilationUnit& unit, DexToDexCompilationLevel dex_to_dex_compilation_level) : driver_(compiler), unit_(unit), dex_to_dex_compilation_level_(dex_to_dex_compilation_level) {} ~DexCompiler() {} void Compile(); const std::vector& GetQuickenedInfo() const { return quickened_info_; } private: const DexFile& GetDexFile() const { return *unit_.GetDexFile(); } // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where // a barrier is required. void CompileReturnVoid(Instruction* inst, uint32_t dex_pc); // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In // this case, returns the second NOP instruction pointer. Otherwise, returns // the given "inst". Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc); // Compiles a field access into a quick field access. // The field index is replaced by an offset within an Object where we can read // from / write to this field. 
Therefore, this does not involve any resolution // at runtime. // Since the field index is encoded with 16 bits, we can replace it only if the // field offset can be encoded with 16 bits too. void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc, Instruction::Code new_opcode, bool is_put); // Compiles a virtual method invocation into a quick virtual method invocation. // The method index is replaced by the vtable index where the corresponding // Executable can be found. Therefore, this does not involve any resolution // at runtime. // Since the method index is encoded with 16 bits, we can replace it only if the // vtable index can be encoded with 16 bits too. void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc, Instruction::Code new_opcode, bool is_range); CompilerDriver& driver_; const DexCompilationUnit& unit_; const DexToDexCompilationLevel dex_to_dex_compilation_level_; // Filled by the compiler when quickening, in order to encode that information // in the .oat file. The runtime will use that information to get to the original // opcodes. std::vector quickened_info_; DISALLOW_COPY_AND_ASSIGN(DexCompiler); }; void DexCompiler::Compile() { DCHECK_EQ(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kOptimize); for (CodeItemIterator it(*unit_.GetCodeItem()); !it.Done(); it.Advance()) { Instruction* inst = const_cast(&it.CurrentInstruction()); const uint32_t dex_pc = it.CurrentDexPc(); switch (inst->Opcode()) { case Instruction::RETURN_VOID: CompileReturnVoid(inst, dex_pc); break; case Instruction::CHECK_CAST: inst = CompileCheckCast(inst, dex_pc); if (inst->Opcode() == Instruction::NOP) { // We turned the CHECK_CAST into two NOPs, avoid visiting the second NOP twice since this // would add 2 quickening info entries. it.Advance(); } break; case Instruction::IGET: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false); break; case Instruction::IGET_WIDE: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false); break; case Instruction::IGET_OBJECT: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false); break; case Instruction::IGET_BOOLEAN: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN_QUICK, false); break; case Instruction::IGET_BYTE: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE_QUICK, false); break; case Instruction::IGET_CHAR: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR_QUICK, false); break; case Instruction::IGET_SHORT: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT_QUICK, false); break; case Instruction::IPUT: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true); break; case Instruction::IPUT_BOOLEAN: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true); break; case Instruction::IPUT_BYTE: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true); break; case Instruction::IPUT_CHAR: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true); break; case Instruction::IPUT_SHORT: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true); break; case Instruction::IPUT_WIDE: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE_QUICK, true); break; case Instruction::IPUT_OBJECT: CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT_QUICK, true); break; case Instruction::INVOKE_VIRTUAL: CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_QUICK, false); break; case 
Instruction::INVOKE_VIRTUAL_RANGE:
        CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, true);
        break;

      case Instruction::NOP:
        // We need to differentiate between a check-cast inserted NOP and a normal NOP, so put
        // an invalid index in the map for normal NOPs. This should be rare in real code.
        quickened_info_.push_back(QuickenedInfo(dex_pc, DexFile::kDexNoIndex16));
        break;

      default:
        DCHECK(!inst->IsQuickened());
        // Nothing to do.
        break;
    }
  }
}

void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
  DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
  if (unit_.IsConstructor()) {
    // Are we compiling a non-clinit constructor which needs a barrier?
    if (!unit_.IsStatic() &&
        driver_.RequiresConstructorBarrier(Thread::Current(),
                                           unit_.GetDexFile(),
                                           unit_.GetClassDefIndex())) {
      return;
    }
  }
  // Replace RETURN_VOID by RETURN_VOID_NO_BARRIER.
  VLOG(compiler) << "Replacing " << Instruction::Name(inst->Opcode())
                 << " by " << Instruction::Name(Instruction::RETURN_VOID_NO_BARRIER)
                 << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
                 << GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
  inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
}

Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
  if (!kEnableCheckCastEllision) {
    return inst;
  }
  if (!driver_.IsSafeCast(&unit_, dex_pc)) {
    return inst;
  }
  // OK, this is a safe cast. Since the "check-cast" instruction size is 2 code
  // units and a "nop" instruction size is 1 code unit, we need to replace it by
  // 2 consecutive NOP instructions.
  // Because the caller loops over instructions by calling Instruction::Next onto
  // the current instruction, we need to return the 2nd NOP instruction. Indeed,
  // its next instruction is the former check-cast's next instruction.
  VLOG(compiler) << "Removing " << Instruction::Name(inst->Opcode())
                 << " by replacing it with 2 NOPs at dex pc "
                 << StringPrintf("0x%x", dex_pc) << " in method "
                 << GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
  quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
  quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
  // We are modifying 4 consecutive bytes.
  inst->SetOpcode(Instruction::NOP);
  inst->SetVRegA_10x(0u);  // keep compliant with verifier.
  // Get to the next instruction, which is the second half of the check-cast,
  // and replace it by a NOP.
  inst = const_cast<Instruction*>(inst->Next());
  inst->SetOpcode(Instruction::NOP);
  inst->SetVRegA_10x(0u);  // keep compliant with verifier.
  return inst;
}

void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
                                             uint32_t dex_pc,
                                             Instruction::Code new_opcode,
                                             bool is_put) {
  if (!kEnableQuickening) {
    return;
  }
  uint32_t field_idx = inst->VRegC_22c();
  MemberOffset field_offset(0u);
  bool is_volatile;
  bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
                                                    &field_offset, &is_volatile);
  if (fast_path && !is_volatile && IsUint<16>(field_offset.Int32Value())) {
    VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode())
                   << " to " << Instruction::Name(new_opcode)
                   << " by replacing field index " << field_idx
                   << " by field offset " << field_offset.Int32Value()
                   << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
                   << GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
    // We are modifying 4 consecutive bytes.
    inst->SetOpcode(new_opcode);
    // Replace field index by field offset.
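    // E.g. an "iget vA, vB, field@5" whose resolved field lives at byte offset 16
    // in the object becomes "iget-quick vA, vB, offset@16" (illustrative values),
    // so the interpreter can read the field without resolving it first.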
inst->SetVRegC_22c(static_cast(field_offset.Int32Value())); quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx)); } } void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc, Instruction::Code new_opcode, bool is_range) { if (!kEnableQuickening) { return; } uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c(); ScopedObjectAccess soa(Thread::Current()); ClassLinker* class_linker = unit_.GetClassLinker(); ArtMethod* resolved_method = class_linker->ResolveMethod( GetDexFile(), method_idx, unit_.GetDexCache(), unit_.GetClassLoader(), /* referrer */ nullptr, kVirtual); if (UNLIKELY(resolved_method == nullptr)) { // Clean up any exception left by type resolution. soa.Self()->ClearException(); return; } uint32_t vtable_idx = resolved_method->GetMethodIndex(); DCHECK(IsUint<16>(vtable_idx)); VLOG(compiler) << "Quickening " << Instruction::Name(inst->Opcode()) << "(" << GetDexFile().PrettyMethod(method_idx, true) << ")" << " to " << Instruction::Name(new_opcode) << " by replacing method index " << method_idx << " by vtable index " << vtable_idx << " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method " << GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true); // We are modifying 4 consecutive bytes. inst->SetOpcode(new_opcode); // Replace method index by vtable index. if (is_range) { inst->SetVRegB_3rc(static_cast(vtable_idx)); } else { inst->SetVRegB_35c(static_cast(vtable_idx)); } quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx)); } CompiledMethod* ArtCompileDEX( CompilerDriver* driver, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type ATTRIBUTE_UNUSED, uint16_t class_def_idx, uint32_t method_idx, Handle class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level) { DCHECK(driver != nullptr); if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) { ScopedObjectAccess soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); art::DexCompilationUnit unit( class_loader, class_linker, dex_file, code_item, class_def_idx, method_idx, access_flags, driver->GetVerifiedMethod(&dex_file, method_idx), hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file))); art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level); dex_compiler.Compile(); if (dex_compiler.GetQuickenedInfo().empty()) { // No need to create a CompiledMethod if there are no quickened opcodes. return nullptr; } // Create a `CompiledMethod`, with the quickened information in the vmap table. if (kIsDebugBuild) { // Double check that the counts line up with the size of the quicken info. size_t quicken_count = 0; for (CodeItemIterator it(*code_item); !it.Done(); it.Advance()) { if (QuickenInfoTable::NeedsIndexForInstruction(&it.CurrentInstruction())) { ++quicken_count; } } CHECK_EQ(quicken_count, dex_compiler.GetQuickenedInfo().size()); } std::vector quicken_data; for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) { // Dex pc is not serialized, only used for checking the instructions. Since we access the // array based on the index of the quickened instruction, the indexes must line up perfectly. // The reader side uses the NeedsIndexForInstruction function too. const Instruction* inst = Instruction::At(code_item->insns_ + info.dex_pc); CHECK(QuickenInfoTable::NeedsIndexForInstruction(inst)) << inst->Opcode(); // Add the index. 
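    // The 16-bit member index is emitted little-endian: low byte first, then high
    // byte; the QuickenInfoTable reader side reassembles it in the same order.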
quicken_data.push_back(static_cast(info.dex_member_index >> 0)); quicken_data.push_back(static_cast(info.dex_member_index >> 8)); } InstructionSet instruction_set = driver->GetInstructionSet(); if (instruction_set == kThumb2) { // Don't use the thumb2 instruction set to avoid the one off code delta. instruction_set = kArm; } return CompiledMethod::SwapAllocCompiledMethod( driver, instruction_set, ArrayRef(), // no code 0, 0, 0, ArrayRef(), // method_info ArrayRef(quicken_data), // vmap_table ArrayRef(), // cfi data ArrayRef()); } return nullptr; } } // namespace optimizer } // namespace art android-platform-art-8.1.0+r23/compiler/dex/dex_to_dex_compiler.h000066400000000000000000000034541336577252300247520ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_ #define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_ #include "dex_file.h" #include "handle.h" #include "invoke_type.h" namespace art { class CompiledMethod; class CompilerDriver; namespace mirror { class ClassLoader; } // namespace mirror namespace optimizer { enum class DexToDexCompilationLevel { kDontDexToDexCompile, // Only meaning wrt image time interpretation. kOptimize // Perform peep-hole optimizations. }; std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs); CompiledMethod* ArtCompileDEX(CompilerDriver* driver, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, Handle class_loader, const DexFile& dex_file, DexToDexCompilationLevel dex_to_dex_compilation_level); } // namespace optimizer } // namespace art #endif // ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_ android-platform-art-8.1.0+r23/compiler/dex/dex_to_dex_decompiler_test.cc000066400000000000000000000121221336577252300264500ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "dex_to_dex_decompiler.h" #include "class_linker.h" #include "compiler/common_compiler_test.h" #include "compiler/compiled_method.h" #include "compiler/driver/compiler_options.h" #include "compiler/driver/compiler_driver.h" #include "compiler_callbacks.h" #include "dex_file.h" #include "handle_scope-inl.h" #include "verifier/method_verifier-inl.h" #include "mirror/class_loader.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "verifier/method_verifier-inl.h" #include "verifier/verifier_deps.h" namespace art { class DexToDexDecompilerTest : public CommonCompilerTest { public: void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) { TimingLogger timings("CompilerDriverTest::CompileAll", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); compiler_options_->boot_image_ = false; compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken); // Create the main VerifierDeps, here instead of in the compiler since we want to aggregate // the results for all the dex files, not just the results for the current dex file. Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps( new verifier::VerifierDeps(GetDexFiles(class_loader))); compiler_driver_->SetDexFilesForOatFile(GetDexFiles(class_loader)); compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings); } void RunTest(const char* dex_name) { Thread* self = Thread::Current(); // First load the original dex file. jobject original_class_loader; { ScopedObjectAccess soa(self); original_class_loader = LoadDex(dex_name); } const DexFile* original_dex_file = GetDexFiles(original_class_loader)[0]; // Load the dex file again and make it writable to quicken them. jobject class_loader; const DexFile* updated_dex_file = nullptr; { ScopedObjectAccess soa(self); class_loader = LoadDex(dex_name); updated_dex_file = GetDexFiles(class_loader)[0]; Runtime::Current()->GetClassLinker()->RegisterDexFile( *updated_dex_file, soa.Decode(class_loader).Ptr()); } // The dex files should be identical. int cmp = memcmp(original_dex_file->Begin(), updated_dex_file->Begin(), updated_dex_file->Size()); ASSERT_EQ(0, cmp); updated_dex_file->EnableWrite(); CompileAll(class_loader); // The dex files should be different after quickening. cmp = memcmp(original_dex_file->Begin(), updated_dex_file->Begin(), updated_dex_file->Size()); ASSERT_NE(0, cmp); // Unquicken the dex file. for (uint32_t i = 0; i < updated_dex_file->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = updated_dex_file->GetClassDef(i); const uint8_t* class_data = updated_dex_file->GetClassData(class_def); if (class_data == nullptr) { continue; } ClassDataItemIterator it(*updated_dex_file, class_data); it.SkipAllFields(); // Unquicken each method. 
while (it.HasNextDirectMethod()) { uint32_t method_idx = it.GetMemberIndex(); CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(updated_dex_file, method_idx)); ArrayRef table; if (compiled_method != nullptr) { table = compiled_method->GetVmapTable(); } optimizer::ArtDecompileDEX( *it.GetMethodCodeItem(), table, /* decompile_return_instruction */ true); it.Next(); } while (it.HasNextVirtualMethod()) { uint32_t method_idx = it.GetMemberIndex(); CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(updated_dex_file, method_idx)); ArrayRef table; if (compiled_method != nullptr) { table = compiled_method->GetVmapTable(); } optimizer::ArtDecompileDEX( *it.GetMethodCodeItem(), table, /* decompile_return_instruction */ true); it.Next(); } DCHECK(!it.HasNext()); } // Make sure after unquickening we go back to the same contents as the original dex file. cmp = memcmp(original_dex_file->Begin(), updated_dex_file->Begin(), updated_dex_file->Size()); ASSERT_EQ(0, cmp); } }; TEST_F(DexToDexDecompilerTest, VerifierDeps) { RunTest("VerifierDeps"); } TEST_F(DexToDexDecompilerTest, DexToDexDecompiler) { RunTest("DexToDexDecompiler"); } } // namespace art android-platform-art-8.1.0+r23/compiler/dex/inline_method_analyser.cc000066400000000000000000000730461336577252300256140ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "inline_method_analyser.h" #include "art_field-inl.h" #include "art_method-inl.h" #include "base/enums.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "dex_instruction-inl.h" #include "dex_instruction_utils.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" /* * NOTE: This code is part of the quick compiler. It lives in the runtime * only to allow the debugger to check whether a method has been inlined. */ namespace art { namespace { // anonymous namespace // Helper class for matching a pattern. class Matcher { public: // Match function type. typedef bool MatchFn(Matcher* matcher); template static bool Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]); // Match and advance. static bool Mark(Matcher* matcher); template static bool Required(Matcher* matcher); template static bool Repeated(Matcher* matcher); // On match, returns to the mark. // Match an individual instruction. 
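  // A full pattern is a table of these functions walked by DoMatch; a minimal
  // sketch (hypothetical pattern, in the style of kConstructorPattern below):
  //
  //   static Matcher::MatchFn* const kPattern[] = {
  //       &Matcher::Mark,
  //       &Matcher::Repeated<&Matcher::Const0>,
  //       &Matcher::Required<&Matcher::Opcode<Instruction::RETURN_VOID>>,
  //   };
  //   bool matched = Matcher::Match(code_item, kPattern);
  //
  // The predicates below are the per-instruction building blocks: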
template bool Opcode(); bool Const0(); bool IPutOnThis(); private: explicit Matcher(const DexFile::CodeItem* code_item) : code_item_(code_item), instruction_(Instruction::At(code_item->insns_)), pos_(0u), mark_(0u) { } static bool DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size); const DexFile::CodeItem* const code_item_; const Instruction* instruction_; size_t pos_; size_t mark_; }; template bool Matcher::Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]) { return DoMatch(code_item, pattern, size); } bool Matcher::Mark(Matcher* matcher) { matcher->pos_ += 1u; // Advance to the next match function before marking. matcher->mark_ = matcher->pos_; return true; } template bool Matcher::Required(Matcher* matcher) { if (!(matcher->*Fn)()) { return false; } matcher->pos_ += 1u; matcher->instruction_ = matcher->instruction_->Next(); return true; } template bool Matcher::Repeated(Matcher* matcher) { if (!(matcher->*Fn)()) { // Didn't match optional instruction, try the next match function. matcher->pos_ += 1u; return true; } matcher->pos_ = matcher->mark_; matcher->instruction_ = matcher->instruction_->Next(); return true; } template bool Matcher::Opcode() { return instruction_->Opcode() == opcode; } // Match const 0. bool Matcher::Const0() { return IsInstructionDirectConst(instruction_->Opcode()) && (instruction_->Opcode() == Instruction::CONST_WIDE ? instruction_->VRegB_51l() == 0 : instruction_->VRegB() == 0); } bool Matcher::IPutOnThis() { DCHECK_NE(code_item_->ins_size_, 0u); return IsInstructionIPut(instruction_->Opcode()) && instruction_->VRegB_22c() == code_item_->registers_size_ - code_item_->ins_size_; } bool Matcher::DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size) { Matcher matcher(code_item); while (matcher.pos_ != size) { if (!pattern[matcher.pos_](&matcher)) { return false; } } return true; } // Used for a single invoke in a constructor. In that situation, the method verifier makes // sure we invoke a constructor either in the same class or superclass with at least "this". ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT); DCHECK_EQ(invoke_direct->VRegC_35c(), method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_); uint32_t method_index = invoke_direct->VRegB_35c(); ArtMethod* target_method = Runtime::Current()->GetClassLinker()->LookupResolvedMethod( method_index, method->GetDexCache(), method->GetClassLoader()); if (kIsDebugBuild && target_method != nullptr) { CHECK(!target_method->IsStatic()); CHECK(target_method->IsConstructor()); CHECK(target_method->GetDeclaringClass() == method->GetDeclaringClass() || target_method->GetDeclaringClass() == method->GetDeclaringClass()->GetSuperClass()); } return target_method; } // Return the forwarded arguments and check that all remaining arguments are zero. // If the check fails, return static_cast(-1). size_t CountForwardedConstructorArguments(const DexFile::CodeItem* code_item, const Instruction* invoke_direct, uint16_t zero_vreg_mask) { DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT); size_t number_of_args = invoke_direct->VRegA_35c(); DCHECK_NE(number_of_args, 0u); uint32_t args[Instruction::kMaxVarArgRegs]; invoke_direct->GetVarArgs(args); uint16_t this_vreg = args[0]; DCHECK_EQ(this_vreg, code_item->registers_size_ - code_item->ins_size_); // Checked by verifier. 
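  // `this` (args[0]) always counts as forwarded; each later argument counts only
  // while it sits in the next consecutive vreg after `this` and has not been
  // redefined as a zero constant. Every argument past that point must be a known
  // zero, otherwise we bail out with static_cast<size_t>(-1).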
size_t forwarded = 1u; while (forwarded < number_of_args && args[forwarded] == this_vreg + forwarded && (zero_vreg_mask & (1u << args[forwarded])) == 0) { ++forwarded; } for (size_t i = forwarded; i != number_of_args; ++i) { if ((zero_vreg_mask & (1u << args[i])) == 0) { return static_cast(-1); } } return forwarded; } uint16_t GetZeroVRegMask(const Instruction* const0) { DCHECK(IsInstructionDirectConst(const0->Opcode())); DCHECK((const0->Opcode() == Instruction::CONST_WIDE) ? const0->VRegB_51l() == 0u : const0->VRegB() == 0); uint16_t base_mask = IsInstructionConstWide(const0->Opcode()) ? 3u : 1u; return base_mask << const0->VRegA(); } // We limit the number of IPUTs storing parameters. There can be any number // of IPUTs that store the value 0 as they are useless in a constructor as // the object always starts zero-initialized. We also eliminate all but the // last store to any field as they are not observable; not even if the field // is volatile as no reference to the object can escape from a constructor // with this pattern. static constexpr size_t kMaxConstructorIPuts = 3u; struct ConstructorIPutData { ConstructorIPutData() : field_index(DexFile::kDexNoIndex16), arg(0u) { } uint16_t field_index; uint16_t arg; }; bool RecordConstructorIPut(ArtMethod* method, const Instruction* new_iput, uint16_t this_vreg, uint16_t zero_vreg_mask, /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts]) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(IsInstructionIPut(new_iput->Opcode())); uint32_t field_index = new_iput->VRegC_22c(); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false); if (UNLIKELY(field == nullptr)) { return false; } // Remove previous IPUT to the same field, if any. Different field indexes may refer // to the same field, so we need to compare resolved fields from the dex cache. for (size_t old_pos = 0; old_pos != arraysize(iputs); ++old_pos) { if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) { break; } ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index, method, /* is_static */ false); DCHECK(f != nullptr); if (f == field) { auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos); *back_it = ConstructorIPutData(); break; } } // If the stored value isn't zero, record the IPUT. if ((zero_vreg_mask & (1u << new_iput->VRegA_22c())) == 0u) { size_t new_pos = 0; while (new_pos != arraysize(iputs) && iputs[new_pos].field_index != DexFile::kDexNoIndex16) { ++new_pos; } if (new_pos == arraysize(iputs)) { return false; // Exceeded capacity of the output array. } iputs[new_pos].field_index = field_index; iputs[new_pos].arg = new_iput->VRegA_22c() - this_vreg; } return true; } bool DoAnalyseConstructor(const DexFile::CodeItem* code_item, ArtMethod* method, /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts]) REQUIRES_SHARED(Locks::mutator_lock_) { // On entry we should not have any IPUTs yet. DCHECK_EQ(0, std::count_if( iputs, iputs + arraysize(iputs), [](const ConstructorIPutData& iput_data) { return iput_data.field_index != DexFile::kDexNoIndex16; })); // Limit the maximum number of code units we're willing to match. static constexpr size_t kMaxCodeUnits = 16u; // Limit the number of registers that the constructor may use to 16. // Given that IPUTs must use low 16 registers and we do not match MOVEs, // this is a reasonable limitation. 
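  // For illustration, a constructor this matcher accepts could look like the
  // following (hypothetical smali, not taken from any real dex file):
  //
  //   const/4 v0, #int 0
  //   invoke-direct {v2, v0}, LBase;-><init>(I)V   # forwards `this`, extra zero arg
  //   iput v3, v2, LSub;->count:I                  # IPUT on `this`
  //   return-void
  //
  // Hence the register cap below is not restrictive in practice: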
static constexpr size_t kMaxVRegs = 16u; // We try to match a constructor that calls another constructor (either in // superclass or in the same class) with the same parameters, or with some // parameters truncated (allowed only for calls to superclass constructor) // or with extra parameters with value 0 (with any type, including null). // This call can be followed by optional IPUTs on "this" storing either one // of the parameters or 0 and the code must then finish with RETURN_VOID. // The called constructor must be either java.lang.Object.() or it // must also match the same pattern. static Matcher::MatchFn* const kConstructorPattern[] = { &Matcher::Mark, &Matcher::Repeated<&Matcher::Const0>, &Matcher::Required<&Matcher::Opcode>, &Matcher::Mark, &Matcher::Repeated<&Matcher::Const0>, &Matcher::Repeated<&Matcher::IPutOnThis>, &Matcher::Required<&Matcher::Opcode>, }; DCHECK(method != nullptr); DCHECK(!method->IsStatic()); DCHECK(method->IsConstructor()); DCHECK(code_item != nullptr); if (!method->GetDeclaringClass()->IsVerified() || code_item->insns_size_in_code_units_ > kMaxCodeUnits || code_item->registers_size_ > kMaxVRegs || !Matcher::Match(code_item, kConstructorPattern)) { return false; } // Verify the invoke, prevent a few odd cases and collect IPUTs. uint16_t this_vreg = code_item->registers_size_ - code_item->ins_size_; uint16_t zero_vreg_mask = 0u; for (const Instruction* instruction = Instruction::At(code_item->insns_); instruction->Opcode() != Instruction::RETURN_VOID; instruction = instruction->Next()) { if (instruction->Opcode() == Instruction::INVOKE_DIRECT) { ArtMethod* target_method = GetTargetConstructor(method, instruction); if (target_method == nullptr) { return false; } // We allow forwarding constructors only if they pass more arguments // to prevent infinite recursion. if (target_method->GetDeclaringClass() == method->GetDeclaringClass() && instruction->VRegA_35c() <= code_item->ins_size_) { return false; } size_t forwarded = CountForwardedConstructorArguments(code_item, instruction, zero_vreg_mask); if (forwarded == static_cast(-1)) { return false; } if (target_method->GetDeclaringClass()->IsObjectClass()) { DCHECK_EQ(Instruction::At(target_method->GetCodeItem()->insns_)->Opcode(), Instruction::RETURN_VOID); } else { const DexFile::CodeItem* target_code_item = target_method->GetCodeItem(); if (target_code_item == nullptr) { return false; // Native constructor? } if (!DoAnalyseConstructor(target_code_item, target_method, iputs)) { return false; } // Prune IPUTs with zero input. auto kept_end = std::remove_if( iputs, iputs + arraysize(iputs), [forwarded](const ConstructorIPutData& iput_data) { return iput_data.arg >= forwarded; }); std::fill(kept_end, iputs + arraysize(iputs), ConstructorIPutData()); // If we have any IPUTs from the call, check that the target method is in the same // dex file (compare DexCache references), otherwise field_indexes would be bogus. if (iputs[0].field_index != DexFile::kDexNoIndex16 && target_method->GetDexCache() != method->GetDexCache()) { return false; } } } else if (IsInstructionDirectConst(instruction->Opcode())) { zero_vreg_mask |= GetZeroVRegMask(instruction); if ((zero_vreg_mask & (1u << this_vreg)) != 0u) { return false; // Overwriting `this` is unsupported. 
} } else { DCHECK(IsInstructionIPut(instruction->Opcode())); DCHECK_EQ(instruction->VRegB_22c(), this_vreg); if (!RecordConstructorIPut(method, instruction, this_vreg, zero_vreg_mask, iputs)) { return false; } } } return true; } } // anonymous namespace bool AnalyseConstructor(const DexFile::CodeItem* code_item, ArtMethod* method, InlineMethod* result) REQUIRES_SHARED(Locks::mutator_lock_) { ConstructorIPutData iputs[kMaxConstructorIPuts]; if (!DoAnalyseConstructor(code_item, method, iputs)) { return false; } static_assert(kMaxConstructorIPuts == 3, "Unexpected limit"); // Code below depends on this. DCHECK(iputs[0].field_index != DexFile::kDexNoIndex16 || iputs[1].field_index == DexFile::kDexNoIndex16); DCHECK(iputs[1].field_index != DexFile::kDexNoIndex16 || iputs[2].field_index == DexFile::kDexNoIndex16); #define STORE_IPUT(n) \ do { \ result->d.constructor_data.iput##n##_field_index = iputs[n].field_index; \ result->d.constructor_data.iput##n##_arg = iputs[n].arg; \ } while (false) STORE_IPUT(0); STORE_IPUT(1); STORE_IPUT(2); #undef STORE_IPUT result->opcode = kInlineOpConstructor; result->d.constructor_data.reserved = 0u; return true; } static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT), "iget_object type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BOOLEAN), "iget_boolean type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_BYTE), "iget_byte type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_CHAR), "iget_char type"); static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_SHORT), "iget_short type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT), "iput type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_WIDE), "iput_wide type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_OBJECT), "iput_object type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BOOLEAN), "iput_boolean type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_BYTE), "iput_byte type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_CHAR), "iput_char type"); static_assert(InlineMethodAnalyser::IsInstructionIPut(Instruction::IPUT_SHORT), "iput_short type"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT), "iget/iput variant"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE), "iget/iput_wide variant"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT), "iget/iput_object variant"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN), "iget/iput_boolean variant"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE), "iget/iput_byte variant"); static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR), "iget/iput_char variant"); 
static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant"); bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* result) { const DexFile::CodeItem* code_item = method->GetCodeItem(); if (code_item == nullptr) { // Native or abstract. return false; } return AnalyseMethodCode(code_item, MethodReference(method->GetDexFile(), method->GetDexMethodIndex()), method->IsStatic(), method, result); } bool InlineMethodAnalyser::AnalyseMethodCode(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) { // We currently support only plain return or 2-instruction methods. DCHECK_NE(code_item->insns_size_in_code_units_, 0u); const Instruction* instruction = Instruction::At(code_item->insns_); Instruction::Code opcode = instruction->Opcode(); switch (opcode) { case Instruction::RETURN_VOID: if (result != nullptr) { result->opcode = kInlineOpNop; result->d.data = 0u; } return true; case Instruction::RETURN: case Instruction::RETURN_OBJECT: case Instruction::RETURN_WIDE: return AnalyseReturnMethod(code_item, result); case Instruction::CONST: case Instruction::CONST_4: case Instruction::CONST_16: case Instruction::CONST_HIGH16: // TODO: Support wide constants (RETURN_WIDE). if (AnalyseConstMethod(code_item, result)) { return true; } FALLTHROUGH_INTENDED; case Instruction::CONST_WIDE: case Instruction::CONST_WIDE_16: case Instruction::CONST_WIDE_32: case Instruction::CONST_WIDE_HIGH16: case Instruction::INVOKE_DIRECT: if (method != nullptr && !method->IsStatic() && method->IsConstructor()) { return AnalyseConstructor(code_item, method, result); } return false; case Instruction::IGET: case Instruction::IGET_OBJECT: case Instruction::IGET_BOOLEAN: case Instruction::IGET_BYTE: case Instruction::IGET_CHAR: case Instruction::IGET_SHORT: case Instruction::IGET_WIDE: // TODO: Add handling for JIT. // case Instruction::IGET_QUICK: // case Instruction::IGET_WIDE_QUICK: // case Instruction::IGET_OBJECT_QUICK: return AnalyseIGetMethod(code_item, method_ref, is_static, method, result); case Instruction::IPUT: case Instruction::IPUT_OBJECT: case Instruction::IPUT_BOOLEAN: case Instruction::IPUT_BYTE: case Instruction::IPUT_CHAR: case Instruction::IPUT_SHORT: case Instruction::IPUT_WIDE: // TODO: Add handling for JIT. // case Instruction::IPUT_QUICK: // case Instruction::IPUT_WIDE_QUICK: // case Instruction::IPUT_OBJECT_QUICK: return AnalyseIPutMethod(code_item, method_ref, is_static, method, result); default: return false; } } bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) { const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.dex_method_index); const char* method_name = ref.dex_file->GetMethodName(method_id); // javac names synthetic accessors "access$nnn", // jack names them "-getN", "-putN", "-wrapN". return strncmp(method_name, "access$", strlen("access$")) == 0 || strncmp(method_name, "-", strlen("-")) == 0; } bool InlineMethodAnalyser::AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result) { const Instruction* return_instruction = Instruction::At(code_item->insns_); Instruction::Code return_opcode = return_instruction->Opcode(); uint32_t reg = return_instruction->VRegA_11x(); uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_; DCHECK_GE(reg, arg_start); DCHECK_LT((return_opcode == Instruction::RETURN_WIDE) ? 
reg + 1 : reg, code_item->registers_size_); if (result != nullptr) { result->opcode = kInlineOpReturnArg; InlineReturnArgData* data = &result->d.return_data; data->arg = reg - arg_start; data->is_wide = (return_opcode == Instruction::RETURN_WIDE) ? 1u : 0u; data->is_object = (return_opcode == Instruction::RETURN_OBJECT) ? 1u : 0u; data->reserved = 0u; data->reserved2 = 0u; } return true; } bool InlineMethodAnalyser::AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result) { const Instruction* instruction = Instruction::At(code_item->insns_); const Instruction* return_instruction = instruction->Next(); Instruction::Code return_opcode = return_instruction->Opcode(); if (return_opcode != Instruction::RETURN && return_opcode != Instruction::RETURN_OBJECT) { return false; } int32_t return_reg = return_instruction->VRegA_11x(); DCHECK_LT(return_reg, code_item->registers_size_); int32_t const_value = instruction->VRegB(); if (instruction->Opcode() == Instruction::CONST_HIGH16) { const_value <<= 16; } DCHECK_LT(instruction->VRegA(), code_item->registers_size_); if (instruction->VRegA() != return_reg) { return false; // Not returning the value set by const? } if (return_opcode == Instruction::RETURN_OBJECT && const_value != 0) { return false; // Returning non-null reference constant? } if (result != nullptr) { result->opcode = kInlineOpNonWideConst; result->d.data = static_cast(const_value); } return true; } bool InlineMethodAnalyser::AnalyseIGetMethod(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) { const Instruction* instruction = Instruction::At(code_item->insns_); Instruction::Code opcode = instruction->Opcode(); DCHECK(IsInstructionIGet(opcode)); const Instruction* return_instruction = instruction->Next(); Instruction::Code return_opcode = return_instruction->Opcode(); if (!(return_opcode == Instruction::RETURN_WIDE && opcode == Instruction::IGET_WIDE) && !(return_opcode == Instruction::RETURN_OBJECT && opcode == Instruction::IGET_OBJECT) && !(return_opcode == Instruction::RETURN && opcode != Instruction::IGET_WIDE && opcode != Instruction::IGET_OBJECT)) { return false; } uint32_t return_reg = return_instruction->VRegA_11x(); DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1 : return_reg, code_item->registers_size_); uint32_t dst_reg = instruction->VRegA_22c(); uint32_t object_reg = instruction->VRegB_22c(); uint32_t field_idx = instruction->VRegC_22c(); uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_; DCHECK_GE(object_reg, arg_start); DCHECK_LT(object_reg, code_item->registers_size_); uint32_t object_arg = object_reg - arg_start; DCHECK_LT(opcode == Instruction::IGET_WIDE ? dst_reg + 1 : dst_reg, code_item->registers_size_); if (dst_reg != return_reg) { return false; // Not returning the value retrieved by IGET? } if (is_static || object_arg != 0u) { // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE). // Allow synthetic accessors. We don't care about losing their stack frame in NPE. if (!IsSyntheticAccessor(method_ref)) { return false; } } // InlineIGetIPutData::object_arg is only 4 bits wide. 
static constexpr uint16_t kMaxObjectArg = 15u; if (object_arg > kMaxObjectArg) { return false; } if (result != nullptr) { InlineIGetIPutData* data = &result->d.ifield_data; if (!ComputeSpecialAccessorInfo(method, field_idx, false, data)) { return false; } result->opcode = kInlineOpIGet; data->op_variant = IGetVariant(opcode); data->method_is_static = is_static ? 1u : 0u; data->object_arg = object_arg; // Allow IGET on any register, not just "this". data->src_arg = 0u; data->return_arg_plus1 = 0u; } return true; } bool InlineMethodAnalyser::AnalyseIPutMethod(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) { const Instruction* instruction = Instruction::At(code_item->insns_); Instruction::Code opcode = instruction->Opcode(); DCHECK(IsInstructionIPut(opcode)); const Instruction* return_instruction = instruction->Next(); Instruction::Code return_opcode = return_instruction->Opcode(); uint32_t arg_start = code_item->registers_size_ - code_item->ins_size_; uint16_t return_arg_plus1 = 0u; if (return_opcode != Instruction::RETURN_VOID) { if (return_opcode != Instruction::RETURN && return_opcode != Instruction::RETURN_OBJECT && return_opcode != Instruction::RETURN_WIDE) { return false; } // Returning an argument. uint32_t return_reg = return_instruction->VRegA_11x(); DCHECK_GE(return_reg, arg_start); DCHECK_LT(return_opcode == Instruction::RETURN_WIDE ? return_reg + 1u : return_reg, code_item->registers_size_); return_arg_plus1 = return_reg - arg_start + 1u; } uint32_t src_reg = instruction->VRegA_22c(); uint32_t object_reg = instruction->VRegB_22c(); uint32_t field_idx = instruction->VRegC_22c(); DCHECK_GE(object_reg, arg_start); DCHECK_LT(object_reg, code_item->registers_size_); DCHECK_GE(src_reg, arg_start); DCHECK_LT(opcode == Instruction::IPUT_WIDE ? src_reg + 1 : src_reg, code_item->registers_size_); uint32_t object_arg = object_reg - arg_start; uint32_t src_arg = src_reg - arg_start; if (is_static || object_arg != 0u) { // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE). // Allow synthetic accessors. We don't care about losing their stack frame in NPE. if (!IsSyntheticAccessor(method_ref)) { return false; } } // InlineIGetIPutData::object_arg/src_arg/return_arg_plus1 are each only 4 bits wide. static constexpr uint16_t kMaxObjectArg = 15u; static constexpr uint16_t kMaxSrcArg = 15u; static constexpr uint16_t kMaxReturnArgPlus1 = 15u; if (object_arg > kMaxObjectArg || src_arg > kMaxSrcArg || return_arg_plus1 > kMaxReturnArgPlus1) { return false; } if (result != nullptr) { InlineIGetIPutData* data = &result->d.ifield_data; if (!ComputeSpecialAccessorInfo(method, field_idx, true, data)) { return false; } result->opcode = kInlineOpIPut; data->op_variant = IPutVariant(opcode); data->method_is_static = is_static ? 1u : 0u; data->object_arg = object_arg; // Allow IPUT on any register, not just "this". 
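    // E.g. a fluent setter "Foo set(int v) { f = v; return this; }" records
    // object_arg = 0, src_arg = 1 and return_arg_plus1 = 1 (it returns argument 0,
    // i.e. `this`); a plain void setter records return_arg_plus1 = 0.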
data->src_arg = src_arg;
    data->return_arg_plus1 = return_arg_plus1;
  }
  return true;
}

bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method,
                                                      uint32_t field_idx,
                                                      bool is_put,
                                                      InlineIGetIPutData* result) {
  if (method == nullptr) {
    return false;
  }
  ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
  if (field == nullptr || field->IsStatic()) {
    return false;
  }
  ObjPtr<mirror::Class> method_class = method->GetDeclaringClass();
  ObjPtr<mirror::Class> field_class = field->GetDeclaringClass();
  if (!method_class->CanAccessResolvedField(field_class, field, dex_cache, field_idx) ||
      (is_put && field->IsFinal() && method_class != field_class)) {
    return false;
  }
  DCHECK_GE(field->GetOffset().Int32Value(), 0);
  // Do not interleave function calls with bit field writes to placate valgrind. Bug: 27552451.
  uint32_t field_offset = field->GetOffset().Uint32Value();
  bool is_volatile = field->IsVolatile();
  result->field_idx = field_idx;
  result->field_offset = field_offset;
  result->is_volatile = is_volatile ? 1u : 0u;
  return true;
}

}  // namespace art
android-platform-art-8.1.0+r23/compiler/dex/inline_method_analyser.h000066400000000000000000000125511336577252300254500ustar00rootroot00000000000000/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_
#define ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
#include "dex_instruction.h"
#include "method_reference.h"

/*
 * NOTE: This code is part of the quick compiler. It lives in the runtime
 * only to allow the debugger to check whether a method has been inlined.
 */

namespace art {

namespace verifier {
class MethodVerifier;
}  // namespace verifier
class ArtMethod;

enum InlineMethodOpcode : uint16_t {
  kInlineOpNop,
  kInlineOpReturnArg,
  kInlineOpNonWideConst,
  kInlineOpIGet,
  kInlineOpIPut,
  kInlineOpConstructor,
};

struct InlineIGetIPutData {
  // The op_variant below is DexMemAccessType but the runtime doesn't know that enumeration.
  uint16_t op_variant : 3;
  uint16_t method_is_static : 1;
  uint16_t object_arg : 4;
  uint16_t src_arg : 4;  // iput only
  uint16_t return_arg_plus1 : 4;  // iput only, method argument to return + 1, 0 = return void.
  uint16_t field_idx;
  uint32_t is_volatile : 1;
  uint32_t field_offset : 31;
};
static_assert(sizeof(InlineIGetIPutData) == sizeof(uint64_t),
              "Invalid size of InlineIGetIPutData");

struct InlineReturnArgData {
  uint16_t arg;
  uint16_t is_wide : 1;
  uint16_t is_object : 1;
  uint16_t reserved : 14;
  uint32_t reserved2;
};
static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t),
              "Invalid size of InlineReturnArgData");

struct InlineConstructorData {
  // There can be up to 3 IPUTs, unused fields are marked with kNoDexIndex16.
uint16_t iput0_field_index; uint16_t iput1_field_index; uint16_t iput2_field_index; uint16_t iput0_arg : 4; uint16_t iput1_arg : 4; uint16_t iput2_arg : 4; uint16_t reserved : 4; }; static_assert(sizeof(InlineConstructorData) == sizeof(uint64_t), "Invalid size of InlineConstructorData"); struct InlineMethod { InlineMethodOpcode opcode; union { uint64_t data; InlineIGetIPutData ifield_data; InlineReturnArgData return_data; InlineConstructorData constructor_data; } d; }; class InlineMethodAnalyser { public: /** * Analyse method code to determine if the method is a candidate for inlining. * If it is, record the inlining data. * * @return true if the method is a candidate for inlining, false otherwise. */ static bool AnalyseMethodCode(ArtMethod* method, InlineMethod* result) REQUIRES_SHARED(Locks::mutator_lock_); static constexpr bool IsInstructionIGet(Instruction::Code opcode) { return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT; } static constexpr bool IsInstructionIPut(Instruction::Code opcode) { return Instruction::IPUT <= opcode && opcode <= Instruction::IPUT_SHORT; } static constexpr uint16_t IGetVariant(Instruction::Code opcode) { return opcode - Instruction::IGET; } static constexpr uint16_t IPutVariant(Instruction::Code opcode) { return opcode - Instruction::IPUT; } // Determines whether the method is a synthetic accessor (method name starts with "access$"). static bool IsSyntheticAccessor(MethodReference ref); private: static bool AnalyseMethodCode(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) REQUIRES_SHARED(Locks::mutator_lock_); static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result); static bool AnalyseIGetMethod(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) REQUIRES_SHARED(Locks::mutator_lock_); static bool AnalyseIPutMethod(const DexFile::CodeItem* code_item, const MethodReference& method_ref, bool is_static, ArtMethod* method, InlineMethod* result) REQUIRES_SHARED(Locks::mutator_lock_); // Can we fast path instance field access in a verified accessor? // If yes, computes field's offset and volatility and whether the method is static or not. static bool ComputeSpecialAccessorInfo(ArtMethod* method, uint32_t field_idx, bool is_put, InlineIGetIPutData* result) REQUIRES_SHARED(Locks::mutator_lock_); }; } // namespace art #endif // ART_COMPILER_DEX_INLINE_METHOD_ANALYSER_H_ android-platform-art-8.1.0+r23/compiler/dex/quick_compiler_callbacks.cc000066400000000000000000000033651336577252300261020ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "quick_compiler_callbacks.h" #include "driver/compiler_driver.h" #include "verifier/method_verifier-inl.h" #include "verification_results.h" namespace art { void QuickCompilerCallbacks::MethodVerified(verifier::MethodVerifier* verifier) { if (verification_results_ != nullptr) { verification_results_->ProcessVerifiedMethod(verifier); } } void QuickCompilerCallbacks::ClassRejected(ClassReference ref) { if (verification_results_ != nullptr) { verification_results_->AddRejectedClass(ref); } } bool QuickCompilerCallbacks::CanAssumeVerified(ClassReference ref) { // If we don't have class unloading enabled in the compiler, we will never see class that were // previously verified. Return false to avoid overhead from the lookup in the compiler driver. if (!does_class_unloading_) { return false; } DCHECK(compiler_driver_ != nullptr); // In the case of the quicken filter: avoiding verification of quickened instructions, which the // verifier doesn't currently support. // In the case of the verify filter, avoiding verifiying twice. return compiler_driver_->CanAssumeVerified(ref); } } // namespace art android-platform-art-8.1.0+r23/compiler/dex/quick_compiler_callbacks.h000066400000000000000000000045341336577252300257430ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ #define ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ #include "compiler_callbacks.h" #include "verifier/verifier_deps.h" namespace art { class CompilerDriver; class VerificationResults; class QuickCompilerCallbacks FINAL : public CompilerCallbacks { public: explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode) : CompilerCallbacks(mode) {} ~QuickCompilerCallbacks() { } void MethodVerified(verifier::MethodVerifier* verifier) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; void ClassRejected(ClassReference ref) OVERRIDE; // We are running in an environment where we can call patchoat safely so we should. 
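  // (Illustrative, not in the original tree: a hypothetical callbacks subclass
  // for an environment where patchoat cannot run would instead override this as
  // `bool IsRelocationPossible() OVERRIDE { return false; }`.)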
bool IsRelocationPossible() OVERRIDE { return true; } verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE { return verifier_deps_.get(); } void SetVerifierDeps(verifier::VerifierDeps* deps) OVERRIDE { verifier_deps_.reset(deps); } void SetVerificationResults(VerificationResults* verification_results) { verification_results_ = verification_results; } bool CanAssumeVerified(ClassReference ref) OVERRIDE; void SetDoesClassUnloading(bool does_class_unloading, CompilerDriver* compiler_driver) OVERRIDE { does_class_unloading_ = does_class_unloading; compiler_driver_ = compiler_driver; DCHECK(!does_class_unloading || compiler_driver_ != nullptr); } private: VerificationResults* verification_results_ = nullptr; bool does_class_unloading_ = false; CompilerDriver* compiler_driver_ = nullptr; std::unique_ptr verifier_deps_; }; } // namespace art #endif // ART_COMPILER_DEX_QUICK_COMPILER_CALLBACKS_H_ android-platform-art-8.1.0+r23/compiler/dex/verification_results.cc000066400000000000000000000156161336577252300253420ustar00rootroot00000000000000/* * Copyright (C) 2013 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "verification_results.h" #include "base/logging.h" #include "base/mutex-inl.h" #include "base/stl_util.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" #include "runtime.h" #include "thread.h" #include "thread-current-inl.h" #include "utils/atomic_dex_ref_map-inl.h" #include "verified_method.h" #include "verifier/method_verifier-inl.h" namespace art { VerificationResults::VerificationResults(const CompilerOptions* compiler_options) : compiler_options_(compiler_options), verified_methods_lock_("compiler verified methods lock"), rejected_classes_lock_("compiler rejected classes lock") {} VerificationResults::~VerificationResults() { WriterMutexLock mu(Thread::Current(), verified_methods_lock_); STLDeleteValues(&verified_methods_); atomic_verified_methods_.Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) { delete method; }); } void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) { DCHECK(method_verifier != nullptr); if (!compiler_options_->IsAnyCompilationEnabled()) { // Verified methods are only required for quickening and compilation. return; } MethodReference ref = method_verifier->GetMethodReference(); std::unique_ptr verified_method(VerifiedMethod::Create(method_verifier)); if (verified_method == nullptr) { // We'll punt this later. return; } AtomicMap::InsertResult result = atomic_verified_methods_.Insert( DexFileReference(ref.dex_file, ref.dex_method_index), /*expected*/ nullptr, verified_method.get()); const VerifiedMethod* existing = nullptr; bool inserted; if (result != AtomicMap::kInsertResultInvalidDexFile) { inserted = (result == AtomicMap::kInsertResultSuccess); if (!inserted) { // Rare case. 
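      // Illustrative aside (not from the original source): two threads may
      // verify the same method concurrently and race on the insert, e.g.
      //   thread A: Insert(ref, nullptr, m1) -> kInsertResultSuccess
      //   thread B: Insert(ref, nullptr, m2) -> loses, then Get(ref) yields m1
      // so the losing thread fetches the winner's entry and keeps its own copy
      // only long enough to compare, as checked below.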
CHECK(atomic_verified_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index), &existing)); CHECK_NE(verified_method.get(), existing); } } else { WriterMutexLock mu(Thread::Current(), verified_methods_lock_); auto it = verified_methods_.find(ref); inserted = it == verified_methods_.end(); if (inserted) { verified_methods_.Put(ref, verified_method.get()); DCHECK(verified_methods_.find(ref) != verified_methods_.end()); } else { existing = it->second; } } if (inserted) { // Successfully added, release the unique_ptr since we no longer have ownership. DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get()); verified_method.release(); } else { // TODO: Investigate why are we doing the work again for this method and try to avoid it. LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod(); if (!Runtime::Current()->UseJitCompilation()) { if (kIsDebugBuild) { auto ex_set = existing->GetSafeCastSet(); auto ve_set = verified_method->GetSafeCastSet(); CHECK_EQ(ex_set == nullptr, ve_set == nullptr); CHECK((ex_set == nullptr) || (ex_set->size() == ve_set->size())); } } // Let the unique_ptr delete the new verified method since there was already an existing one // registered. It is unsafe to replace the existing one since the JIT may be using it to // generate a native GC map. } } const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) { const VerifiedMethod* ret = nullptr; DCHECK(compiler_options_->IsAnyCompilationEnabled()); if (atomic_verified_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index), &ret)) { return ret; } ReaderMutexLock mu(Thread::Current(), verified_methods_lock_); auto it = verified_methods_.find(ref); return (it != verified_methods_.end()) ? it->second : nullptr; } void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) { // This method should only be called for classes verified at compile time, // which have no verifier error, nor has methods that we know will throw // at runtime. std::unique_ptr verified_method = std::make_unique( /* encountered_error_types */ 0, /* has_runtime_throw */ false); if (atomic_verified_methods_.Insert(DexFileReference(ref.dex_file, ref.dex_method_index), /*expected*/ nullptr, verified_method.get()) == AtomicMap::InsertResult::kInsertResultSuccess) { verified_method.release(); } } void VerificationResults::AddRejectedClass(ClassReference ref) { { WriterMutexLock mu(Thread::Current(), rejected_classes_lock_); rejected_classes_.insert(ref); } DCHECK(IsClassRejected(ref)); } bool VerificationResults::IsClassRejected(ClassReference ref) { ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_); return (rejected_classes_.find(ref) != rejected_classes_.end()); } bool VerificationResults::IsCandidateForCompilation(MethodReference&, const uint32_t access_flags) { if (!compiler_options_->IsAotCompilationEnabled()) { return false; } // Don't compile class initializers unless kEverything. if ((compiler_options_->GetCompilerFilter() != CompilerFilter::kEverything) && ((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { return false; } return true; } void VerificationResults::AddDexFile(const DexFile* dex_file) { atomic_verified_methods_.AddDexFile(dex_file, dex_file->NumMethodIds()); WriterMutexLock mu(Thread::Current(), verified_methods_lock_); // There can be some verified methods that are already registered for the dex_file since we set // up well known classes earlier. 
Remove these and put them in the array so that we don't
  // accidentally miss seeing them.
  for (auto it = verified_methods_.begin(); it != verified_methods_.end(); ) {
    MethodReference ref = it->first;
    if (ref.dex_file == dex_file) {
      CHECK(atomic_verified_methods_.Insert(DexFileReference(ref.dex_file, ref.dex_method_index),
                                            nullptr,
                                            it->second) == AtomicMap::kInsertResultSuccess);
      it = verified_methods_.erase(it);
    } else {
      ++it;
    }
  }
}

}  // namespace art
android-platform-art-8.1.0+r23/compiler/dex/verification_results.h000066400000000000000000000057571336577252300252050ustar00rootroot00000000000000/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_VERIFICATION_RESULTS_H_
#define ART_COMPILER_DEX_VERIFICATION_RESULTS_H_

#include <stdint.h>
#include <set>

#include "base/dchecked_vector.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "class_reference.h"
#include "method_reference.h"
#include "safe_map.h"
#include "utils/atomic_dex_ref_map.h"

namespace art {

namespace verifier {
class MethodVerifier;
class VerifierDepsTest;
}  // namespace verifier

class CompilerOptions;
class VerifiedMethod;

// Used by CompilerCallbacks to track verification information from the Runtime.
class VerificationResults {
 public:
  explicit VerificationResults(const CompilerOptions* compiler_options);
  ~VerificationResults();

  void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!verified_methods_lock_);

  void CreateVerifiedMethodFor(MethodReference ref)
      REQUIRES(!verified_methods_lock_);

  const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
      REQUIRES(!verified_methods_lock_);

  void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
  bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);

  bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);

  // Add a dex file to enable using the atomic map.
  void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);

 private:
  // Verified methods. The method array is fixed to avoid needing a lock to extend it.
  using AtomicMap = AtomicDexRefMap<const VerifiedMethod*>;
  using VerifiedMethodMap = SafeMap<MethodReference,
                                    const VerifiedMethod*,
                                    MethodReferenceComparator>;

  VerifiedMethodMap verified_methods_ GUARDED_BY(verified_methods_lock_);
  const CompilerOptions* const compiler_options_;

  // Dex2oat can add dex files to atomic_verified_methods_ to avoid locking when calling
  // GetVerifiedMethod.
  AtomicMap atomic_verified_methods_;

  ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Rejected classes.
ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_);

  friend class verifier::VerifierDepsTest;
};

}  // namespace art

#endif  // ART_COMPILER_DEX_VERIFICATION_RESULTS_H_
android-platform-art-8.1.0+r23/compiler/dex/verified_method.cc000066400000000000000000000106741336577252300242310ustar00rootroot00000000000000/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "verified_method.h"

#include <algorithm>
#include <memory>

#include "base/logging.h"
#include "dex_file.h"
#include "dex_instruction-inl.h"
#include "runtime.h"
#include "verifier/method_verifier-inl.h"
#include "verifier/reg_type-inl.h"
#include "verifier/register_line-inl.h"
#include "verifier/verifier_deps.h"

namespace art {

VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw)
    : encountered_error_types_(encountered_error_types),
      has_runtime_throw_(has_runtime_throw) {
}

const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  std::unique_ptr<VerifiedMethod> verified_method(
      new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
                         method_verifier->HasInstructionThatWillThrow()));

  if (method_verifier->HasCheckCasts()) {
    verified_method->GenerateSafeCastSet(method_verifier);
  }

  return verified_method.release();
}

bool VerifiedMethod::IsSafeCast(uint32_t pc) const {
  if (safe_cast_set_ == nullptr) {
    return false;
  }
  return std::binary_search(safe_cast_set_->begin(), safe_cast_set_->end(), pc);
}

void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) {
  /*
   * Walks over the method code and adds any cast instructions in which
   * the type cast is implicit to a set, which is used in the code generation
   * to elide these casts.
   */
  if (method_verifier->HasFailures()) {
    return;
  }
  const DexFile::CodeItem* code_item = method_verifier->CodeItem();
  const Instruction* inst = Instruction::At(code_item->insns_);
  const Instruction* end = Instruction::At(code_item->insns_ +
                                           code_item->insns_size_in_code_units_);

  for (; inst < end; inst = inst->Next()) {
    Instruction::Code code = inst->Opcode();
    if (code == Instruction::CHECK_CAST) {
      uint32_t dex_pc = inst->GetDexPc(code_item->insns_);
      if (!method_verifier->GetInstructionFlags(dex_pc).IsVisited()) {
        // Do not attempt to quicken this instruction, it's unreachable anyway.
        continue;
      }
      const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
      const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
                                                              inst->VRegA_21c()));
      const verifier::RegType& cast_type =
          method_verifier->ResolveCheckedClass(dex::TypeIndex(inst->VRegB_21c()));
      // Pass null for the method verifier to not record the VerifierDeps dependency
      // if the types are not assignable.
if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
        // The types are assignable, we record that dependency in the VerifierDeps so
        // that if this changes after OTA, we will re-verify again.
        // We check if reg_type has a class, as the verifier may have inferred it's
        // 'null'.
        if (reg_type.HasClass()) {
          DCHECK(cast_type.HasClass());
          verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
                                                           cast_type.GetClass(),
                                                           reg_type.GetClass(),
                                                           /* strict */ true,
                                                           /* assignable */ true);
        }
        if (safe_cast_set_ == nullptr) {
          safe_cast_set_.reset(new SafeCastSet());
        }
        // Verify ordering for push_back() to the sorted vector.
        DCHECK(safe_cast_set_->empty() || safe_cast_set_->back() < dex_pc);
        safe_cast_set_->push_back(dex_pc);
      }
    }
  }
  DCHECK(safe_cast_set_ == nullptr || !safe_cast_set_->empty());
}

}  // namespace art
android-platform-art-8.1.0+r23/compiler/dex/verified_method.h000066400000000000000000000045201336577252300240660ustar00rootroot00000000000000/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_VERIFIED_METHOD_H_
#define ART_COMPILER_DEX_VERIFIED_METHOD_H_

#include <vector>

#include "base/mutex.h"
#include "dex_file.h"
#include "method_reference.h"
#include "safe_map.h"

namespace art {

namespace verifier {
class MethodVerifier;
}  // namespace verifier

class VerifiedMethod {
 public:
  VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);

  // Cast elision set type.
  // Since we're adding the dex PCs to the set in increasing order, a sorted vector
  // is better for performance (not just memory usage), especially for large sets.
  typedef std::vector<uint32_t> SafeCastSet;

  static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier)
      REQUIRES_SHARED(Locks::mutator_lock_);
  ~VerifiedMethod() = default;

  const SafeCastSet* GetSafeCastSet() const {
    return safe_cast_set_.get();
  }

  // Returns true if the cast can statically be verified to be redundant
  // by using the check-cast elision peephole optimization in the verifier.
  bool IsSafeCast(uint32_t pc) const;

  // Returns true if there were any errors during verification.
  bool HasVerificationFailures() const {
    return encountered_error_types_ != 0;
  }

  uint32_t GetEncounteredVerificationFailures() const {
    return encountered_error_types_;
  }

  bool HasRuntimeThrow() const {
    return has_runtime_throw_;
  }

 private:
  // Generate safe cast set into safe_cast_set_.
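  // Illustrative aside (not from the original source): dex PCs are appended in
  // increasing order, so IsSafeCast() can use std::binary_search, e.g.
  //   safe_cast_set_ = {0x04, 0x1a, 0x2c};   // sorted dex PCs of safe casts
  //   IsSafeCast(0x1a) == true, IsSafeCast(0x20) == false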
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
      REQUIRES_SHARED(Locks::mutator_lock_);

  std::unique_ptr<SafeCastSet> safe_cast_set_;

  const uint32_t encountered_error_types_;
  const bool has_runtime_throw_;
};

}  // namespace art

#endif  // ART_COMPILER_DEX_VERIFIED_METHOD_H_
android-platform-art-8.1.0+r23/compiler/driver/000077500000000000000000000000001336577252300212725ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/driver/compiled_method_storage.cc000066400000000000000000000176661336577252300264750ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <ostream>

#include "compiled_method_storage.h"

#include "base/logging.h"
#include "compiled_method.h"
#include "thread-current-inl.h"
#include "utils.h"
#include "utils/dedupe_set-inl.h"
#include "utils/swap_space.h"

namespace art {

namespace {  // anonymous namespace

template <typename T>
const LengthPrefixedArray<T>* CopyArray(SwapSpace* swap_space, const ArrayRef<const T>& array) {
  DCHECK(!array.empty());
  SwapAllocator<uint8_t> allocator(swap_space);
  void* storage = allocator.allocate(LengthPrefixedArray<T>::ComputeSize(array.size()));
  LengthPrefixedArray<T>* array_copy = new(storage) LengthPrefixedArray<T>(array.size());
  std::copy(array.begin(), array.end(), array_copy->begin());
  return array_copy;
}

template <typename T>
void ReleaseArray(SwapSpace* swap_space, const LengthPrefixedArray<T>* array) {
  SwapAllocator<uint8_t> allocator(swap_space);
  size_t size = LengthPrefixedArray<T>::ComputeSize(array->size());
  array->~LengthPrefixedArray<T>();
  allocator.deallocate(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(array)), size);
}

}  // anonymous namespace

template <typename T, typename DedupeSetType>
inline const LengthPrefixedArray<T>* CompiledMethodStorage::AllocateOrDeduplicateArray(
    const ArrayRef<const T>& data, DedupeSetType* dedupe_set) {
  if (data.empty()) {
    return nullptr;
  } else if (!DedupeEnabled()) {
    return CopyArray(swap_space_.get(), data);
  } else {
    return dedupe_set->Add(Thread::Current(), data);
  }
}

template <typename T>
inline void CompiledMethodStorage::ReleaseArrayIfNotDeduplicated(
    const LengthPrefixedArray<T>* array) {
  if (array != nullptr && !DedupeEnabled()) {
    ReleaseArray(swap_space_.get(), array);
  }
}

template <typename ContentType>
class CompiledMethodStorage::DedupeHashFunc {
 private:
  static constexpr bool kUseMurmur3Hash = true;

 public:
  size_t operator()(const ArrayRef<ContentType>& array) const {
    const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
    // TODO: More reasonable assertion.
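    // Illustrative aside (not from the original source): the function hashes
    // the array as raw bytes, so a hypothetical caller (the class is private
    // to the storage) would write
    //   CompiledMethodStorage::DedupeHashFunc<const uint8_t> hash;
    //   size_t h = hash(ArrayRef<const uint8_t>(code_bytes));
    // and equal byte sequences always land in the same dedupe bucket.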
// static_assert(IsPowerOfTwo(sizeof(ContentType)), // "ContentType is not power of two, don't know whether array layout is as assumed"); uint32_t len = sizeof(ContentType) * array.size(); if (kUseMurmur3Hash) { static constexpr uint32_t c1 = 0xcc9e2d51; static constexpr uint32_t c2 = 0x1b873593; static constexpr uint32_t r1 = 15; static constexpr uint32_t r2 = 13; static constexpr uint32_t m = 5; static constexpr uint32_t n = 0xe6546b64; uint32_t hash = 0; const int nblocks = len / 4; typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t; const unaligned_uint32_t *blocks = reinterpret_cast(data); int i; for (i = 0; i < nblocks; i++) { uint32_t k = blocks[i]; k *= c1; k = (k << r1) | (k >> (32 - r1)); k *= c2; hash ^= k; hash = ((hash << r2) | (hash >> (32 - r2))) * m + n; } const uint8_t *tail = reinterpret_cast(data + nblocks * 4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; FALLTHROUGH_INTENDED; case 2: k1 ^= tail[1] << 8; FALLTHROUGH_INTENDED; case 1: k1 ^= tail[0]; k1 *= c1; k1 = (k1 << r1) | (k1 >> (32 - r1)); k1 *= c2; hash ^= k1; } hash ^= len; hash ^= (hash >> 16); hash *= 0x85ebca6b; hash ^= (hash >> 13); hash *= 0xc2b2ae35; hash ^= (hash >> 16); return hash; } else { size_t hash = 0x811c9dc5; for (uint32_t i = 0; i < len; ++i) { hash = (hash * 16777619) ^ data[i]; } hash += hash << 13; hash ^= hash >> 7; hash += hash << 3; hash ^= hash >> 17; hash += hash << 5; return hash; } } }; template class CompiledMethodStorage::LengthPrefixedArrayAlloc { public: explicit LengthPrefixedArrayAlloc(SwapSpace* swap_space) : swap_space_(swap_space) { } const LengthPrefixedArray* Copy(const ArrayRef& array) { return CopyArray(swap_space_, array); } void Destroy(const LengthPrefixedArray* array) { ReleaseArray(swap_space_, array); } private: SwapSpace* const swap_space_; }; CompiledMethodStorage::CompiledMethodStorage(int swap_fd) : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)), dedupe_enabled_(true), dedupe_code_("dedupe code", LengthPrefixedArrayAlloc(swap_space_.get())), dedupe_method_info_("dedupe method info", LengthPrefixedArrayAlloc(swap_space_.get())), dedupe_vmap_table_("dedupe vmap table", LengthPrefixedArrayAlloc(swap_space_.get())), dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc(swap_space_.get())), dedupe_linker_patches_("dedupe cfi info", LengthPrefixedArrayAlloc(swap_space_.get())) { } CompiledMethodStorage::~CompiledMethodStorage() { // All done by member destructors. 
} void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) const { if (swap_space_.get() != nullptr) { const size_t swap_size = swap_space_->GetSize(); os << " swap=" << PrettySize(swap_size) << " (" << swap_size << "B)"; } if (extended) { Thread* self = Thread::Current(); os << "\nCode dedupe: " << dedupe_code_.DumpStats(self); os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self); os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self); } } const LengthPrefixedArray* CompiledMethodStorage::DeduplicateCode( const ArrayRef& code) { return AllocateOrDeduplicateArray(code, &dedupe_code_); } void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray* code) { ReleaseArrayIfNotDeduplicated(code); } const LengthPrefixedArray* CompiledMethodStorage::DeduplicateMethodInfo( const ArrayRef& src_map) { return AllocateOrDeduplicateArray(src_map, &dedupe_method_info_); } void CompiledMethodStorage::ReleaseMethodInfo(const LengthPrefixedArray* method_info) { ReleaseArrayIfNotDeduplicated(method_info); } const LengthPrefixedArray* CompiledMethodStorage::DeduplicateVMapTable( const ArrayRef& table) { return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_); } void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray* table) { ReleaseArrayIfNotDeduplicated(table); } const LengthPrefixedArray* CompiledMethodStorage::DeduplicateCFIInfo( const ArrayRef& cfi_info) { return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_); } void CompiledMethodStorage::ReleaseCFIInfo(const LengthPrefixedArray* cfi_info) { ReleaseArrayIfNotDeduplicated(cfi_info); } const LengthPrefixedArray* CompiledMethodStorage::DeduplicateLinkerPatches( const ArrayRef& linker_patches) { return AllocateOrDeduplicateArray(linker_patches, &dedupe_linker_patches_); } void CompiledMethodStorage::ReleaseLinkerPatches( const LengthPrefixedArray* linker_patches) { ReleaseArrayIfNotDeduplicated(linker_patches); } } // namespace art android-platform-art-8.1.0+r23/compiler/driver/compiled_method_storage.h000066400000000000000000000071461336577252300263330ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

#ifndef ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_

#include <iosfwd>
#include <memory>

#include "base/array_ref.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "utils/dedupe_set.h"
#include "utils/swap_space.h"

namespace art {

class LinkerPatch;

class CompiledMethodStorage {
 public:
  explicit CompiledMethodStorage(int swap_fd);
  ~CompiledMethodStorage();

  void DumpMemoryUsage(std::ostream& os, bool extended) const;

  void SetDedupeEnabled(bool dedupe_enabled) {
    dedupe_enabled_ = dedupe_enabled;
  }
  bool DedupeEnabled() const {
    return dedupe_enabled_;
  }

  SwapAllocator<void> GetSwapSpaceAllocator() {
    return SwapAllocator<void>(swap_space_.get());
  }

  const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
  void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);

  const LengthPrefixedArray<uint8_t>* DeduplicateMethodInfo(
      const ArrayRef<const uint8_t>& method_info);
  void ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info);

  const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
  void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);

  const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
  void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info);

  const LengthPrefixedArray<LinkerPatch>* DeduplicateLinkerPatches(
      const ArrayRef<const LinkerPatch>& linker_patches);
  void ReleaseLinkerPatches(const LengthPrefixedArray<LinkerPatch>* linker_patches);

 private:
  template <typename T, typename DedupeSetType>
  const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data,
                                                           DedupeSetType* dedupe_set);
  template <typename T>
  void ReleaseArrayIfNotDeduplicated(const LengthPrefixedArray<T>* array);

  // DeDuplication data structures.
  template <typename ContentType>
  class DedupeHashFunc;
  template <typename T>
  class LengthPrefixedArrayAlloc;
  template <typename T>
  using ArrayDedupeSet = DedupeSet<ArrayRef<const T>,
                                   LengthPrefixedArray<T>,
                                   LengthPrefixedArrayAlloc<T>,
                                   size_t,
                                   DedupeHashFunc<const T>,
                                   4>;

  // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
  // as other fields rely on this.
  std::unique_ptr<SwapSpace> swap_space_;

  bool dedupe_enabled_;

  ArrayDedupeSet<uint8_t> dedupe_code_;
  ArrayDedupeSet<uint8_t> dedupe_method_info_;
  ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
  ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
  ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;

  DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage);
};

}  // namespace art

#endif  // ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
android-platform-art-8.1.0+r23/compiler/driver/compiled_method_storage_test.cc000066400000000000000000000121251336577252300275210ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ #include #include "compiled_method_storage.h" #include "compiled_method.h" #include "compiler_driver.h" #include "compiler_options.h" #include "dex/verification_results.h" namespace art { TEST(CompiledMethodStorage, Deduplicate) { CompilerOptions compiler_options; VerificationResults verification_results(&compiler_options); CompilerDriver driver(&compiler_options, &verification_results, Compiler::kOptimizing, /* instruction_set_ */ kNone, /* instruction_set_features */ nullptr, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, /* thread_count */ 1u, /* dump_stats */ false, /* dump_passes */ false, /* timer */ nullptr, /* swap_fd */ -1, /* profile_compilation_info */ nullptr); CompiledMethodStorage* storage = driver.GetCompiledMethodStorage(); ASSERT_TRUE(storage->DedupeEnabled()); // The default. const uint8_t raw_code1[] = { 1u, 2u, 3u }; const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u }; ArrayRef code[] = { ArrayRef(raw_code1), ArrayRef(raw_code2), }; const uint8_t raw_method_info_map1[] = { 1u, 2u, 3u, 4u, 5u, 6u }; const uint8_t raw_method_info_map2[] = { 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u }; ArrayRef method_info[] = { ArrayRef(raw_method_info_map1), ArrayRef(raw_method_info_map2), }; const uint8_t raw_vmap_table1[] = { 2, 4, 6 }; const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 }; ArrayRef vmap_table[] = { ArrayRef(raw_vmap_table1), ArrayRef(raw_vmap_table2), }; const uint8_t raw_cfi_info1[] = { 1, 3, 5 }; const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 }; ArrayRef cfi_info[] = { ArrayRef(raw_cfi_info1), ArrayRef(raw_cfi_info2), }; const LinkerPatch raw_patches1[] = { LinkerPatch::CodePatch(0u, nullptr, 1u), LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 1u), }; const LinkerPatch raw_patches2[] = { LinkerPatch::CodePatch(0u, nullptr, 1u), LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 2u), }; ArrayRef patches[] = { ArrayRef(raw_patches1), ArrayRef(raw_patches2), }; std::vector compiled_methods; compiled_methods.reserve(1u << 7); for (auto&& c : code) { for (auto&& s : method_info) { for (auto&& v : vmap_table) { for (auto&& f : cfi_info) { for (auto&& p : patches) { compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod( &driver, kNone, c, 0u, 0u, 0u, s, v, f, p)); } } } } } constexpr size_t code_bit = 1u << 4; constexpr size_t src_map_bit = 1u << 3; constexpr size_t vmap_table_bit = 1u << 2; constexpr size_t cfi_info_bit = 1u << 1; constexpr size_t patches_bit = 1u << 0; CHECK_EQ(compiled_methods.size(), 1u << 5); for (size_t i = 0; i != compiled_methods.size(); ++i) { for (size_t j = 0; j != compiled_methods.size(); ++j) { CompiledMethod* lhs = compiled_methods[i]; CompiledMethod* rhs = compiled_methods[j]; bool same_code = ((i ^ j) & code_bit) == 0u; bool same_src_map = ((i ^ j) & src_map_bit) == 0u; bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u; bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u; bool same_patches = ((i ^ j) & patches_bit) == 0u; ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data()) << i << " " << j; ASSERT_EQ(same_src_map, lhs->GetMethodInfo().data() == rhs->GetMethodInfo().data()) << i << " " << j; ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data()) << i << " " << j; ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data()) << i << " " << j; ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data()) << i << " " << j; } } for (CompiledMethod* method : compiled_methods) { 
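    // Each method above was swap-allocated through the driver's storage; it is
    // released back through the driver rather than deleted directly, so the
    // swap space can reclaim it.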
CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method);
  }
}

}  // namespace art
android-platform-art-8.1.0+r23/compiler/driver/compiler_driver-inl.h000066400000000000000000000124241336577252300254130ustar00rootroot00000000000000/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
#define ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_

#include "compiler_driver.h"

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "dex_compilation_unit.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"

namespace art {

inline mirror::Class* CompilerDriver::ResolveClass(
    const ScopedObjectAccess& soa,
    Handle<mirror::DexCache> dex_cache,
    Handle<mirror::ClassLoader> class_loader,
    dex::TypeIndex cls_index,
    const DexCompilationUnit* mUnit) {
  DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
  DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
  mirror::Class* cls = mUnit->GetClassLinker()->ResolveType(
      *mUnit->GetDexFile(), cls_index, dex_cache, class_loader);
  DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending());
  if (UNLIKELY(cls == nullptr)) {
    // Clean up any exception left by type resolution.
    soa.Self()->ClearException();
  }
  return cls;
}

inline mirror::Class* CompilerDriver::ResolveCompilingMethodsClass(
    const ScopedObjectAccess& soa,
    Handle<mirror::DexCache> dex_cache,
    Handle<mirror::ClassLoader> class_loader,
    const DexCompilationUnit* mUnit) {
  DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
  DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
  const DexFile::MethodId& referrer_method_id =
      mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
  return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
}

inline ArtField* CompilerDriver::ResolveFieldWithDexFile(
    const ScopedObjectAccess& soa,
    Handle<mirror::DexCache> dex_cache,
    Handle<mirror::ClassLoader> class_loader,
    const DexFile* dex_file,
    uint32_t field_idx,
    bool is_static) {
  DCHECK_EQ(dex_cache->GetDexFile(), dex_file);
  ArtField* resolved_field = Runtime::Current()->GetClassLinker()->ResolveField(
      *dex_file, field_idx, dex_cache, class_loader, is_static);
  DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
  if (UNLIKELY(resolved_field == nullptr)) {
    // Clean up any exception left by type resolution.
    soa.Self()->ClearException();
    return nullptr;
  }
  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
    // ClassLinker can return a field of the wrong kind directly from the DexCache.
    // Silently return null on such incompatible class change.
    return nullptr;
  }
  return resolved_field;
}

inline ArtField* CompilerDriver::ResolveField(
    const ScopedObjectAccess& soa,
    Handle<mirror::DexCache> dex_cache,
    Handle<mirror::ClassLoader> class_loader,
    const DexCompilationUnit* mUnit,
    uint32_t field_idx,
    bool is_static) {
  DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
  return ResolveFieldWithDexFile(soa, dex_cache, class_loader, mUnit->GetDexFile(), field_idx,
                                 is_static);
}

inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
    mirror::DexCache* dex_cache, mirror::Class* referrer_class,
    ArtField* resolved_field, uint16_t field_idx) {
  DCHECK(!resolved_field->IsStatic());
  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
  bool fast_get = referrer_class != nullptr &&
      referrer_class->CanAccessResolvedField(fields_class,
                                             resolved_field,
                                             dex_cache,
                                             field_idx);
  bool fast_put = fast_get && (!resolved_field->IsFinal() || fields_class == referrer_class);
  return std::make_pair(fast_get, fast_put);
}

inline ArtMethod* CompilerDriver::ResolveMethod(
    ScopedObjectAccess& soa,
    Handle<mirror::DexCache> dex_cache,
    Handle<mirror::ClassLoader> class_loader,
    const DexCompilationUnit* mUnit,
    uint32_t method_idx,
    InvokeType invoke_type) {
  DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
  ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod(
      *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type);
  if (UNLIKELY(resolved_method == nullptr)) {
    DCHECK(soa.Self()->IsExceptionPending());
    // Clean up any exception left by type resolution.
    soa.Self()->ClearException();
  }
  return resolved_method;
}

inline VerificationResults* CompilerDriver::GetVerificationResults() const {
  DCHECK(Runtime::Current()->IsAotCompiler());
  return verification_results_;
}

}  // namespace art

#endif  // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
android-platform-art-8.1.0+r23/compiler/driver/compiler_driver.cc000066400000000000000000003705621336577252300250030ustar00rootroot00000000000000/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ #include "compiler_driver.h" #include #include #include #ifndef __APPLE__ #include // For mallinfo #endif #include "android-base/strings.h" #include "art_field-inl.h" #include "art_method-inl.h" #include "base/arena_allocator.h" #include "base/array_ref.h" #include "base/bit_vector.h" #include "base/enums.h" #include "base/stl_util.h" #include "base/systrace.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "class_linker-inl.h" #include "compiled_method.h" #include "compiler.h" #include "compiler_callbacks.h" #include "compiler_driver-inl.h" #include "dex/dex_to_dex_compiler.h" #include "dex/verification_results.h" #include "dex/verified_method.h" #include "dex_compilation_unit.h" #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "driver/compiler_options.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/heap_bitmap.h" #include "gc/space/image_space.h" #include "gc/space/space.h" #include "handle_scope-inl.h" #include "intrinsics_enum.h" #include "jni_internal.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" #include "mirror/object-inl.h" #include "mirror/object-refvisitor-inl.h" #include "mirror/object_array-inl.h" #include "mirror/throwable.h" #include "nativehelper/ScopedLocalRef.h" #include "object_lock.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "thread.h" #include "thread_list.h" #include "thread_pool.h" #include "trampolines/trampoline_compiler.h" #include "transaction.h" #include "utils/atomic_dex_ref_map-inl.h" #include "utils/dex_cache_arrays_layout-inl.h" #include "utils/swap_space.h" #include "vdex_file.h" #include "verifier/method_verifier-inl.h" #include "verifier/method_verifier.h" #include "verifier/verifier_deps.h" #include "verifier/verifier_enums.h" namespace art { static constexpr bool kTimeCompileMethod = !kIsDebugBuild; // Print additional info during profile guided compilation. static constexpr bool kDebugProfileGuidedCompilation = false; // Max encoded fields allowed for initializing app image. Hardcode the number for now // because 5000 should be large enough. 
static constexpr uint32_t kMaxEncodedFields = 5000; static double Percentage(size_t x, size_t y) { return 100.0 * (static_cast(x)) / (static_cast(x + y)); } static void DumpStat(size_t x, size_t y, const char* str) { if (x == 0 && y == 0) { return; } LOG(INFO) << Percentage(x, y) << "% of " << str << " for " << (x + y) << " cases"; } class CompilerDriver::AOTCompilationStats { public: AOTCompilationStats() : stats_lock_("AOT compilation statistics lock"), resolved_types_(0), unresolved_types_(0), resolved_instance_fields_(0), unresolved_instance_fields_(0), resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0), type_based_devirtualization_(0), safe_casts_(0), not_safe_casts_(0) { for (size_t i = 0; i <= kMaxInvokeType; i++) { resolved_methods_[i] = 0; unresolved_methods_[i] = 0; virtual_made_direct_[i] = 0; direct_calls_to_boot_[i] = 0; direct_methods_to_boot_[i] = 0; } } void Dump() { DumpStat(resolved_types_, unresolved_types_, "types resolved"); DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved"); DumpStat(resolved_local_static_fields_ + resolved_static_fields_, unresolved_static_fields_, "static fields resolved"); DumpStat(resolved_local_static_fields_, resolved_static_fields_ + unresolved_static_fields_, "static fields local to a class"); DumpStat(safe_casts_, not_safe_casts_, "check-casts removed based on type information"); // Note, the code below subtracts the stat value so that when added to the stat value we have // 100% of samples. TODO: clean this up. DumpStat(type_based_devirtualization_, resolved_methods_[kVirtual] + unresolved_methods_[kVirtual] + resolved_methods_[kInterface] + unresolved_methods_[kInterface] - type_based_devirtualization_, "virtual/interface calls made direct based on type information"); for (size_t i = 0; i <= kMaxInvokeType; i++) { std::ostringstream oss; oss << static_cast(i) << " methods were AOT resolved"; DumpStat(resolved_methods_[i], unresolved_methods_[i], oss.str().c_str()); if (virtual_made_direct_[i] > 0) { std::ostringstream oss2; oss2 << static_cast(i) << " methods made direct"; DumpStat(virtual_made_direct_[i], resolved_methods_[i] + unresolved_methods_[i] - virtual_made_direct_[i], oss2.str().c_str()); } if (direct_calls_to_boot_[i] > 0) { std::ostringstream oss2; oss2 << static_cast(i) << " method calls are direct into boot"; DumpStat(direct_calls_to_boot_[i], resolved_methods_[i] + unresolved_methods_[i] - direct_calls_to_boot_[i], oss2.str().c_str()); } if (direct_methods_to_boot_[i] > 0) { std::ostringstream oss2; oss2 << static_cast(i) << " method calls have methods in boot"; DumpStat(direct_methods_to_boot_[i], resolved_methods_[i] + unresolved_methods_[i] - direct_methods_to_boot_[i], oss2.str().c_str()); } } } // Allow lossy statistics in non-debug builds. 
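// Illustrative aside (not from the original source): in debug builds the macro
// below expands to a scoped MutexLock, so the counter updates in the accessors
// that follow (e.g. `STATS_LOCK(); resolved_types_++;`) are serialized; in
// release builds it expands to nothing, so racing increments may drop samples,
// which is the "lossy" trade-off referred to above.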
#ifndef NDEBUG #define STATS_LOCK() MutexLock mu(Thread::Current(), stats_lock_) #else #define STATS_LOCK() #endif void TypeDoesntNeedAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_types_++; } void TypeNeedsAccessCheck() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_types_++; } void ResolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_instance_fields_++; } void UnresolvedInstanceField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_instance_fields_++; } void ResolvedLocalStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_local_static_fields_++; } void ResolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); resolved_static_fields_++; } void UnresolvedStaticField() REQUIRES(!stats_lock_) { STATS_LOCK(); unresolved_static_fields_++; } // Indicate that type information from the verifier led to devirtualization. void PreciseTypeDevirtualization() REQUIRES(!stats_lock_) { STATS_LOCK(); type_based_devirtualization_++; } // A check-cast could be eliminated due to verifier type analysis. void SafeCast() REQUIRES(!stats_lock_) { STATS_LOCK(); safe_casts_++; } // A check-cast couldn't be eliminated due to verifier type analysis. void NotASafeCast() REQUIRES(!stats_lock_) { STATS_LOCK(); not_safe_casts_++; } private: Mutex stats_lock_; size_t resolved_types_; size_t unresolved_types_; size_t resolved_instance_fields_; size_t unresolved_instance_fields_; size_t resolved_local_static_fields_; size_t resolved_static_fields_; size_t unresolved_static_fields_; // Type based devirtualization for invoke interface and virtual. size_t type_based_devirtualization_; size_t resolved_methods_[kMaxInvokeType + 1]; size_t unresolved_methods_[kMaxInvokeType + 1]; size_t virtual_made_direct_[kMaxInvokeType + 1]; size_t direct_calls_to_boot_[kMaxInvokeType + 1]; size_t direct_methods_to_boot_[kMaxInvokeType + 1]; size_t safe_casts_; size_t not_safe_casts_; DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats); }; class CompilerDriver::DexFileMethodSet { public: explicit DexFileMethodSet(const DexFile& dex_file) : dex_file_(dex_file), method_indexes_(dex_file.NumMethodIds(), false, Allocator::GetMallocAllocator()) { } DexFileMethodSet(DexFileMethodSet&& other) = default; const DexFile& GetDexFile() const { return dex_file_; } BitVector& GetMethodIndexes() { return method_indexes_; } const BitVector& GetMethodIndexes() const { return method_indexes_; } private: const DexFile& dex_file_; BitVector method_indexes_; }; CompilerDriver::CompilerDriver( const CompilerOptions* compiler_options, VerificationResults* verification_results, Compiler::Kind compiler_kind, InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, std::unordered_set* image_classes, std::unordered_set* compiled_classes, std::unordered_set* compiled_methods, size_t thread_count, bool dump_stats, bool dump_passes, CumulativeLogger* timer, int swap_fd, const ProfileCompilationInfo* profile_compilation_info) : compiler_options_(compiler_options), verification_results_(verification_results), compiler_(Compiler::Create(this, compiler_kind)), compiler_kind_(compiler_kind), instruction_set_(instruction_set == kArm ? 
kThumb2 : instruction_set), instruction_set_features_(instruction_set_features), requires_constructor_barrier_lock_("constructor barrier lock"), non_relative_linker_patch_count_(0u), image_classes_(image_classes), classes_to_compile_(compiled_classes), methods_to_compile_(compiled_methods), had_hard_verifier_failure_(false), parallel_thread_count_(thread_count), stats_(new AOTCompilationStats), dump_stats_(dump_stats), dump_passes_(dump_passes), timings_logger_(timer), compiler_context_(nullptr), support_boot_image_fixup_(true), compiled_method_storage_(swap_fd), profile_compilation_info_(profile_compilation_info), max_arena_alloc_(0), dex_to_dex_references_lock_("dex-to-dex references lock"), dex_to_dex_references_(), current_dex_to_dex_methods_(nullptr) { DCHECK(compiler_options_ != nullptr); compiler_->Init(); if (GetCompilerOptions().IsBootImage()) { CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image"; } } CompilerDriver::~CompilerDriver() { compiled_methods_.Visit([this](const DexFileReference& ref ATTRIBUTE_UNUSED, CompiledMethod* method) { if (method != nullptr) { CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, method); } }); compiler_->UnInit(); } #define CREATE_TRAMPOLINE(type, abi, offset) \ if (Is64BitInstructionSet(instruction_set_)) { \ return CreateTrampoline64(instruction_set_, abi, \ type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \ } else { \ return CreateTrampoline32(instruction_set_, abi, \ type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \ } std::unique_ptr> CompilerDriver::CreateJniDlsymLookup() const { CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup) } std::unique_ptr> CompilerDriver::CreateQuickGenericJniTrampoline() const { CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickGenericJniTrampoline) } std::unique_ptr> CompilerDriver::CreateQuickImtConflictTrampoline() const { CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickImtConflictTrampoline) } std::unique_ptr> CompilerDriver::CreateQuickResolutionTrampoline() const { CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickResolutionTrampoline) } std::unique_ptr> CompilerDriver::CreateQuickToInterpreterBridge() const { CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickToInterpreterBridge) } #undef CREATE_TRAMPOLINE static void SetupIntrinsic(Thread* self, Intrinsics intrinsic, InvokeType invoke_type, const char* class_name, const char* method_name, const char* signature) REQUIRES_SHARED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); PointerSize image_size = class_linker->GetImagePointerSize(); ObjPtr cls = class_linker->FindSystemClass(self, class_name); if (cls == nullptr) { LOG(FATAL) << "Could not find class of intrinsic " << class_name; } ArtMethod* method = cls->FindClassMethod(method_name, signature, image_size); if (method == nullptr || method->GetDeclaringClass() != cls) { LOG(FATAL) << "Could not find method of intrinsic " << class_name << " " << method_name << " " << signature; } DCHECK_EQ(method->GetInvokeType(), invoke_type); method->SetIntrinsic(static_cast(intrinsic)); } void CompilerDriver::CompileAll(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) { DCHECK(!Runtime::Current()->IsStarted()); InitializeThreadPools(); VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false); // Precompile: // 1) Load image classes // 2) Resolve all classes // 3) Attempt to verify all classes // 4) Attempt to initialize image classes, and trivially initialized classes PreCompile(class_loader, dex_files, timings); if 
(GetCompilerOptions().IsBootImage()) { // We don't need to setup the intrinsics for non boot image compilation, as // those compilations will pick up a boot image that have the ArtMethod already // set with the intrinsics flag. ScopedObjectAccess soa(Thread::Current()); #define SETUP_INTRINSICS(Name, InvokeType, NeedsEnvironmentOrCache, SideEffects, Exceptions, \ ClassName, MethodName, Signature) \ SetupIntrinsic(soa.Self(), Intrinsics::k##Name, InvokeType, ClassName, MethodName, Signature); #include "intrinsics_list.h" INTRINSICS_LIST(SETUP_INTRINSICS) #undef INTRINSICS_LIST #undef SETUP_INTRINSICS } // Compile: // 1) Compile all classes and methods enabled for compilation. May fall back to dex-to-dex // compilation. if (GetCompilerOptions().IsAnyCompilationEnabled()) { Compile(class_loader, dex_files, timings); } if (dump_stats_) { stats_->Dump(); } FreeThreadPools(); } static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( Thread* self, const CompilerDriver& driver, Handle class_loader, const DexFile& dex_file, const DexFile::ClassDef& class_def) REQUIRES_SHARED(Locks::mutator_lock_) { auto* const runtime = Runtime::Current(); DCHECK(driver.GetCompilerOptions().IsQuickeningCompilationEnabled()); const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = runtime->GetClassLinker(); mirror::Class* klass = class_linker->FindClass(self, descriptor, class_loader); if (klass == nullptr) { CHECK(self->IsExceptionPending()); self->ClearException(); return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; } // DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic // references with actual offsets. We cannot re-verify such instructions. // // We store the verification information in the class status in the oat file, which the linker // can validate (checksums) and use to skip load-time verification. It is thus safe to // optimize when a class has been fully verified before. optimizer::DexToDexCompilationLevel max_level = optimizer::DexToDexCompilationLevel::kOptimize; if (driver.GetCompilerOptions().GetDebuggable()) { // We are debuggable so definitions of classes might be changed. We don't want to do any // optimizations that could break that. max_level = optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; } if (klass->IsVerified()) { // Class is verified so we can enable DEX-to-DEX compilation for performance. return max_level; } else { // Class verification has failed: do not run DEX-to-DEX optimizations. return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; } } static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( Thread* self, const CompilerDriver& driver, jobject jclass_loader, const DexFile& dex_file, const DexFile::ClassDef& class_def) { ScopedObjectAccess soa(self); StackHandleScope<1> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); return GetDexToDexCompilationLevel(self, driver, class_loader, dex_file, class_def); } // Does the runtime for the InstructionSet provide an implementation returned by // GetQuickGenericJniStub allowing down calls that aren't compiled using a JNI compiler? 
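// When this returns true and JNI compilation is disabled, CompileMethod() below leaves native
// methods uncompiled, so at runtime their invocations fall back to the generic JNI trampoline
// produced by CreateQuickGenericJniTrampoline() above.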
static bool InstructionSetHasGenericJniStub(InstructionSet isa) { switch (isa) { case kArm: case kArm64: case kThumb2: case kMips: case kMips64: case kX86: case kX86_64: return true; default: return false; } } static void CompileMethod(Thread* self, CompilerDriver* driver, const DexFile::CodeItem* code_item, uint32_t access_flags, InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, Handle class_loader, const DexFile& dex_file, optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level, bool compilation_enabled, Handle dex_cache) { DCHECK(driver != nullptr); CompiledMethod* compiled_method = nullptr; uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0; MethodReference method_ref(&dex_file, method_idx); if (driver->GetCurrentDexToDexMethods() != nullptr) { // This is the second pass when we dex-to-dex compile previously marked methods. // TODO: Refactor the compilation to avoid having to distinguish the two passes // here. That should be done on a higher level. http://b/29089975 if (driver->GetCurrentDexToDexMethods()->IsBitSet(method_idx)) { VerificationResults* results = driver->GetVerificationResults(); DCHECK(results != nullptr); const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref); // Do not optimize if a VerifiedMethod is missing. SafeCast elision, // for example, relies on it. compiled_method = optimizer::ArtCompileDEX( driver, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, dex_file, (verified_method != nullptr) ? dex_to_dex_compilation_level : optimizer::DexToDexCompilationLevel::kDontDexToDexCompile); } } else if ((access_flags & kAccNative) != 0) { // Are we extracting only and have support for generic JNI down calls? if (!driver->GetCompilerOptions().IsJniCompilationEnabled() && InstructionSetHasGenericJniStub(driver->GetInstructionSet())) { // Leaving this empty will trigger the generic JNI version } else { // Look-up the ArtMethod associated with this code_item (if any) // -- It is later used to lookup any [optimization] annotations for this method. ScopedObjectAccess soa(self); // TODO: Lookup annotation from DexFile directly without resolving method. ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethod( dex_file, method_idx, dex_cache, class_loader, /* referrer */ nullptr, invoke_type); // Query any JNI optimization annotations such as @FastNative or @CriticalNative. Compiler::JniOptimizationFlags optimization_flags = Compiler::kNone; if (UNLIKELY(method == nullptr)) { // Failed method resolutions happen very rarely, e.g. ancestor class cannot be resolved. DCHECK(self->IsExceptionPending()); self->ClearException(); } else if (method->IsAnnotatedWithFastNative()) { // TODO: Will no longer need this CHECK once we have verifier checking this. CHECK(!method->IsAnnotatedWithCriticalNative()); optimization_flags = Compiler::kFastNative; } else if (method->IsAnnotatedWithCriticalNative()) { // TODO: Will no longer need this CHECK once we have verifier checking this. CHECK(!method->IsAnnotatedWithFastNative()); optimization_flags = Compiler::kCriticalNative; } compiled_method = driver->GetCompiler()->JniCompile(access_flags, method_idx, dex_file, optimization_flags); CHECK(compiled_method != nullptr); } } else if ((access_flags & kAccAbstract) != 0) { // Abstract methods don't have code. 
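    // compiled_method stays null here, so nothing is recorded for this method and the
    // runtime resolves invocations of it through the usual abstract-method error path.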
  } else {
    VerificationResults* results = driver->GetVerificationResults();
    DCHECK(results != nullptr);
    const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
    bool compile = compilation_enabled &&
        // Basic checks, e.g., not <clinit>.
        results->IsCandidateForCompilation(method_ref, access_flags) &&
        // Did not fail to create VerifiedMethod metadata.
        verified_method != nullptr &&
        // Do not have failures that should punt to the interpreter.
        !verified_method->HasRuntimeThrow() &&
        (verified_method->GetEncounteredVerificationFailures() &
            (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
        // Is eligible for compilation by methods-to-compile filter.
        driver->IsMethodToCompile(method_ref) &&
        driver->ShouldCompileBasedOnProfile(method_ref);

    if (compile) {
      // NOTE: if compiler declines to compile this method, it will return null.
      compiled_method = driver->GetCompiler()->Compile(code_item,
                                                       access_flags,
                                                       invoke_type,
                                                       class_def_idx,
                                                       method_idx,
                                                       class_loader,
                                                       dex_file,
                                                       dex_cache);
    }
    if (compiled_method == nullptr &&
        dex_to_dex_compilation_level !=
            optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
      DCHECK(!Runtime::Current()->UseJitCompilation());
      // TODO: add a command-line option to disable DEX-to-DEX compilation ?
      driver->MarkForDexToDexCompilation(self, method_ref);
    }
  }
  if (kTimeCompileMethod) {
    uint64_t duration_ns = NanoTime() - start_ns;
    if (duration_ns > MsToNs(driver->GetCompiler()->GetMaximumCompilationTimeBeforeWarning())) {
      LOG(WARNING) << "Compilation of " << dex_file.PrettyMethod(method_idx)
                   << " took " << PrettyDuration(duration_ns);
    }
  }

  if (compiled_method != nullptr) {
    // Count non-relative linker patches.
    size_t non_relative_linker_patch_count = 0u;
    for (const LinkerPatch& patch : compiled_method->GetPatches()) {
      if (!patch.IsPcRelative()) {
        ++non_relative_linker_patch_count;
      }
    }
    bool compile_pic = driver->GetCompilerOptions().GetCompilePic();  // Off by default
    // When compiling with PIC, there should be zero non-relative linker patches
    CHECK(!compile_pic || non_relative_linker_patch_count == 0u);

    driver->AddCompiledMethod(method_ref, compiled_method, non_relative_linker_patch_count);
  }

  if (self->IsExceptionPending()) {
    ScopedObjectAccess soa(self);
    LOG(FATAL) << "Unexpected exception compiling: " << dex_file.PrettyMethod(method_idx) << "\n"
               << self->GetException()->Dump();
  }
}

void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) {
  DCHECK(!Runtime::Current()->IsStarted());
  jobject jclass_loader;
  const DexFile* dex_file;
  uint16_t class_def_idx;
  uint32_t method_idx = method->GetDexMethodIndex();
  uint32_t access_flags = method->GetAccessFlags();
  InvokeType invoke_type = method->GetInvokeType();
  StackHandleScope<2> hs(self);
  Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
  Handle<mirror::ClassLoader> class_loader(
      hs.NewHandle(method->GetDeclaringClass()->GetClassLoader()));
  {
    ScopedObjectAccessUnchecked soa(self);
    ScopedLocalRef<jobject> local_class_loader(
        soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
    jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
    // Find the dex_file
    dex_file = method->GetDexFile();
    class_def_idx = method->GetClassDefIndex();
  }
  const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());

  // Go to native so that we don't block GC during compilation.
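  // ScopedThreadSuspension below moves this thread to the kNative state, so the GC can treat
  // it as already suspended and does not have to wait out the (potentially long) compile.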
ScopedThreadSuspension sts(self, kNative); std::vector dex_files; dex_files.push_back(dex_file); InitializeThreadPools(); PreCompile(jclass_loader, dex_files, timings); // Can we run DEX-to-DEX compiler on this class ? optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = GetDexToDexCompilationLevel(self, *this, jclass_loader, *dex_file, dex_file->GetClassDef(class_def_idx)); DCHECK(current_dex_to_dex_methods_ == nullptr); CompileMethod(self, this, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, *dex_file, dex_to_dex_compilation_level, true, dex_cache); ArrayRef dex_to_dex_references; { // From this point on, we shall not modify dex_to_dex_references_, so // just grab a reference to it that we use without holding the mutex. MutexLock lock(Thread::Current(), dex_to_dex_references_lock_); dex_to_dex_references = ArrayRef(dex_to_dex_references_); } if (!dex_to_dex_references.empty()) { DCHECK_EQ(dex_to_dex_references.size(), 1u); DCHECK(&dex_to_dex_references[0].GetDexFile() == dex_file); current_dex_to_dex_methods_ = &dex_to_dex_references.front().GetMethodIndexes(); DCHECK(current_dex_to_dex_methods_->IsBitSet(method_idx)); DCHECK_EQ(current_dex_to_dex_methods_->NumSetBits(), 1u); CompileMethod(self, this, code_item, access_flags, invoke_type, class_def_idx, method_idx, class_loader, *dex_file, dex_to_dex_compilation_level, true, dex_cache); current_dex_to_dex_methods_ = nullptr; } FreeThreadPools(); self->GetJniEnv()->DeleteGlobalRef(jclass_loader); } void CompilerDriver::Resolve(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) { // Resolution allocates classes and needs to run single-threaded to be deterministic. bool force_determinism = GetCompilerOptions().IsForceDeterminism(); ThreadPool* resolve_thread_pool = force_determinism ? single_thread_pool_.get() : parallel_thread_pool_.get(); size_t resolve_thread_count = force_determinism ? 1U : parallel_thread_count_; for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; CHECK(dex_file != nullptr); ResolveDexFile(class_loader, *dex_file, dex_files, resolve_thread_pool, resolve_thread_count, timings); } } // Resolve const-strings in the code. Done to have deterministic allocation behavior. Right now // this is single-threaded for simplicity. // TODO: Collect the relevant string indices in parallel, then allocate them sequentially in a // stable order. static void ResolveConstStrings(Handle dex_cache, const DexFile& dex_file, const DexFile::CodeItem* code_item) REQUIRES_SHARED(Locks::mutator_lock_) { if (code_item == nullptr) { // Abstract or native method. return; } const uint16_t* code_ptr = code_item->insns_; const uint16_t* code_end = code_item->insns_ + code_item->insns_size_in_code_units_; ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); while (code_ptr < code_end) { const Instruction* inst = Instruction::At(code_ptr); switch (inst->Opcode()) { case Instruction::CONST_STRING: case Instruction::CONST_STRING_JUMBO: { dex::StringIndex string_index((inst->Opcode() == Instruction::CONST_STRING) ? 
                                          inst->VRegB_21c() : inst->VRegB_31c());
        mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
        CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
        break;
      }

      default:
        break;
    }

    code_ptr += inst->SizeInCodeUnits();
  }
}

static void ResolveConstStrings(CompilerDriver* driver,
                                const std::vector<const DexFile*>& dex_files,
                                TimingLogger* timings) {
  ScopedObjectAccess soa(Thread::Current());
  StackHandleScope<1> hs(soa.Self());
  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
  MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));

  for (const DexFile* dex_file : dex_files) {
    dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
    TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);

    size_t class_def_count = dex_file->NumClassDefs();
    for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) {
      const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);

      const uint8_t* class_data = dex_file->GetClassData(class_def);
      if (class_data == nullptr) {
        // empty class, probably a marker interface
        continue;
      }

      ClassDataItemIterator it(*dex_file, class_data);
      it.SkipAllFields();

      bool compilation_enabled = driver->IsClassToCompile(
          dex_file->StringByTypeIdx(class_def.class_idx_));
      if (!compilation_enabled) {
        // Compilation is skipped, do not resolve const-string in code of this class.
        // TODO: Make sure that inlining honors this.
        continue;
      }

      // Direct methods.
      int64_t previous_direct_method_idx = -1;
      while (it.HasNextDirectMethod()) {
        uint32_t method_idx = it.GetMemberIndex();
        if (method_idx == previous_direct_method_idx) {
          // smali can create dex files with two encoded_methods sharing the same method_idx
          // http://code.google.com/p/smali/issues/detail?id=119
          it.Next();
          continue;
        }
        previous_direct_method_idx = method_idx;
        ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem());
        it.Next();
      }

      // Virtual methods.
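      // This loop mirrors the direct-method loop above, including the workaround for dex
      // files in which smali has emitted duplicate method indices.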
int64_t previous_virtual_method_idx = -1; while (it.HasNextVirtualMethod()) { uint32_t method_idx = it.GetMemberIndex(); if (method_idx == previous_virtual_method_idx) { // smali can create dex files with two encoded_methods sharing the same method_idx // http://code.google.com/p/smali/issues/detail?id=119 it.Next(); continue; } previous_virtual_method_idx = method_idx; ResolveConstStrings(dex_cache, *dex_file, it.GetMethodCodeItem()); it.Next(); } DCHECK(!it.HasNext()); } } } inline void CompilerDriver::CheckThreadPools() { DCHECK(parallel_thread_pool_ != nullptr); DCHECK(single_thread_pool_ != nullptr); } static void EnsureVerifiedOrVerifyAtRuntime(jobject jclass_loader, const std::vector& dex_files) { ScopedObjectAccess soa(Thread::Current()); StackHandleScope<2> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); MutableHandle cls(hs.NewHandle(nullptr)); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (const DexFile* dex_file : dex_files) { for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(i); const char* descriptor = dex_file->GetClassDescriptor(class_def); cls.Assign(class_linker->FindClass(soa.Self(), descriptor, class_loader)); if (cls == nullptr) { soa.Self()->ClearException(); } else if (&cls->GetDexFile() == dex_file) { DCHECK(cls->IsErroneous() || cls->IsVerified() || cls->ShouldVerifyAtRuntime()) << cls->PrettyClass() << " " << cls->GetStatus(); } } } } void CompilerDriver::PreCompile(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) { CheckThreadPools(); LoadImageClasses(timings); VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false); if (compiler_options_->IsAnyCompilationEnabled()) { // Avoid adding the dex files in the case where we aren't going to add compiled methods. // This reduces RAM usage for this case. for (const DexFile* dex_file : dex_files) { // Can be already inserted if the caller is CompileOne. This happens for gtests. if (!compiled_methods_.HaveDexFile(dex_file)) { compiled_methods_.AddDexFile(dex_file, dex_file->NumMethodIds()); } } // Resolve eagerly to prepare for compilation. Resolve(class_loader, dex_files, timings); VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false); } if (compiler_options_->AssumeClassesAreVerified()) { VLOG(compiler) << "Verify none mode specified, skipping verification."; SetVerified(class_loader, dex_files, timings); } if (!compiler_options_->IsVerificationEnabled()) { return; } if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) { // Resolve strings from const-string. Do this now to have a deterministic image. ResolveConstStrings(this, dex_files, timings); VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false); } Verify(class_loader, dex_files, timings); VLOG(compiler) << "Verify: " << GetMemoryUsageString(false); if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) { LOG(FATAL) << "Had a hard failure verifying all classes, and was asked to abort in such " << "situations. 
Please check the log."; } if (compiler_options_->IsAnyCompilationEnabled()) { if (kIsDebugBuild) { EnsureVerifiedOrVerifyAtRuntime(class_loader, dex_files); } InitializeClasses(class_loader, dex_files, timings); VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false); } UpdateImageClasses(timings); VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false); } bool CompilerDriver::IsImageClass(const char* descriptor) const { if (image_classes_ != nullptr) { // If we have a set of image classes, use those. return image_classes_->find(descriptor) != image_classes_->end(); } // No set of image classes, assume we include all the classes. // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case. return !GetCompilerOptions().IsBootImage(); } bool CompilerDriver::IsClassToCompile(const char* descriptor) const { if (classes_to_compile_ == nullptr) { return true; } return classes_to_compile_->find(descriptor) != classes_to_compile_->end(); } bool CompilerDriver::IsMethodToCompile(const MethodReference& method_ref) const { if (methods_to_compile_ == nullptr) { return true; } std::string tmp = method_ref.dex_file->PrettyMethod(method_ref.dex_method_index, true); return methods_to_compile_->find(tmp.c_str()) != methods_to_compile_->end(); } bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const { // Profile compilation info may be null if no profile is passed. if (!CompilerFilter::DependsOnProfile(compiler_options_->GetCompilerFilter())) { // Use the compiler filter instead of the presence of profile_compilation_info_ since // we may want to have full speed compilation along with profile based layout optimizations. return true; } // If we are using a profile filter but do not have a profile compilation info, compile nothing. if (profile_compilation_info_ == nullptr) { return false; } // Compile only hot methods, it is the profile saver's job to decide what startup methods to mark // as hot. bool result = profile_compilation_info_->GetMethodHotness(method_ref).IsHot(); if (kDebugProfileGuidedCompilation) { LOG(INFO) << "[ProfileGuidedCompilation] " << (result ? 
"Compiled" : "Skipped") << " method:" << method_ref.dex_file->PrettyMethod(method_ref.dex_method_index, true); } return result; } class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { public: ResolveCatchBlockExceptionsClassVisitor() : classes_() {} virtual bool operator()(ObjPtr c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { classes_.push_back(c); return true; } void FindExceptionTypesToResolve( std::set>* exceptions_to_resolve) REQUIRES_SHARED(Locks::mutator_lock_) { const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); for (ObjPtr klass : classes_) { for (ArtMethod& method : klass->GetMethods(pointer_size)) { FindExceptionTypesToResolveForMethod(&method, exceptions_to_resolve); } } } private: void FindExceptionTypesToResolveForMethod( ArtMethod* method, std::set>* exceptions_to_resolve) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile::CodeItem* code_item = method->GetCodeItem(); if (code_item == nullptr) { return; // native or abstract method } if (code_item->tries_size_ == 0) { return; // nothing to process } const uint8_t* encoded_catch_handler_list = DexFile::GetCatchHandlerData(*code_item, 0); size_t num_encoded_catch_handlers = DecodeUnsignedLeb128(&encoded_catch_handler_list); for (size_t i = 0; i < num_encoded_catch_handlers; i++) { int32_t encoded_catch_handler_size = DecodeSignedLeb128(&encoded_catch_handler_list); bool has_catch_all = false; if (encoded_catch_handler_size <= 0) { encoded_catch_handler_size = -encoded_catch_handler_size; has_catch_all = true; } for (int32_t j = 0; j < encoded_catch_handler_size; j++) { dex::TypeIndex encoded_catch_handler_handlers_type_idx = dex::TypeIndex(DecodeUnsignedLeb128(&encoded_catch_handler_list)); // Add to set of types to resolve if not already in the dex cache resolved types if (!method->IsResolvedTypeIdx(encoded_catch_handler_handlers_type_idx)) { exceptions_to_resolve->emplace(encoded_catch_handler_handlers_type_idx, method->GetDexFile()); } // ignore address associated with catch handler DecodeUnsignedLeb128(&encoded_catch_handler_list); } if (has_catch_all) { // ignore catch all address DecodeUnsignedLeb128(&encoded_catch_handler_list); } } } std::vector> classes_; }; class RecordImageClassesVisitor : public ClassVisitor { public: explicit RecordImageClassesVisitor(std::unordered_set* image_classes) : image_classes_(image_classes) {} bool operator()(ObjPtr klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { std::string temp; image_classes_->insert(klass->GetDescriptor(&temp)); return true; } private: std::unordered_set* const image_classes_; }; // Make a list of descriptors for classes to include in the image void CompilerDriver::LoadImageClasses(TimingLogger* timings) { CHECK(timings != nullptr); if (!GetCompilerOptions().IsBootImage()) { return; } TimingLogger::ScopedTiming t("LoadImageClasses", timings); // Make a first class to load all classes explicitly listed in the file Thread* self = Thread::Current(); ScopedObjectAccess soa(self); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); CHECK(image_classes_.get() != nullptr); for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) { const std::string& descriptor(*it); StackHandleScope<1> hs(self); Handle klass( hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str()))); if (klass == nullptr) { VLOG(compiler) << "Failed to find class " << descriptor; image_classes_->erase(it++); self->ClearException(); } else { ++it; } } // Resolve exception classes referenced 
by the loaded classes. The catch logic assumes // exceptions are resolved by the verifier when there is a catch block in an interested method. // Do this here so that exception classes appear to have been specified image classes. std::set> unresolved_exception_types; StackHandleScope<1> hs(self); Handle java_lang_Throwable( hs.NewHandle(class_linker->FindSystemClass(self, "Ljava/lang/Throwable;"))); do { unresolved_exception_types.clear(); { // Thread suspension is not allowed while ResolveCatchBlockExceptionsClassVisitor // is using a std::vector>. ScopedAssertNoThreadSuspension ants(__FUNCTION__); ResolveCatchBlockExceptionsClassVisitor visitor; class_linker->VisitClasses(&visitor); visitor.FindExceptionTypesToResolve(&unresolved_exception_types); } for (const auto& exception_type : unresolved_exception_types) { dex::TypeIndex exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; StackHandleScope<2> hs2(self); Handle dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file, nullptr))); Handle klass(hs2.NewHandle( (dex_cache != nullptr) ? class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, ScopedNullHandle()) : nullptr)); if (klass == nullptr) { const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx); const char* descriptor = dex_file->GetTypeDescriptor(type_id); LOG(FATAL) << "Failed to resolve class " << descriptor; } DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get())); } // Resolving exceptions may load classes that reference more exceptions, iterate until no // more are found } while (!unresolved_exception_types.empty()); // We walk the roots looking for classes so that we'll pick up the // above classes plus any classes them depend on such super // classes, interfaces, and the required ClassLinker roots. RecordImageClassesVisitor visitor(image_classes_.get()); class_linker->VisitClasses(&visitor); CHECK_NE(image_classes_->size(), 0U); } static void MaybeAddToImageClasses(Thread* self, ObjPtr klass, std::unordered_set* image_classes) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK_EQ(self, Thread::Current()); StackHandleScope<1> hs(self); std::string temp; const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); while (!klass->IsObjectClass()) { const char* descriptor = klass->GetDescriptor(&temp); std::pair::iterator, bool> result = image_classes->insert(descriptor); if (!result.second) { // Previously inserted. break; } VLOG(compiler) << "Adding " << descriptor << " to image classes"; for (size_t i = 0, num_interfaces = klass->NumDirectInterfaces(); i != num_interfaces; ++i) { ObjPtr interface = mirror::Class::GetDirectInterface(self, klass, i); DCHECK(interface != nullptr); MaybeAddToImageClasses(self, interface, image_classes); } for (auto& m : klass->GetVirtualMethods(pointer_size)) { MaybeAddToImageClasses(self, m.GetDeclaringClass(), image_classes); } if (klass->IsArrayClass()) { MaybeAddToImageClasses(self, klass->GetComponentType(), image_classes); } klass.Assign(klass->GetSuperClass()); } } // Keeps all the data for the update together. Also doubles as the reference visitor. // Note: we can use object pointers because we suspend all threads. 
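// The closure computed here is, roughly: starting from the listed image classes, every object
// reachable through static fields of initialized classes is visited, and every class found
// along the way is added to the image class set via MaybeAddToImageClasses() above.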
class ClinitImageUpdate {
 public:
  static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
                                   std::unordered_set<std::string>* image_class_descriptors,
                                   Thread* self,
                                   ClassLinker* linker) {
    std::unique_ptr<ClinitImageUpdate> res(
        new ClinitImageUpdate(hs, image_class_descriptors, self, linker));
    return res.release();
  }

  ~ClinitImageUpdate() {
    // Allow others to suspend again.
    self_->EndAssertNoThreadSuspension(old_cause_);
  }

  // Visitor for VisitReferences.
  void operator()(ObjPtr<mirror::Object> object,
                  MemberOffset field_offset,
                  bool /* is_static */) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
    if (ref != nullptr) {
      VisitClinitClassesObject(ref);
    }
  }

  // java.lang.ref.Reference visitor for VisitReferences.
  void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                  ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}

  // Ignore class native roots.
  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
      const {}
  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}

  void Walk() REQUIRES_SHARED(Locks::mutator_lock_) {
    // Use the initial classes as roots for a search.
    for (Handle<mirror::Class> klass_root : image_classes_) {
      VisitClinitClassesObject(klass_root.Get());
    }
    Thread* self = Thread::Current();
    ScopedAssertNoThreadSuspension ants(__FUNCTION__);
    for (Handle<mirror::Class> h_klass : to_insert_) {
      MaybeAddToImageClasses(self, h_klass.Get(), image_class_descriptors_);
    }
  }

 private:
  class FindImageClassesVisitor : public ClassVisitor {
   public:
    explicit FindImageClassesVisitor(VariableSizedHandleScope& hs,
                                     ClinitImageUpdate* data)
        : data_(data),
          hs_(hs) {}

    bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      std::string temp;
      const char* name = klass->GetDescriptor(&temp);
      if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
        data_->image_classes_.push_back(hs_.NewHandle(klass));
      } else {
        // Check whether it is initialized and has a clinit. They must be kept, too.
        if (klass->IsInitialized() && klass->FindClassInitializer(
            Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
          data_->image_classes_.push_back(hs_.NewHandle(klass));
        }
      }
      return true;
    }

   private:
    ClinitImageUpdate* const data_;
    VariableSizedHandleScope& hs_;
  };

  ClinitImageUpdate(VariableSizedHandleScope& hs,
                    std::unordered_set<std::string>* image_class_descriptors,
                    Thread* self,
                    ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
      : hs_(hs),
        image_class_descriptors_(image_class_descriptors),
        self_(self) {
    CHECK(linker != nullptr);
    CHECK(image_class_descriptors != nullptr);

    // Make sure nobody interferes with us.
    old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");

    // Find all the already-marked classes.
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    FindImageClassesVisitor visitor(hs_, this);
    linker->VisitClasses(&visitor);
  }

  void VisitClinitClassesObject(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(object != nullptr);
    if (marked_objects_.find(object) != marked_objects_.end()) {
      // Already processed.
      return;
    }

    // Mark it.
    marked_objects_.insert(object);

    if (object->IsClass()) {
      // Add to the TODO list since MaybeAddToImageClasses may cause thread suspension. Thread
      // suspension is not safe to do in VisitObjects or VisitReferences.
      to_insert_.push_back(hs_.NewHandle(object->AsClass()));
    } else {
      // Else visit the object's class.
      VisitClinitClassesObject(object->GetClass());
    }

    // If it is not a DexCache, visit all references.
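    // (A DexCache holds large arrays of merely-resolved types, methods and fields; following
    // those references would presumably drag in classes that are not actually reachable from
    // the initialized statics, which is why it is excluded below.)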
if (!object->IsDexCache()) { object->VisitReferences(*this, *this); } } VariableSizedHandleScope& hs_; mutable std::vector> to_insert_; mutable std::unordered_set marked_objects_; std::unordered_set* const image_class_descriptors_; std::vector> image_classes_; Thread* const self_; const char* old_cause_; DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate); }; void CompilerDriver::UpdateImageClasses(TimingLogger* timings) { if (GetCompilerOptions().IsBootImage()) { TimingLogger::ScopedTiming t("UpdateImageClasses", timings); Runtime* runtime = Runtime::Current(); // Suspend all threads. ScopedSuspendAll ssa(__FUNCTION__); VariableSizedHandleScope hs(Thread::Current()); std::string error_msg; std::unique_ptr update(ClinitImageUpdate::Create(hs, image_classes_.get(), Thread::Current(), runtime->GetClassLinker())); // Do the marking. update->Walk(); } } bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) { Runtime* runtime = Runtime::Current(); if (!runtime->IsAotCompiler()) { DCHECK(runtime->UseJitCompilation()); // Having the klass reference here implies that the klass is already loaded. return true; } if (!GetCompilerOptions().IsBootImage()) { // Assume loaded only if klass is in the boot image. App classes cannot be assumed // loaded because we don't even know what class loader will be used to load them. bool class_in_image = runtime->GetHeap()->FindSpaceFromObject(klass, false)->IsImageSpace(); return class_in_image; } std::string temp; const char* descriptor = klass->GetDescriptor(&temp); return IsImageClass(descriptor); } void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref) { MutexLock lock(self, dex_to_dex_references_lock_); // Since we're compiling one dex file at a time, we need to look for the // current dex file entry only at the end of dex_to_dex_references_. if (dex_to_dex_references_.empty() || &dex_to_dex_references_.back().GetDexFile() != method_ref.dex_file) { dex_to_dex_references_.emplace_back(*method_ref.dex_file); } dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.dex_method_index); } bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr referrer_class, ObjPtr resolved_class) { if (resolved_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Unknown class needs access checks. } bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. if (!is_accessible) { if (referrer_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. } // Perform access check, will return true if access is ok or false if we're going to have to // check this at runtime (for example for class loaders). is_accessible = referrer_class->CanAccess(resolved_class); } if (is_accessible) { stats_->TypeDoesntNeedAccessCheck(); } else { stats_->TypeNeedsAccessCheck(); } return is_accessible; } bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(ObjPtr referrer_class, ObjPtr resolved_class, bool* finalizable) { if (resolved_class == nullptr) { stats_->TypeNeedsAccessCheck(); // Be conservative. *finalizable = true; return false; // Unknown class needs access checks. } *finalizable = resolved_class->IsFinalizable(); bool is_accessible = resolved_class->IsPublic(); // Public classes are always accessible. if (!is_accessible) { if (referrer_class == nullptr) { stats_->TypeNeedsAccessCheck(); return false; // Incomplete referrer knowledge needs access check. 
} // Perform access and instantiable checks, will return true if access is ok or false if we're // going to have to check this at runtime (for example for class loaders). is_accessible = referrer_class->CanAccess(resolved_class); } bool result = is_accessible && resolved_class->IsInstantiable(); if (result) { stats_->TypeDoesntNeedAccessCheck(); } else { stats_->TypeNeedsAccessCheck(); } return result; } void CompilerDriver::ProcessedInstanceField(bool resolved) { if (!resolved) { stats_->UnresolvedInstanceField(); } else { stats_->ResolvedInstanceField(); } } void CompilerDriver::ProcessedStaticField(bool resolved, bool local) { if (!resolved) { stats_->UnresolvedStaticField(); } else if (local) { stats_->ResolvedLocalStaticField(); } else { stats_->ResolvedStaticField(); } } ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, const ScopedObjectAccess& soa) { // Try to resolve the field and compiling method's class. ArtField* resolved_field; mirror::Class* referrer_class; Handle dex_cache(mUnit->GetDexCache()); { Handle class_loader_handle = mUnit->GetClassLoader(); resolved_field = ResolveField(soa, dex_cache, class_loader_handle, mUnit, field_idx, false); referrer_class = resolved_field != nullptr ? ResolveCompilingMethodsClass(soa, dex_cache, class_loader_handle, mUnit) : nullptr; } bool can_link = false; if (resolved_field != nullptr && referrer_class != nullptr) { std::pair fast_path = IsFastInstanceField( dex_cache.Get(), referrer_class, resolved_field, field_idx); can_link = is_put ? fast_path.second : fast_path.first; } ProcessedInstanceField(can_link); return can_link ? resolved_field : nullptr; } bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, MemberOffset* field_offset, bool* is_volatile) { ScopedObjectAccess soa(Thread::Current()); ArtField* resolved_field = ComputeInstanceFieldInfo(field_idx, mUnit, is_put, soa); if (resolved_field == nullptr) { // Conservative defaults. *is_volatile = true; *field_offset = MemberOffset(static_cast(-1)); return false; } else { *is_volatile = resolved_field->IsVolatile(); *field_offset = resolved_field->GetOffset(); return true; } } const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const { MethodReference ref(dex_file, method_idx); return verification_results_->GetVerifiedMethod(ref); } bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) { if (!compiler_options_->IsVerificationEnabled()) { // If we didn't verify, every cast has to be treated as non-safe. 
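    // SafeCast()/NotASafeCast() in AOTCompilationStats above are driven by the verifier's type
    // analysis; without a VerifiedMethod there is no analysis to consult, so every cast is
    // conservatively treated as needing its runtime check.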
return false; } DCHECK(mUnit->GetVerifiedMethod() != nullptr); bool result = mUnit->GetVerifiedMethod()->IsSafeCast(dex_pc); if (result) { stats_->SafeCast(); } else { stats_->NotASafeCast(); } return result; } class CompilationVisitor { public: virtual ~CompilationVisitor() {} virtual void Visit(size_t index) = 0; }; class ParallelCompilationManager { public: ParallelCompilationManager(ClassLinker* class_linker, jobject class_loader, CompilerDriver* compiler, const DexFile* dex_file, const std::vector& dex_files, ThreadPool* thread_pool) : index_(0), class_linker_(class_linker), class_loader_(class_loader), compiler_(compiler), dex_file_(dex_file), dex_files_(dex_files), thread_pool_(thread_pool) {} ClassLinker* GetClassLinker() const { CHECK(class_linker_ != nullptr); return class_linker_; } jobject GetClassLoader() const { return class_loader_; } CompilerDriver* GetCompiler() const { CHECK(compiler_ != nullptr); return compiler_; } const DexFile* GetDexFile() const { CHECK(dex_file_ != nullptr); return dex_file_; } const std::vector& GetDexFiles() const { return dex_files_; } void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units) REQUIRES(!*Locks::mutator_lock_) { Thread* self = Thread::Current(); self->AssertNoPendingException(); CHECK_GT(work_units, 0U); index_.StoreRelaxed(begin); for (size_t i = 0; i < work_units; ++i) { thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor)); } thread_pool_->StartWorkers(self); // Ensure we're suspended while we're blocked waiting for the other threads to finish (worker // thread destructor's called below perform join). CHECK_NE(self->GetState(), kRunnable); // Wait for all the worker threads to finish. thread_pool_->Wait(self, true, false); // And stop the workers accepting jobs. thread_pool_->StopWorkers(self); } size_t NextIndex() { return index_.FetchAndAddSequentiallyConsistent(1); } private: class ForAllClosure : public Task { public: ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor) : manager_(manager), end_(end), visitor_(visitor) {} virtual void Run(Thread* self) { while (true) { const size_t index = manager_->NextIndex(); if (UNLIKELY(index >= end_)) { break; } visitor_->Visit(index); self->AssertNoPendingException(); } } virtual void Finalize() { delete this; } private: ParallelCompilationManager* const manager_; const size_t end_; CompilationVisitor* const visitor_; }; AtomicInteger index_; ClassLinker* const class_linker_; const jobject class_loader_; CompilerDriver* const compiler_; const DexFile* const dex_file_; const std::vector& dex_files_; ThreadPool* const thread_pool_; DISALLOW_COPY_AND_ASSIGN(ParallelCompilationManager); }; // A fast version of SkipClass above if the class pointer is available // that avoids the expensive FindInClassPath search. 
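// Returns true when klass was originally defined in a different dex file (e.g. a duplicate
// class definition across multidex files), so that each class is compiled only once, from its
// defining dex file.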
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(klass != nullptr); const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile(); if (&dex_file != &original_dex_file) { if (class_loader == nullptr) { LOG(WARNING) << "Skipping class " << klass->PrettyDescriptor() << " from " << dex_file.GetLocation() << " previously found in " << original_dex_file.GetLocation(); } return true; } return false; } static void CheckAndClearResolveException(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(self->IsExceptionPending()); mirror::Throwable* exception = self->GetException(); std::string temp; const char* descriptor = exception->GetClass()->GetDescriptor(&temp); const char* expected_exceptions[] = { "Ljava/lang/IllegalAccessError;", "Ljava/lang/IncompatibleClassChangeError;", "Ljava/lang/InstantiationError;", "Ljava/lang/LinkageError;", "Ljava/lang/NoClassDefFoundError;", "Ljava/lang/NoSuchFieldError;", "Ljava/lang/NoSuchMethodError;" }; bool found = false; for (size_t i = 0; (found == false) && (i < arraysize(expected_exceptions)); ++i) { if (strcmp(descriptor, expected_exceptions[i]) == 0) { found = true; } } if (!found) { LOG(FATAL) << "Unexpected exception " << exception->Dump(); } self->ClearException(); } bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_idx); const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { // Empty class such as a marker interface. return false; } ClassDataItemIterator it(dex_file, class_data); it.SkipStaticFields(); // We require a constructor barrier if there are final instance fields. while (it.HasNextInstanceField()) { if (it.MemberIsFinal()) { return true; } it.Next(); } return false; } class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor { public: explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} void Visit(size_t class_def_index) OVERRIDE REQUIRES(!Locks::mutator_lock_) { ATRACE_CALL(); Thread* const self = Thread::Current(); jobject jclass_loader = manager_->GetClassLoader(); const DexFile& dex_file = *manager_->GetDexFile(); ClassLinker* class_linker = manager_->GetClassLinker(); // If an instance field is final then we need to have a barrier on the return, static final // fields are assigned within the lock held for class initialization. Conservatively assume // constructor barriers are always required. bool requires_constructor_barrier = true; // Method and Field are the worst. We can't resolve without either // context from the code use (to disambiguate virtual vs direct // method and instance vs static field) or from class // definitions. While the compiler will resolve what it can as it // needs it, here we try to resolve fields and methods used in class // definitions, since many of them many never be referenced by // generated code. const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); ScopedObjectAccess soa(self); StackHandleScope<2> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); Handle dex_cache(hs.NewHandle(class_linker->FindDexCache( soa.Self(), dex_file))); // Resolve the class. 
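    // Resolution failures here are expected (e.g. a missing super-class) and tolerated:
    // CheckAndClearResolveException() above accepts the usual linkage errors, and compilation
    // simply proceeds without eager resolution for this class.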
mirror::Class* klass = class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, class_loader); bool resolve_fields_and_methods; if (klass == nullptr) { // Class couldn't be resolved, for example, super-class is in a different dex file. Don't // attempt to resolve methods and fields when there is no declaring class. CheckAndClearResolveException(soa.Self()); resolve_fields_and_methods = false; } else { // We successfully resolved a class, should we skip it? if (SkipClass(jclass_loader, dex_file, klass)) { return; } // We want to resolve the methods and fields eagerly. resolve_fields_and_methods = true; } // Note the class_data pointer advances through the headers, // static fields, instance fields, direct methods, and virtual // methods. const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { // Empty class such as a marker interface. requires_constructor_barrier = false; } else { ClassDataItemIterator it(dex_file, class_data); while (it.HasNextStaticField()) { if (resolve_fields_and_methods) { ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, true); if (field == nullptr) { CheckAndClearResolveException(soa.Self()); } } it.Next(); } // We require a constructor barrier if there are final instance fields. requires_constructor_barrier = false; while (it.HasNextInstanceField()) { if (it.MemberIsFinal()) { requires_constructor_barrier = true; } if (resolve_fields_and_methods) { ArtField* field = class_linker->ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, false); if (field == nullptr) { CheckAndClearResolveException(soa.Self()); } } it.Next(); } if (resolve_fields_and_methods) { while (it.HasNextDirectMethod()) { ArtMethod* method = class_linker->ResolveMethod( dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, it.GetMethodInvokeType(class_def)); if (method == nullptr) { CheckAndClearResolveException(soa.Self()); } it.Next(); } while (it.HasNextVirtualMethod()) { ArtMethod* method = class_linker->ResolveMethod( dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, it.GetMethodInvokeType(class_def)); if (method == nullptr) { CheckAndClearResolveException(soa.Self()); } it.Next(); } DCHECK(!it.HasNext()); } } manager_->GetCompiler()->SetRequiresConstructorBarrier(self, &dex_file, class_def_index, requires_constructor_barrier); } private: const ParallelCompilationManager* const manager_; }; class ResolveTypeVisitor : public CompilationVisitor { public: explicit ResolveTypeVisitor(const ParallelCompilationManager* manager) : manager_(manager) { } void Visit(size_t type_idx) OVERRIDE REQUIRES(!Locks::mutator_lock_) { // Class derived values are more complicated, they require the linker and loader. ScopedObjectAccess soa(Thread::Current()); ClassLinker* class_linker = manager_->GetClassLinker(); const DexFile& dex_file = *manager_->GetDexFile(); StackHandleScope<2> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(manager_->GetClassLoader()))); Handle dex_cache(hs.NewHandle(class_linker->RegisterDexFile( dex_file, class_loader.Get()))); ObjPtr klass = (dex_cache != nullptr) ? 
class_linker->ResolveType(dex_file, dex::TypeIndex(type_idx), dex_cache, class_loader) : nullptr; if (klass == nullptr) { soa.Self()->AssertPendingException(); mirror::Throwable* exception = soa.Self()->GetException(); VLOG(compiler) << "Exception during type resolution: " << exception->Dump(); if (exception->GetClass()->DescriptorEquals("Ljava/lang/OutOfMemoryError;")) { // There's little point continuing compilation if the heap is exhausted. LOG(FATAL) << "Out of memory during type resolution for compilation"; } soa.Self()->ClearException(); } } private: const ParallelCompilationManager* const manager_; }; void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // TODO: we could resolve strings here, although the string table is largely filled with class // and method names. ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); if (GetCompilerOptions().IsBootImage()) { // For images we resolve all types, such as array, whereas for applications just those with // classdefs are resolved by ResolveClassFieldsAndMethods. TimingLogger::ScopedTiming t("Resolve Types", timings); ResolveTypeVisitor visitor(&context); context.ForAll(0, dex_file.NumTypeIds(), &visitor, thread_count); } TimingLogger::ScopedTiming t("Resolve MethodsAndFields", timings); ResolveClassFieldsAndMethodsVisitor visitor(&context); context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } void CompilerDriver::SetVerified(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) { // This can be run in parallel. for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); SetVerifiedDexFile(class_loader, *dex_file, dex_files, parallel_thread_pool_.get(), parallel_thread_count_, timings); } } static void PopulateVerifiedMethods(const DexFile& dex_file, uint32_t class_def_index, VerificationResults* verification_results) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { return; } ClassDataItemIterator it(dex_file, class_data); it.SkipAllFields(); while (it.HasNextDirectMethod()) { verification_results->CreateVerifiedMethodFor(MethodReference(&dex_file, it.GetMemberIndex())); it.Next(); } while (it.HasNextVirtualMethod()) { verification_results->CreateVerifiedMethodFor(MethodReference(&dex_file, it.GetMemberIndex())); it.Next(); } DCHECK(!it.HasNext()); } static void LoadAndUpdateStatus(const DexFile& dex_file, const DexFile::ClassDef& class_def, mirror::Class::Status status, Handle class_loader, Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { StackHandleScope<1> hs(self); const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Handle cls(hs.NewHandle( class_linker->FindClass(self, descriptor, class_loader))); if (cls != nullptr) { // Check that the class is resolved with the current dex file. We might get // a boot image class, or a class in a different dex file for multidex, and // we should not update the status in that case. 
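    // With multidex, FindClass() can legitimately return a class of the same name defined in a
    // different dex file (or in the boot image); updating that class's status here would
    // clobber state belonging to the other definition.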
if (&cls->GetDexFile() == &dex_file) { ObjectLock lock(self, cls); mirror::Class::SetStatus(cls, status, self); } } else { DCHECK(self->IsExceptionPending()); self->ClearException(); } } bool CompilerDriver::FastVerify(jobject jclass_loader, const std::vector& dex_files, TimingLogger* timings) { verifier::VerifierDeps* verifier_deps = Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps(); // If there exist VerifierDeps that aren't the ones we just created to output, use them to verify. if (verifier_deps == nullptr || verifier_deps->OutputOnly()) { return false; } TimingLogger::ScopedTiming t("Fast Verify", timings); ScopedObjectAccess soa(Thread::Current()); StackHandleScope<2> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); if (!verifier_deps->ValidateDependencies(class_loader, soa.Self())) { return false; } bool compiler_only_verifies = !GetCompilerOptions().IsAnyCompilationEnabled(); // We successfully validated the dependencies, now update class status // of verified classes. Note that the dependencies also record which classes // could not be fully verified; we could try again, but that would hurt verification // time. So instead we assume these classes still need to be verified at // runtime. for (const DexFile* dex_file : dex_files) { // Fetch the list of unverified classes. const std::set& unverified_classes = verifier_deps->GetUnverifiedClasses(*dex_file); for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(i); if (unverified_classes.find(class_def.class_idx_) == unverified_classes.end()) { if (compiler_only_verifies) { // Just update the compiled_classes_ map. The compiler doesn't need to resolve // the type. DexFileReference ref(dex_file, i); mirror::Class::Status existing = mirror::Class::kStatusNotReady; DCHECK(compiled_classes_.Get(ref, &existing)) << ref.dex_file->GetLocation(); ClassStateTable::InsertResult result = compiled_classes_.Insert(ref, existing, mirror::Class::kStatusVerified); CHECK_EQ(result, ClassStateTable::kInsertResultSuccess); } else { // Update the class status, so later compilation stages know they don't need to verify // the class. LoadAndUpdateStatus( *dex_file, class_def, mirror::Class::kStatusVerified, class_loader, soa.Self()); // Create `VerifiedMethod`s for each methods, the compiler expects one for // quickening or compiling. // Note that this means: // - We're only going to compile methods that did verify. // - Quickening will not do checkcast ellision. // TODO(ngeoffray): Reconsider this once we refactor compiler filters. PopulateVerifiedMethods(*dex_file, i, verification_results_); } } else if (!compiler_only_verifies) { // Make sure later compilation stages know they should not try to verify // this class again. LoadAndUpdateStatus(*dex_file, class_def, mirror::Class::kStatusRetryVerificationAtRuntime, class_loader, soa.Self()); } } } return true; } void CompilerDriver::Verify(jobject jclass_loader, const std::vector& dex_files, TimingLogger* timings) { if (FastVerify(jclass_loader, dex_files, timings)) { return; } // If there is no existing `verifier_deps` (because of non-existing vdex), or // the existing `verifier_deps` is not valid anymore, create a new one for // non boot image compilation. The verifier will need it to record the new dependencies. // Then dex2oat can update the vdex file with these new dependencies. if (!GetCompilerOptions().IsBootImage()) { // Dex2oat creates the verifier deps. 
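    // Each worker thread gets its own VerifierDeps below so that recording dependencies during
    // parallel verification does not contend on a shared structure; the per-thread deps are
    // merged back into the main one once verification finishes.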
// Create the main VerifierDeps, and set it to this thread. verifier::VerifierDeps* verifier_deps = Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps(); CHECK(verifier_deps != nullptr); Thread::Current()->SetVerifierDeps(verifier_deps); // Create per-thread VerifierDeps to avoid contention on the main one. // We will merge them after verification. for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) { worker->GetThread()->SetVerifierDeps(new verifier::VerifierDeps(dex_files_for_oat_file_)); } } // Verification updates VerifierDeps and needs to run single-threaded to be deterministic. bool force_determinism = GetCompilerOptions().IsForceDeterminism(); ThreadPool* verify_thread_pool = force_determinism ? single_thread_pool_.get() : parallel_thread_pool_.get(); size_t verify_thread_count = force_determinism ? 1U : parallel_thread_count_; for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); VerifyDexFile(jclass_loader, *dex_file, dex_files, verify_thread_pool, verify_thread_count, timings); } if (!GetCompilerOptions().IsBootImage()) { // Merge all VerifierDeps into the main one. verifier::VerifierDeps* verifier_deps = Thread::Current()->GetVerifierDeps(); for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) { verifier::VerifierDeps* thread_deps = worker->GetThread()->GetVerifierDeps(); worker->GetThread()->SetVerifierDeps(nullptr); verifier_deps->MergeWith(*thread_deps, dex_files_for_oat_file_); delete thread_deps; } Thread::Current()->SetVerifierDeps(nullptr); } } class VerifyClassVisitor : public CompilationVisitor { public: VerifyClassVisitor(const ParallelCompilationManager* manager, verifier::HardFailLogMode log_level) : manager_(manager), log_level_(log_level) {} virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { ATRACE_CALL(); ScopedObjectAccess soa(Thread::Current()); const DexFile& dex_file = *manager_->GetDexFile(); const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = manager_->GetClassLinker(); jobject jclass_loader = manager_->GetClassLoader(); StackHandleScope<3> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); Handle klass( hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); verifier::FailureKind failure_kind; if (klass == nullptr) { CHECK(soa.Self()->IsExceptionPending()); soa.Self()->ClearException(); /* * At compile time, we can still structurally verify the class even if FindClass fails. * This is to ensure the class is structurally sound for compilation. An unsound class * will be rejected by the verifier and later skipped during compilation in the compiler. */ Handle dex_cache(hs.NewHandle(class_linker->FindDexCache( soa.Self(), dex_file))); std::string error_msg; failure_kind = verifier::MethodVerifier::VerifyClass(soa.Self(), &dex_file, dex_cache, class_loader, class_def, Runtime::Current()->GetCompilerCallbacks(), true /* allow soft failures */, log_level_, &error_msg); if (failure_kind == verifier::FailureKind::kHardFailure) { LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor) << " because: " << error_msg; manager_->GetCompiler()->SetHadHardVerifierFailure(); } else { // Force a soft failure for the VerifierDeps. This is a sanity measure, as // the vdex file already records that the class hasn't been resolved. 
It avoids // trying to do future verification optimizations when processing the vdex file. DCHECK(failure_kind == verifier::FailureKind::kSoftFailure || failure_kind == verifier::FailureKind::kNoFailure) << failure_kind; failure_kind = verifier::FailureKind::kSoftFailure; } } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) { CHECK(klass->IsResolved()) << klass->PrettyClass(); failure_kind = class_linker->VerifyClass(soa.Self(), klass, log_level_); if (klass->IsErroneous()) { // ClassLinker::VerifyClass throws, which isn't useful in the compiler. CHECK(soa.Self()->IsExceptionPending()); soa.Self()->ClearException(); manager_->GetCompiler()->SetHadHardVerifierFailure(); } CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerified() || klass->IsErroneous()) << klass->PrettyDescriptor() << ": state=" << klass->GetStatus(); // Class has a meaningful status for the compiler now, record it. ClassReference ref(manager_->GetDexFile(), class_def_index); manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); // It is *very* problematic if there are verification errors in the boot classpath. For example, // we rely on things working OK without verification when the decryption dialog is brought up. // So abort in a debug build if we find this violated. if (kIsDebugBuild) { // TODO(narayan): Remove this special case for signature polymorphic // invokes once verifier support is fully implemented. if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage() && !android::base::StartsWith(descriptor, "Ljava/lang/invoke/")) { DCHECK(klass->IsVerified()) << "Boot classpath class " << klass->PrettyClass() << " failed to fully verify: state= " << klass->GetStatus(); } if (klass->IsVerified()) { DCHECK_EQ(failure_kind, verifier::FailureKind::kNoFailure); } else if (klass->ShouldVerifyAtRuntime()) { DCHECK_EQ(failure_kind, verifier::FailureKind::kSoftFailure); } else { DCHECK_EQ(failure_kind, verifier::FailureKind::kHardFailure); } } } else { // Make the skip a soft failure, essentially being considered as verify at runtime. failure_kind = verifier::FailureKind::kSoftFailure; } verifier::VerifierDeps::MaybeRecordVerificationStatus( dex_file, class_def.class_idx_, failure_kind); soa.Self()->AssertNoPendingException(); } private: const ParallelCompilationManager* const manager_; const verifier::HardFailLogMode log_level_; }; void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) { TimingLogger::ScopedTiming t("Verify Dex File", timings); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); verifier::HardFailLogMode log_level = GetCompilerOptions().AbortOnHardVerifierFailure() ? 
verifier::HardFailLogMode::kLogInternalFatal : verifier::HardFailLogMode::kLogWarning; VerifyClassVisitor visitor(&context, log_level); context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } class SetVerifiedClassVisitor : public CompilationVisitor { public: explicit SetVerifiedClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { ATRACE_CALL(); ScopedObjectAccess soa(Thread::Current()); const DexFile& dex_file = *manager_->GetDexFile(); const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); const char* descriptor = dex_file.GetClassDescriptor(class_def); ClassLinker* class_linker = manager_->GetClassLinker(); jobject jclass_loader = manager_->GetClassLoader(); StackHandleScope<3> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); Handle klass( hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); // Class might have failed resolution. Then don't set it to verified. if (klass != nullptr) { // Only do this if the class is resolved. If even resolution fails, quickening will go very, // very wrong. if (klass->IsResolved() && !klass->IsErroneousResolved()) { if (klass->GetStatus() < mirror::Class::kStatusVerified) { ObjectLock lock(soa.Self(), klass); // Set class status to verified. mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, soa.Self()); // Mark methods as pre-verified. If we don't do this, the interpreter will run with // access checks. klass->SetSkipAccessChecksFlagOnAllMethods( GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet())); klass->SetVerificationAttempted(); } // Record the final class status if necessary. 
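// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): ForAll above fans the
// [0, NumClassDefs) index range out across `thread_count` workers, each
// invoking the visitor on a disjoint slice. A hypothetical minimal version:
#include <algorithm>
#include <functional>
#include <thread>
#include <vector>

void ForAllSketch(size_t begin, size_t end, size_t thread_count,
                  const std::function<void(size_t)>& visit) {
  if (thread_count == 0) {
    thread_count = 1;  // always run the visitor on at least one thread
  }
  const size_t chunk = (end - begin + thread_count - 1) / thread_count;
  std::vector<std::thread> pool;
  for (size_t t = 0; t < thread_count; ++t) {
    const size_t lo = begin + t * chunk;
    const size_t hi = std::min(end, lo + chunk);
    if (lo >= hi) {
      break;
    }
    pool.emplace_back([lo, hi, &visit] {
      for (size_t i = lo; i < hi; ++i) {
        visit(i);  // e.g. one class def index per call
      }
    });
  }
  for (std::thread& t : pool) {
    t.join();
  }
}
// ---------------------------------------------------------------------------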
ClassReference ref(manager_->GetDexFile(), class_def_index); manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus()); } } else { Thread* self = soa.Self(); DCHECK(self->IsExceptionPending()); self->ClearException(); } } private: const ParallelCompilationManager* const manager_; }; void CompilerDriver::SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) { TimingLogger::ScopedTiming t("Verify Dex File", timings); if (!compiled_classes_.HaveDexFile(&dex_file)) { compiled_classes_.AddDexFile(&dex_file, dex_file.NumClassDefs()); } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files, thread_pool); SetVerifiedClassVisitor visitor(&context); context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } class InitializeClassVisitor : public CompilationVisitor { public: explicit InitializeClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} void Visit(size_t class_def_index) OVERRIDE { ATRACE_CALL(); jobject jclass_loader = manager_->GetClassLoader(); const DexFile& dex_file = *manager_->GetDexFile(); const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_); const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); ScopedObjectAccess soa(Thread::Current()); StackHandleScope<3> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); Handle klass( hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader))); if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) { TryInitializeClass(klass, class_loader); } // Clear any class not found or verification exceptions. soa.Self()->ClearException(); } // A helper function for initializing klass. void TryInitializeClass(Handle klass, Handle& class_loader) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile& dex_file = klass->GetDexFile(); const DexFile::ClassDef* class_def = klass->GetClassDef(); const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_); const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_); ScopedObjectAccessUnchecked soa(Thread::Current()); StackHandleScope<3> hs(soa.Self()); const bool is_boot_image = manager_->GetCompiler()->GetCompilerOptions().IsBootImage(); const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage(); mirror::Class::Status old_status = klass->GetStatus(); // Don't initialize classes in boot space when compiling app image if (is_app_image && klass->IsBootStrapClassLoaded()) { // Also return early and don't store the class status in the recorded class status. return; } // Only try to initialize classes that were successfully verified. if (klass->IsVerified()) { // Attempt to initialize the class but bail if we either need to initialize the super-class // or static fields. manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false); old_status = klass->GetStatus(); if (!klass->IsInitialized()) { // We don't want non-trivial class initialization occurring on multiple threads due to // deadlock problems. 
For example, a parent class is initialized (holding its lock) that // refers to a sub-class in its static/class initializer causing it to try to acquire the // sub-class' lock. While on a second thread the sub-class is initialized (holding its lock) // after first initializing its parents, whose locks are acquired. This leads to a // parent-to-child and a child-to-parent lock ordering and consequent potential deadlock. // We need to use an ObjectLock due to potential suspension in the interpreting code. Rather // than use a special Object for the purpose we use the Class of java.lang.Class. Handle h_klass(hs.NewHandle(klass->GetClass())); ObjectLock lock(soa.Self(), h_klass); // Attempt to initialize allowing initialization of parent classes but still not static // fields. // Initialize dependencies first only for app image, to make TryInitialize recursive. bool is_superclass_initialized = !is_app_image ? true : InitializeDependencies(klass, class_loader, soa.Self()); if (!is_app_image || (is_app_image && is_superclass_initialized)) { manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true); } // Otherwise it's in app image but superclasses can't be initialized, no need to proceed. old_status = klass->GetStatus(); bool too_many_encoded_fields = false; if (!is_boot_image && klass->NumStaticFields() > kMaxEncodedFields) { too_many_encoded_fields = true; } // If the class was not initialized, we can proceed to see if we can initialize static // fields. Limit the max number of encoded fields. if (!klass->IsInitialized() && (is_app_image || is_boot_image) && is_superclass_initialized && !too_many_encoded_fields && manager_->GetCompiler()->IsImageClass(descriptor)) { bool can_init_static_fields = false; if (is_boot_image) { // We need to initialize static fields, we only do this for image classes that aren't // marked with the $NoPreloadHolder (which implies this should not be initialized // early). can_init_static_fields = !StringPiece(descriptor).ends_with("$NoPreloadHolder;"); } else { CHECK(is_app_image); // The boot image case doesn't need to recursively initialize the dependencies with // special logic since the class linker already does this. can_init_static_fields = !soa.Self()->IsExceptionPending() && is_superclass_initialized && NoClinitInDependency(klass, soa.Self(), &class_loader); // TODO The checking for clinit can be removed since it's already // checked when init superclass. Currently keep it because it contains // processing of intern strings. Will be removed later when intern strings // and clinit are both initialized. } if (can_init_static_fields) { VLOG(compiler) << "Initializing: " << descriptor; // TODO multithreading support. We should ensure the current compilation thread has // exclusive access to the runtime and the transaction. To achieve this, we could use // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity // checks in Thread::AssertThreadSuspensionIsAllowable. Runtime* const runtime = Runtime::Current(); Transaction transaction; // Run the class initializer in transaction mode. runtime->EnterTransactionMode(&transaction); bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true, true); // TODO we detach transaction from runtime to indicate we quit the transactional // mode which prevents the GC from visiting objects modified during the transaction. // Ensure GC is not run so don't access freed objects when aborting transaction. 
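// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): the deadlock described in
// the comment above is a classic lock-order inversion (parent -> child on one
// thread, child -> parent on another). One standard remedy is to acquire both
// locks atomically with std::lock; ART instead sidesteps the ordering problem
// by locking a single unrelated object (the Class of java.lang.Class).
#include <mutex>

std::mutex parent_lock;
std::mutex child_lock;

void InitializeFromEitherSide() {
  // std::lock acquires both mutexes deadlock-free, whichever order concurrent
  // callers would otherwise take them in.
  std::lock(parent_lock, child_lock);
  std::lock_guard<std::mutex> parent_guard(parent_lock, std::adopt_lock);
  std::lock_guard<std::mutex> child_guard(child_lock, std::adopt_lock);
  // ... initialization that touches both the parent and the sub-class ...
}
// ---------------------------------------------------------------------------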
          {
            ScopedAssertNoThreadSuspension ants("Transaction end");
            runtime->ExitTransactionMode();
            if (!success) {
              CHECK(soa.Self()->IsExceptionPending());
              mirror::Throwable* exception = soa.Self()->GetException();
              VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
                             << exception->Dump();
              std::ostream* file_log = manager_->GetCompiler()->
                  GetCompilerOptions().GetInitFailureOutput();
              if (file_log != nullptr) {
                *file_log << descriptor << "\n";
                *file_log << exception->Dump() << "\n";
              }
              soa.Self()->ClearException();
              transaction.Rollback();
              CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
            } else if (is_boot_image) {
              // For boot image, we want to put the updated status in the oat class since we
              // can't reject the image anyway.
              old_status = klass->GetStatus();
            }
          }

          if (!success) {
            // On failure, still intern strings of static fields and strings seen in <clinit>,
            // as these will be created in the zygote. This is separated from the transaction
            // code just above as we will allocate strings, so must be allowed to suspend.
            if (&klass->GetDexFile() == manager_->GetDexFile()) {
              InternStrings(klass, class_loader);
            } else {
              DCHECK(!is_boot_image) << "Boot image must have equal dex files";
            }
          }
        }
      }
      // If the class still isn't initialized, at least try some checks that initialization
      // would do so they can be skipped at runtime.
      if (!klass->IsInitialized() &&
          manager_->GetClassLinker()->ValidateSuperClassDescriptors(klass)) {
        old_status = mirror::Class::kStatusSuperclassValidated;
      } else {
        soa.Self()->ClearException();
      }
      soa.Self()->AssertNoPendingException();
    }
  }
  // Record the final class status if necessary.
  ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
  // Back up the status before doing initialization for static encoded fields,
  // because the static encoded branch wants to keep the status to uninitialized.
  manager_->GetCompiler()->RecordClassStatus(ref, old_status);
}

private:
void InternStrings(Handle<mirror::Class> klass, Handle<mirror::ClassLoader> class_loader)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage());
  DCHECK(klass->IsVerified());
  DCHECK(!klass->IsInitialized());
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::DexCache> h_dex_cache = hs.NewHandle(klass->GetDexCache());
  const DexFile* dex_file = manager_->GetDexFile();
  const DexFile::ClassDef* class_def = klass->GetClassDef();
  ClassLinker* class_linker = manager_->GetClassLinker();

  // Check encoded final field values for strings and intern.
  annotations::RuntimeEncodedStaticFieldValueIterator value_it(
      *dex_file, &h_dex_cache, &class_loader, manager_->GetClassLinker(), *class_def);
  for ( ; value_it.HasNext(); value_it.Next()) {
    if (value_it.GetValueType() == annotations::RuntimeEncodedStaticFieldValueIterator::kString) {
      // Resolve the string. This will intern the string.
      art::ObjPtr<mirror::String> resolved = class_linker->ResolveString(
          *dex_file, dex::StringIndex(value_it.GetJavaValue().i), h_dex_cache);
      CHECK(resolved != nullptr);
    }
  }

  // Intern strings seen in <clinit>.
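// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): the transactional
// <clinit> execution above records every mutation so that a failed
// initializer can be rolled back and the previous class status restored. A
// minimal undo-log version of that idea (TransactionSketch is hypothetical):
#include <functional>
#include <vector>

class TransactionSketch {
 public:
  void RecordWrite(int* addr, int old_value) {
    undo_log_.emplace_back([addr, old_value] { *addr = old_value; });
  }
  void Rollback() {
    // Replay the undo entries in reverse order, newest write first.
    for (auto it = undo_log_.rbegin(); it != undo_log_.rend(); ++it) {
      (*it)();
    }
    undo_log_.clear();
  }

 private:
  std::vector<std::function<void()>> undo_log_;
};

int main() {
  int static_field = 0;
  TransactionSketch transaction;
  transaction.RecordWrite(&static_field, static_field);
  static_field = 42;       // speculative write made by the "initializer"
  transaction.Rollback();  // the initializer threw: restore the prior state
  return static_field == 0 ? 0 : 1;
}
// ---------------------------------------------------------------------------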
  ArtMethod* clinit = klass->FindClassInitializer(class_linker->GetImagePointerSize());
  if (clinit != nullptr) {
    const DexFile::CodeItem* code_item = clinit->GetCodeItem();
    DCHECK(code_item != nullptr);
    const Instruction* inst = Instruction::At(code_item->insns_);
    const uint32_t insns_size = code_item->insns_size_in_code_units_;
    for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
      if (inst->Opcode() == Instruction::CONST_STRING) {
        ObjPtr<mirror::String> s = class_linker->ResolveString(
            *dex_file, dex::StringIndex(inst->VRegB_21c()), h_dex_cache);
        CHECK(s != nullptr);
      } else if (inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
        ObjPtr<mirror::String> s = class_linker->ResolveString(
            *dex_file, dex::StringIndex(inst->VRegB_31c()), h_dex_cache);
        CHECK(s != nullptr);
      }
      dex_pc += inst->SizeInCodeUnits();
      inst = inst->Next();
    }
  }
}

bool ResolveTypesOfMethods(Thread* self, ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  auto rtn_type = m->GetReturnType(true);  // Return value is discarded because
                                           // resolution is done internally.
  if (rtn_type == nullptr) {
    self->ClearException();
    return false;
  }
  const DexFile::TypeList* types = m->GetParameterTypeList();
  if (types != nullptr) {
    for (uint32_t i = 0; i < types->Size(); ++i) {
      dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
      auto param_type = m->GetClassFromTypeIndex(param_type_idx, true);
      if (param_type == nullptr) {
        self->ClearException();
        return false;
      }
    }
  }
  return true;
}

// Pre-resolve types mentioned in all method signatures before starting a transaction,
// since ResolveType doesn't work in transaction mode.
bool PreResolveTypes(Thread* self, const Handle<mirror::Class>& klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  PointerSize pointer_size = manager_->GetClassLinker()->GetImagePointerSize();
  for (ArtMethod& m : klass->GetMethods(pointer_size)) {
    if (!ResolveTypesOfMethods(self, &m)) {
      return false;
    }
  }
  if (klass->IsInterface()) {
    return true;
  } else if (klass->HasSuperClass()) {
    StackHandleScope<1> hs(self);
    MutableHandle<mirror::Class> super_klass(hs.NewHandle(klass->GetSuperClass()));
    for (int i = super_klass->GetVTableLength() - 1; i >= 0; --i) {
      ArtMethod* m = klass->GetVTableEntry(i, pointer_size);
      ArtMethod* super_m = super_klass->GetVTableEntry(i, pointer_size);
      if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
        return false;
      }
    }
    for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
      super_klass.Assign(klass->GetIfTable()->GetInterface(i));
      if (klass->GetClassLoader() != super_klass->GetClassLoader()) {
        uint32_t num_methods = super_klass->NumVirtualMethods();
        for (uint32_t j = 0; j < num_methods; ++j) {
          ArtMethod* m = klass->GetIfTable()->GetMethodArray(i)->GetElementPtrSize<ArtMethod*>(
              j, pointer_size);
          ArtMethod* super_m = super_klass->GetVirtualMethod(j, pointer_size);
          if (!ResolveTypesOfMethods(self, m) || !ResolveTypesOfMethods(self, super_m)) {
            return false;
          }
        }
      }
    }
  }
  return true;
}

// Initialize the klass's dependencies recursively before initializing itself.
// Checking for interfaces is also necessary since interfaces can contain
// both default methods and static encoded fields.
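// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): the <clinit> scan above
// walks a variable-width instruction stream, advancing by each instruction's
// size and reacting to CONST_STRING opcodes. The stand-in below walks records
// of the form [opcode, length, payload...] the same way; kConstString is a
// made-up opcode value.
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uint8_t kConstString = 0x1a;

size_t CountStringConstants(const std::vector<uint8_t>& code) {
  size_t count = 0;
  for (size_t pc = 0; pc + 1 < code.size();) {
    const uint8_t opcode = code[pc];
    const uint8_t size = code[pc + 1];  // per-record length, like SizeInCodeUnits()
    if (opcode == kConstString) {
      ++count;  // here ART would resolve (and thereby intern) the string
    }
    if (size == 0) {
      break;  // defend against a malformed stream
    }
    pc += size;
  }
  return count;
}
// ---------------------------------------------------------------------------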
bool InitializeDependencies(const Handle<mirror::Class>& klass,
                            Handle<mirror::ClassLoader> class_loader,
                            Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (klass->HasSuperClass()) {
    ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
    if (!handle_scope_super->IsInitialized()) {
      this->TryInitializeClass(handle_scope_super, class_loader);
      if (!handle_scope_super->IsInitialized()) {
        return false;
      }
    }
  }

  uint32_t num_if = klass->NumDirectInterfaces();
  for (size_t i = 0; i < num_if; i++) {
    ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
    TryInitializeClass(handle_interface, class_loader);
    if (!handle_interface->IsInitialized()) {
      return false;
    }
  }

  return PreResolveTypes(self, klass);
}

// In this phase the classes containing class initializers are ignored. Make sure no
// clinit appears in klass's super class chain or interfaces.
bool NoClinitInDependency(const Handle<mirror::Class>& klass,
                          Thread* self,
                          Handle<mirror::ClassLoader>* class_loader)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* clinit =
      klass->FindClassInitializer(manager_->GetClassLinker()->GetImagePointerSize());
  if (clinit != nullptr) {
    VLOG(compiler) << klass->PrettyClass() << ' ' << clinit->PrettyMethod(true);
    return false;
  }
  if (klass->HasSuperClass()) {
    ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
    if (!NoClinitInDependency(handle_scope_super, self, class_loader)) {
      return false;
    }
  }

  uint32_t num_if = klass->NumDirectInterfaces();
  for (size_t i = 0; i < num_if; i++) {
    ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
    StackHandleScope<1> hs(self);
    Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
    if (!NoClinitInDependency(handle_interface, self, class_loader)) {
      return false;
    }
  }

  return true;
}

const ParallelCompilationManager* const manager_;
};

void CompilerDriver::InitializeClasses(jobject jni_class_loader,
                                       const DexFile& dex_file,
                                       const std::vector<const DexFile*>& dex_files,
                                       TimingLogger* timings) {
  TimingLogger::ScopedTiming t("InitializeNoClinit", timings);

  // Initialization allocates objects and needs to run single-threaded to be deterministic.
  bool force_determinism = GetCompilerOptions().IsForceDeterminism();
  ThreadPool* init_thread_pool =
      force_determinism ? single_thread_pool_.get() : parallel_thread_pool_.get();
  size_t init_thread_count = force_determinism ? 1U : parallel_thread_count_;

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ParallelCompilationManager context(
      class_linker, jni_class_loader, this, &dex_file, dex_files, init_thread_pool);
  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
    // Set the concurrency to 1 to support initialization for app images, since the
    // transaction doesn't support multithreading now.
    // TODO: remove this when transactional mode supports multithreading.
    init_thread_count = 1U;
  }
  InitializeClassVisitor visitor(&context);
  context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
}

class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
 public:
  explicit InitializeArrayClassesAndCreateConflictTablesVisitor(VariableSizedHandleScope& hs)
      : hs_(hs) {}

  virtual bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
      return true;
    }
    if (klass->IsArrayClass()) {
      StackHandleScope<1> hs(Thread::Current());
      auto h_klass = hs.NewHandleWrapper(&klass);
      Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(), h_klass, true, true);
    }
    // Collect handles since there may be thread suspension in future EnsureInitialized.
    to_visit_.push_back(hs_.NewHandle(klass));
    return true;
  }

  void FillAllIMTAndConflictTables() REQUIRES_SHARED(Locks::mutator_lock_) {
    for (Handle<mirror::Class> c : to_visit_) {
      // Create the conflict tables.
      FillIMTAndConflictTables(c.Get());
    }
  }

 private:
  void FillIMTAndConflictTables(ObjPtr<mirror::Class> klass)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!klass->ShouldHaveImt()) {
      return;
    }
    if (visited_classes_.find(klass) != visited_classes_.end()) {
      return;
    }
    if (klass->HasSuperClass()) {
      FillIMTAndConflictTables(klass->GetSuperClass());
    }
    if (!klass->IsTemp()) {
      Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass);
    }
    visited_classes_.insert(klass);
  }

  VariableSizedHandleScope& hs_;
  std::vector<Handle<mirror::Class>> to_visit_;
  std::unordered_set<ObjPtr<mirror::Class>, HashObjPtr> visited_classes_;
};

void CompilerDriver::InitializeClasses(jobject class_loader,
                                       const std::vector<const DexFile*>& dex_files,
                                       TimingLogger* timings) {
  for (size_t i = 0; i != dex_files.size(); ++i) {
    const DexFile* dex_file = dex_files[i];
    CHECK(dex_file != nullptr);
    InitializeClasses(class_loader, *dex_file, dex_files, timings);
  }
  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
    // Make sure that we call EnsureInitialized on all the array classes to call
    // SetVerificationAttempted so that the access flags are set. If we do not do this
    // they get changed at runtime, resulting in more dirty image pages.
    // Also create conflict tables.
    // Only useful if we are compiling an image (image_classes_ is not null).
    ScopedObjectAccess soa(Thread::Current());
    VariableSizedHandleScope hs(soa.Self());
    InitializeArrayClassesAndCreateConflictTablesVisitor visitor(hs);
    Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
    visitor.FillAllIMTAndConflictTables();
  }
  if (GetCompilerOptions().IsBootImage()) {
    // Prune garbage objects created during aborted transactions.
    Runtime::Current()->GetHeap()->CollectGarbage(true);
  }
}

void CompilerDriver::Compile(jobject class_loader,
                             const std::vector<const DexFile*>& dex_files,
                             TimingLogger* timings) {
  if (kDebugProfileGuidedCompilation) {
    LOG(INFO) << "[ProfileGuidedCompilation] "
              << ((profile_compilation_info_ == nullptr)
                      ? "null"
                      : profile_compilation_info_->DumpInfo(&dex_files));
  }
  current_dex_to_dex_methods_ = nullptr;
  Thread* const self = Thread::Current();
  {
    // Clear in case we aren't the first call to Compile.
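// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): FillIMTAndConflictTables
// above fills a class's tables only after its superclass and keeps a visited
// set so shared ancestors are processed exactly once. The equivalent memoized
// ancestor-first walk, with KlassNode as a hypothetical stand-in:
#include <unordered_set>

struct KlassNode {
  KlassNode* super = nullptr;  // at most one superclass
  bool has_table = false;
};

void FillTables(KlassNode* k, std::unordered_set<KlassNode*>* visited) {
  if (k == nullptr || !visited->insert(k).second) {
    return;  // null root, or this class was already filled
  }
  FillTables(k->super, visited);  // ancestors first, as the visitor requires
  k->has_table = true;            // stand-in for the real IMT/conflict-table fill
}
// ---------------------------------------------------------------------------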
MutexLock mu(self, dex_to_dex_references_lock_); dex_to_dex_references_.clear(); } for (const DexFile* dex_file : dex_files) { CHECK(dex_file != nullptr); CompileDexFile(class_loader, *dex_file, dex_files, parallel_thread_pool_.get(), parallel_thread_count_, timings); const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool(); const size_t arena_alloc = arena_pool->GetBytesAllocated(); max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_); Runtime::Current()->ReclaimArenaPoolMemory(); } ArrayRef dex_to_dex_references; { // From this point on, we shall not modify dex_to_dex_references_, so // just grab a reference to it that we use without holding the mutex. MutexLock lock(self, dex_to_dex_references_lock_); dex_to_dex_references = ArrayRef(dex_to_dex_references_); } for (const auto& method_set : dex_to_dex_references) { current_dex_to_dex_methods_ = &method_set.GetMethodIndexes(); CompileDexFile(class_loader, method_set.GetDexFile(), dex_files, parallel_thread_pool_.get(), parallel_thread_count_, timings); } current_dex_to_dex_methods_ = nullptr; VLOG(compiler) << "Compile: " << GetMemoryUsageString(false); } class CompileClassVisitor : public CompilationVisitor { public: explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {} virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE { ATRACE_CALL(); const DexFile& dex_file = *manager_->GetDexFile(); const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); ClassLinker* class_linker = manager_->GetClassLinker(); jobject jclass_loader = manager_->GetClassLoader(); ClassReference ref(&dex_file, class_def_index); // Skip compiling classes with generic verifier failures since they will still fail at runtime if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) { return; } // Use a scoped object access to perform to the quick SkipClass check. const char* descriptor = dex_file.GetClassDescriptor(class_def); ScopedObjectAccess soa(Thread::Current()); StackHandleScope<3> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(jclass_loader))); Handle klass( hs.NewHandle(class_linker->FindClass(soa.Self(), descriptor, class_loader))); Handle dex_cache; if (klass == nullptr) { soa.Self()->AssertPendingException(); soa.Self()->ClearException(); dex_cache = hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)); } else if (SkipClass(jclass_loader, dex_file, klass.Get())) { return; } else { dex_cache = hs.NewHandle(klass->GetDexCache()); } const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { // empty class, probably a marker interface return; } // Go to native so that we don't block GC during compilation. ScopedThreadSuspension sts(soa.Self(), kNative); CompilerDriver* const driver = manager_->GetCompiler(); // Can we run DEX-to-DEX compiler on this class ? 
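// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): Compile above holds
// dex_to_dex_references_lock_ only long enough to capture an immutable view
// (an ArrayRef) of the collected references, then iterates with no lock held
// because the container is no longer modified by contract. The same shape:
#include <mutex>
#include <vector>

std::mutex refs_lock;
std::vector<int> refs;  // stand-in for dex_to_dex_references_

void ProcessAllRefs() {
  const std::vector<int>* snapshot = nullptr;
  {
    std::lock_guard<std::mutex> lock(refs_lock);
    snapshot = &refs;  // by contract, refs is not modified from here on
  }
  for (int r : *snapshot) {
    (void)r;  // second-phase work on each recorded entry, no lock held
  }
}
// ---------------------------------------------------------------------------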
optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level = GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def); ClassDataItemIterator it(dex_file, class_data); it.SkipAllFields(); bool compilation_enabled = driver->IsClassToCompile( dex_file.StringByTypeIdx(class_def.class_idx_)); // Compile direct methods int64_t previous_direct_method_idx = -1; while (it.HasNextDirectMethod()) { uint32_t method_idx = it.GetMemberIndex(); if (method_idx == previous_direct_method_idx) { // smali can create dex files with two encoded_methods sharing the same method_idx // http://code.google.com/p/smali/issues/detail?id=119 it.Next(); continue; } previous_direct_method_idx = method_idx; CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), it.GetMethodInvokeType(class_def), class_def_index, method_idx, class_loader, dex_file, dex_to_dex_compilation_level, compilation_enabled, dex_cache); it.Next(); } // Compile virtual methods int64_t previous_virtual_method_idx = -1; while (it.HasNextVirtualMethod()) { uint32_t method_idx = it.GetMemberIndex(); if (method_idx == previous_virtual_method_idx) { // smali can create dex files with two encoded_methods sharing the same method_idx // http://code.google.com/p/smali/issues/detail?id=119 it.Next(); continue; } previous_virtual_method_idx = method_idx; CompileMethod(soa.Self(), driver, it.GetMethodCodeItem(), it.GetMethodAccessFlags(), it.GetMethodInvokeType(class_def), class_def_index, method_idx, class_loader, dex_file, dex_to_dex_compilation_level, compilation_enabled, dex_cache); it.Next(); } DCHECK(!it.HasNext()); } private: const ParallelCompilationManager* const manager_; }; void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) { TimingLogger::ScopedTiming t("Compile Dex File", timings); ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this, &dex_file, dex_files, thread_pool); CompileClassVisitor visitor(&context); context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count); } void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method, size_t non_relative_linker_patch_count) { DCHECK(GetCompiledMethod(method_ref) == nullptr) << method_ref.dex_file->PrettyMethod(method_ref.dex_method_index); MethodTable::InsertResult result = compiled_methods_.Insert( DexFileReference(method_ref.dex_file, method_ref.dex_method_index), /*expected*/ nullptr, compiled_method); CHECK(result == MethodTable::kInsertResultSuccess); non_relative_linker_patch_count_.FetchAndAddRelaxed(non_relative_linker_patch_count); DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.dex_file->PrettyMethod(method_ref.dex_method_index); } bool CompilerDriver::GetCompiledClass(ClassReference ref, mirror::Class::Status* status) const { DCHECK(status != nullptr); // The table doesn't know if something wasn't inserted. For this case it will return // kStatusNotReady. To handle this, just assume anything we didn't try to verify is not compiled. 
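// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): both method loops above
// tolerate dex files in which consecutive encoded_methods repeat a method_idx
// (a known smali quirk) by remembering the previous index and skipping
// repeats. The same filtering in isolation:
#include <cstddef>
#include <cstdint>
#include <vector>

size_t CountUniqueMethods(const std::vector<uint32_t>& method_indexes) {
  size_t unique = 0;
  int64_t previous = -1;  // -1 cannot collide with any valid 32-bit index
  for (uint32_t idx : method_indexes) {
    if (static_cast<int64_t>(idx) == previous) {
      continue;  // duplicate of the previous entry: skipped, like CompileMethod
    }
    previous = idx;
    ++unique;  // stand-in for the CompileMethod(...) call
  }
  return unique;
}
// ---------------------------------------------------------------------------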
  if (!compiled_classes_.Get(DexFileReference(ref.first, ref.second), status) ||
      *status < mirror::Class::kStatusRetryVerificationAtRuntime) {
    return false;
  }
  return true;
}

void CompilerDriver::RecordClassStatus(ClassReference ref, mirror::Class::Status status) {
  switch (status) {
    case mirror::Class::kStatusErrorResolved:
    case mirror::Class::kStatusErrorUnresolved:
    case mirror::Class::kStatusNotReady:
    case mirror::Class::kStatusResolved:
    case mirror::Class::kStatusRetryVerificationAtRuntime:
    case mirror::Class::kStatusVerified:
    case mirror::Class::kStatusSuperclassValidated:
    case mirror::Class::kStatusInitialized:
      break;  // Expected states.
    default:
      LOG(FATAL) << "Unexpected class status for class "
                 << PrettyDescriptor(
                        ref.first->GetClassDescriptor(ref.first->GetClassDef(ref.second)))
                 << " of " << status;
  }

  ClassStateTable::InsertResult result;
  do {
    DexFileReference dex_ref(ref.first, ref.second);
    mirror::Class::Status existing = mirror::Class::kStatusNotReady;
    if (!compiled_classes_.Get(dex_ref, &existing)) {
      // Probably a uses-library class, bail.
      if (kIsDebugBuild) {
        // Check to make sure it's not a dex file for an oat file we are compiling, since
        // for those the lookup should always succeed. These do not include classes in dex
        // files for used libraries.
        for (const DexFile* dex_file : GetDexFilesForOatFile()) {
          CHECK_NE(dex_ref.dex_file, dex_file) << dex_ref.dex_file->GetLocation();
        }
      }
      return;
    }
    if (existing >= status) {
      // Existing status is already better than we expect, break.
      break;
    }
    // Update the status if we now have a greater one. This happens with vdex,
    // which records that a class is verified, but does not resolve it.
    result = compiled_classes_.Insert(dex_ref, existing, status);
    CHECK(result != ClassStateTable::kInsertResultInvalidDexFile);
  } while (result != ClassStateTable::kInsertResultSuccess);
}

CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
  CompiledMethod* compiled_method = nullptr;
  compiled_methods_.Get(DexFileReference(ref.dex_file, ref.dex_method_index), &compiled_method);
  return compiled_method;
}

bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
                                                     uint16_t class_def_idx,
                                                     const DexFile& dex_file) const {
  const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
  if (verified_method != nullptr) {
    return !verified_method->HasVerificationFailures();
  }
  // If we can't find verification metadata, check if this is a system class (we trust
  // that system classes have their methods verified). If it's not, be conservative and
  // assume the method has not been verified successfully.
  // TODO: When compiling the boot image it should be safe to assume that everything is
  // verified, even if methods are not found in the verification cache.
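// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): RecordClassStatus above
// only ever raises a class's recorded status and retries when another thread
// wins the race. The same monotonic-maximum loop with a std::atomic:
#include <atomic>

void RaiseStatus(std::atomic<int>* recorded, int status) {
  int existing = recorded->load();
  while (existing < status) {
    // On failure, compare_exchange_weak reloads `existing` and the loop
    // re-checks whether our status is still an improvement.
    if (recorded->compare_exchange_weak(existing, status)) {
      break;
    }
  }
}
// ---------------------------------------------------------------------------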
const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx)); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Thread* self = Thread::Current(); ScopedObjectAccess soa(self); bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr; if (!is_system_class) { self->ClearException(); } return is_system_class; } size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const { return non_relative_linker_patch_count_.LoadRelaxed(); } void CompilerDriver::SetRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index, bool requires) { WriterMutexLock mu(self, requires_constructor_barrier_lock_); requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires); } bool CompilerDriver::RequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index) { ClassReference class_ref(dex_file, class_def_index); { ReaderMutexLock mu(self, requires_constructor_barrier_lock_); auto it = requires_constructor_barrier_.find(class_ref); if (it != requires_constructor_barrier_.end()) { return it->second; } } WriterMutexLock mu(self, requires_constructor_barrier_lock_); const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index); requires_constructor_barrier_.emplace(class_ref, requires); return requires; } std::string CompilerDriver::GetMemoryUsageString(bool extended) const { std::ostringstream oss; const gc::Heap* const heap = Runtime::Current()->GetHeap(); const size_t java_alloc = heap->GetBytesAllocated(); oss << "arena alloc=" << PrettySize(max_arena_alloc_) << " (" << max_arena_alloc_ << "B)"; oss << " java alloc=" << PrettySize(java_alloc) << " (" << java_alloc << "B)"; #if defined(__BIONIC__) || defined(__GLIBC__) const struct mallinfo info = mallinfo(); const size_t allocated_space = static_cast(info.uordblks); const size_t free_space = static_cast(info.fordblks); oss << " native alloc=" << PrettySize(allocated_space) << " (" << allocated_space << "B)" << " free=" << PrettySize(free_space) << " (" << free_space << "B)"; #endif compiled_method_storage_.DumpMemoryUsage(oss, extended); return oss.str(); } bool CompilerDriver::MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const { // We're not allowed to inline across dex files if we're the no-inline-from dex file. if (inlined_from != inlined_into && compiler_options_->GetNoInlineFromDexFile() != nullptr && ContainsElement(*compiler_options_->GetNoInlineFromDexFile(), inlined_from)) { return false; } return true; } void CompilerDriver::InitializeThreadPools() { size_t parallel_count = parallel_thread_count_ > 0 ? 
parallel_thread_count_ - 1 : 0; parallel_thread_pool_.reset( new ThreadPool("Compiler driver thread pool", parallel_count)); single_thread_pool_.reset(new ThreadPool("Single-threaded Compiler driver thread pool", 0)); } void CompilerDriver::FreeThreadPools() { parallel_thread_pool_.reset(); single_thread_pool_.reset(); } void CompilerDriver::SetDexFilesForOatFile(const std::vector& dex_files) { dex_files_for_oat_file_ = dex_files; for (const DexFile* dex_file : dex_files) { if (!compiled_classes_.HaveDexFile(dex_file)) { compiled_classes_.AddDexFile(dex_file, dex_file->NumClassDefs()); } } } bool CompilerDriver::CanAssumeVerified(ClassReference ref) const { mirror::Class::Status existing = mirror::Class::kStatusNotReady; compiled_classes_.Get(DexFileReference(ref.first, ref.second), &existing); return existing >= mirror::Class::kStatusVerified; } } // namespace art android-platform-art-8.1.0+r23/compiler/driver/compiler_driver.h000066400000000000000000000520101336577252300246260ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DRIVER_COMPILER_DRIVER_H_ #define ART_COMPILER_DRIVER_COMPILER_DRIVER_H_ #include #include #include #include #include "arch/instruction_set.h" #include "base/array_ref.h" #include "base/bit_utils.h" #include "base/mutex.h" #include "base/timing_logger.h" #include "class_reference.h" #include "compiler.h" #include "dex_file.h" #include "dex_file_types.h" #include "driver/compiled_method_storage.h" #include "jit/profile_compilation_info.h" #include "invoke_type.h" #include "method_reference.h" #include "mirror/class.h" // For mirror::Class::Status. #include "os.h" #include "safe_map.h" #include "thread_pool.h" #include "utils/atomic_dex_ref_map.h" #include "utils/dex_cache_arrays_layout.h" namespace art { namespace mirror { class DexCache; } // namespace mirror namespace verifier { class MethodVerifier; class VerifierDepsTest; } // namespace verifier class BitVector; class CompiledMethod; class CompilerOptions; class DexCompilationUnit; struct InlineIGetIPutData; class InstructionSetFeatures; class InternTable; class ParallelCompilationManager; class ScopedObjectAccess; template class SrcMap; template class Handle; class TimingLogger; class VdexFile; class VerificationResults; class VerifiedMethod; enum EntryPointCallingConvention { // ABI of invocations to a method's interpreter entry point. kInterpreterAbi, // ABI of calls to a method's native code, only used for native methods. kJniAbi, // ABI of calls to a method's quick code entry point. kQuickAbi }; class CompilerDriver { public: // Create a compiler targeting the requested "instruction_set". // "image" should be true if image specific optimizations should be // enabled. "image_classes" lets the compiler know what classes it // can assume will be in the image, with null implying all available // classes. 
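// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): RequiresConstructorBarrier
// above first probes its cache under a shared (reader) lock and only takes the
// exclusive lock to compute and memoize a missing entry, i.e. the
// reader-fast-path shape below (the cache contents here are hypothetical):
#include <map>
#include <shared_mutex>

std::shared_timed_mutex cache_lock;
std::map<int, bool> cache;

bool LookupOrCompute(int key, bool (*compute)(int)) {
  {
    std::shared_lock<std::shared_timed_mutex> read_lock(cache_lock);
    auto it = cache.find(key);
    if (it != cache.end()) {
      return it->second;  // fast path: many readers may probe concurrently
    }
  }
  std::unique_lock<std::shared_timed_mutex> write_lock(cache_lock);
  const bool value = compute(key);  // racing writers recompute; that is harmless
  cache.emplace(key, value);        // emplace keeps any entry inserted first
  return value;
}
// ---------------------------------------------------------------------------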
CompilerDriver(const CompilerOptions* compiler_options, VerificationResults* verification_results, Compiler::Kind compiler_kind, InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, std::unordered_set* image_classes, std::unordered_set* compiled_classes, std::unordered_set* compiled_methods, size_t thread_count, bool dump_stats, bool dump_passes, CumulativeLogger* timer, int swap_fd, const ProfileCompilationInfo* profile_compilation_info); ~CompilerDriver(); // Set dex files that will be stored in the oat file after being compiled. void SetDexFilesForOatFile(const std::vector& dex_files); // Get dex file that will be stored in the oat file after being compiled. ArrayRef GetDexFilesForOatFile() const { return ArrayRef(dex_files_for_oat_file_); } void CompileAll(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_, !dex_to_dex_references_lock_); // Compile a single Method. void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!dex_to_dex_references_lock_); VerificationResults* GetVerificationResults() const; InstructionSet GetInstructionSet() const { return instruction_set_; } const InstructionSetFeatures* GetInstructionSetFeatures() const { return instruction_set_features_; } const CompilerOptions& GetCompilerOptions() const { return *compiler_options_; } Compiler* GetCompiler() const { return compiler_.get(); } const std::unordered_set* GetImageClasses() const { return image_classes_.get(); } // Generate the trampolines that are invoked by unresolved direct methods. std::unique_ptr> CreateJniDlsymLookup() const; std::unique_ptr> CreateQuickGenericJniTrampoline() const; std::unique_ptr> CreateQuickImtConflictTrampoline() const; std::unique_ptr> CreateQuickResolutionTrampoline() const; std::unique_ptr> CreateQuickToInterpreterBridge() const; bool GetCompiledClass(ClassReference ref, mirror::Class::Status* status) const; CompiledMethod* GetCompiledMethod(MethodReference ref) const; size_t GetNonRelativeLinkerPatchCount() const; // Add a compiled method. void AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method, size_t non_relative_linker_patch_count); void SetRequiresConstructorBarrier(Thread* self, const DexFile* dex_file, uint16_t class_def_index, bool requires) REQUIRES(!requires_constructor_barrier_lock_); // Do the methods for this class require a constructor barrier (prior to the return)? // The answer is "yes", if and only if this class has any instance final fields. // (This must not be called for any non- methods; the answer would be "no"). // // --- // // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end // of the invoked constructor. The constructor barrier is a conservative implementation means of // enforcing the freezes happen-before the object being constructed is observable by another // thread. // // Note: This question only makes sense for instance constructors; // static constructors (despite possibly having finals) never need // a barrier. 
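// ---------------------------------------------------------------------------
// Illustrative sketch (standard C++, not ART code): the constructor barrier
// discussed above amounts to a release fence at the end of <init>, so writes
// to final fields cannot be reordered past the store that later publishes the
// object. A standard C++ analog of QuasiAtomic::ThreadFenceForConstructor():
#include <atomic>

struct Box {
  int final_field;

  explicit Box(int value) : final_field(value) {
    // Writes above this fence cannot be reordered with atomic stores issued
    // after it, e.g. the store publishing the new Box to another thread.
    std::atomic_thread_fence(std::memory_order_release);
  }
};
// ---------------------------------------------------------------------------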
  //
  // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
  // class initialization as:
  //
  //   lock(class.lock)
  //   class.state = initializing
  //   unlock(class.lock)
  //
  //   invoke <clinit>
  //
  //   lock(class.lock)
  //   class.state = initialized
  //   unlock(class.lock)              <-- acts as a release
  //
  // The last operation in the above example acts as an atomic release
  // for any stores in <clinit>, which ends up being stricter
  // than what a constructor barrier needs.
  //
  // See also QuasiAtomic::ThreadFenceForConstructor().
  bool RequiresConstructorBarrier(Thread* self,
                                  const DexFile* dex_file,
                                  uint16_t class_def_index)
      REQUIRES(!requires_constructor_barrier_lock_);

  // Are runtime access checks necessary in the compiled code?
  bool CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
                                  ObjPtr<mirror::Class> resolved_class)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Are runtime access and instantiable checks necessary in the code?
  // out_is_finalizable is set to whether the type is finalizable.
  bool CanAccessInstantiableTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
                                              ObjPtr<mirror::Class> resolved_class,
                                              bool* out_is_finalizable)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Resolve compiling method's class. Returns null on failure.
  mirror::Class* ResolveCompilingMethodsClass(const ScopedObjectAccess& soa,
                                              Handle<mirror::DexCache> dex_cache,
                                              Handle<mirror::ClassLoader> class_loader,
                                              const DexCompilationUnit* mUnit)
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Class* ResolveClass(const ScopedObjectAccess& soa,
                              Handle<mirror::DexCache> dex_cache,
                              Handle<mirror::ClassLoader> class_loader,
                              dex::TypeIndex type_index,
                              const DexCompilationUnit* mUnit)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Resolve a field. Returns null on failure, including incompatible class change.
  // NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
  ArtField* ResolveField(const ScopedObjectAccess& soa,
                         Handle<mirror::DexCache> dex_cache,
                         Handle<mirror::ClassLoader> class_loader,
                         const DexCompilationUnit* mUnit,
                         uint32_t field_idx,
                         bool is_static)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Resolve a field with a given dex file.
  ArtField* ResolveFieldWithDexFile(const ScopedObjectAccess& soa,
                                    Handle<mirror::DexCache> dex_cache,
                                    Handle<mirror::ClassLoader> class_loader,
                                    const DexFile* dex_file,
                                    uint32_t field_idx,
                                    bool is_static)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
  std::pair<bool, MemberOffset> IsFastInstanceField(mirror::DexCache* dex_cache,
                                                    mirror::Class* referrer_class,
                                                    ArtField* resolved_field,
                                                    uint16_t field_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Resolve a method. Returns null on failure, including incompatible class change.
  ArtMethod* ResolveMethod(ScopedObjectAccess& soa,
                           Handle<mirror::DexCache> dex_cache,
                           Handle<mirror::ClassLoader> class_loader,
                           const DexCompilationUnit* mUnit,
                           uint32_t method_idx,
                           InvokeType invoke_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ProcessedInstanceField(bool resolved);
  void ProcessedStaticField(bool resolved, bool local);

  // Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, MemberOffset* field_offset, bool* is_volatile) REQUIRES(!Locks::mutator_lock_); ArtField* ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put, const ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_); const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc); bool GetSupportBootImageFixup() const { return support_boot_image_fixup_; } void SetSupportBootImageFixup(bool support_boot_image_fixup) { support_boot_image_fixup_ = support_boot_image_fixup; } void SetCompilerContext(void* compiler_context) { compiler_context_ = compiler_context; } void* GetCompilerContext() const { return compiler_context_; } size_t GetThreadCount() const { return parallel_thread_count_; } bool GetDumpStats() const { return dump_stats_; } bool GetDumpPasses() const { return dump_passes_; } CumulativeLogger* GetTimingsLogger() const { return timings_logger_; } void SetDedupeEnabled(bool dedupe_enabled) { compiled_method_storage_.SetDedupeEnabled(dedupe_enabled); } bool DedupeEnabled() const { return compiled_method_storage_.DedupeEnabled(); } // Checks if class specified by type_idx is one of the image_classes_ bool IsImageClass(const char* descriptor) const; // Checks whether the provided class should be compiled, i.e., is in classes_to_compile_. bool IsClassToCompile(const char* descriptor) const; // Checks whether the provided method should be compiled, i.e., is in method_to_compile_. bool IsMethodToCompile(const MethodReference& method_ref) const; // Checks whether profile guided compilation is enabled and if the method should be compiled // according to the profile file. bool ShouldCompileBasedOnProfile(const MethodReference& method_ref) const; // Checks whether profile guided verification is enabled and if the method should be verified // according to the profile file. bool ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, uint16_t class_idx) const; void RecordClassStatus(ClassReference ref, mirror::Class::Status status); // Checks if the specified method has been verified without failures. Returns // false if the method is not in the verification results (GetVerificationResults). bool IsMethodVerifiedWithoutFailures(uint32_t method_idx, uint16_t class_def_idx, const DexFile& dex_file) const; // Get memory usage during compilation. std::string GetMemoryUsageString(bool extended) const; void SetHadHardVerifierFailure() { had_hard_verifier_failure_ = true; } Compiler::Kind GetCompilerKind() { return compiler_kind_; } CompiledMethodStorage* GetCompiledMethodStorage() { return &compiled_method_storage_; } // Can we assume that the klass is loaded? 
bool CanAssumeClassIsLoaded(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_); bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const { if (!kIsTargetBuild) { return MayInlineInternal(inlined_from, inlined_into); } return true; } void MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref) REQUIRES(!dex_to_dex_references_lock_); const BitVector* GetCurrentDexToDexMethods() const { return current_dex_to_dex_methods_; } const ProfileCompilationInfo* GetProfileCompilationInfo() const { return profile_compilation_info_; } bool CanAssumeVerified(ClassReference ref) const; private: void PreCompile(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); // Attempt to resolve all type, methods, fields, and strings // referenced from code in the dex file following PathClassLoader // ordering semantics. void Resolve(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void ResolveDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); // Do fast verification through VerifierDeps if possible. Return whether // verification was successful. bool FastVerify(jobject class_loader, const std::vector& dex_files, TimingLogger* timings); void Verify(jobject class_loader, const std::vector& dex_files, TimingLogger* timings); void VerifyDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void SetVerified(jobject class_loader, const std::vector& dex_files, TimingLogger* timings); void SetVerifiedDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void InitializeClasses(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); void Compile(jobject class_loader, const std::vector& dex_files, TimingLogger* timings) REQUIRES(!dex_to_dex_references_lock_); void CompileDexFile(jobject class_loader, const DexFile& dex_file, const std::vector& dex_files, ThreadPool* thread_pool, size_t thread_count, TimingLogger* timings) REQUIRES(!Locks::mutator_lock_); bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const; void InitializeThreadPools(); void FreeThreadPools(); void CheckThreadPools(); bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const; const CompilerOptions* const compiler_options_; VerificationResults* const verification_results_; std::unique_ptr compiler_; Compiler::Kind compiler_kind_; const InstructionSet instruction_set_; const InstructionSetFeatures* const instruction_set_features_; // All class references that require constructor barriers. If the class reference is not in the // set then the result has not yet been computed. 
mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::map requires_constructor_barrier_ GUARDED_BY(requires_constructor_barrier_lock_); // All class references that this compiler has compiled. Indexed by class defs. using ClassStateTable = AtomicDexRefMap; ClassStateTable compiled_classes_; typedef AtomicDexRefMap MethodTable; private: // All method references that this compiler has compiled. MethodTable compiled_methods_; // Number of non-relative patches in all compiled methods. These patches need space // in the .oat_patches ELF section if requested in the compiler options. Atomic non_relative_linker_patch_count_; // If image_ is true, specifies the classes that will be included in the image. // Note if image_classes_ is null, all classes are included in the image. std::unique_ptr> image_classes_; // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null, // all classes are eligible for compilation (duplication filters etc. will still apply). // This option may be restricted to the boot image, depending on a flag in the implementation. std::unique_ptr> classes_to_compile_; // Specifies the methods that will be compiled. Note that if methods_to_compile_ is null, // all methods are eligible for compilation (compilation filters etc. will still apply). // This option may be restricted to the boot image, depending on a flag in the implementation. std::unique_ptr> methods_to_compile_; bool had_hard_verifier_failure_; // A thread pool that can (potentially) run tasks in parallel. std::unique_ptr parallel_thread_pool_; size_t parallel_thread_count_; // A thread pool that guarantees running single-threaded on the main thread. std::unique_ptr single_thread_pool_; class AOTCompilationStats; std::unique_ptr stats_; bool dump_stats_; const bool dump_passes_; CumulativeLogger* const timings_logger_; typedef void (*CompilerCallbackFn)(CompilerDriver& driver); typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver); void* compiler_context_; bool support_boot_image_fixup_; // List of dex files that will be stored in the oat file. std::vector dex_files_for_oat_file_; CompiledMethodStorage compiled_method_storage_; // Info for profile guided compilation. const ProfileCompilationInfo* const profile_compilation_info_; size_t max_arena_alloc_; // Data for delaying dex-to-dex compilation. Mutex dex_to_dex_references_lock_; // In the first phase, dex_to_dex_references_ collects methods for dex-to-dex compilation. class DexFileMethodSet; std::vector dex_to_dex_references_ GUARDED_BY(dex_to_dex_references_lock_); // In the second phase, current_dex_to_dex_methods_ points to the BitVector with method // indexes for dex-to-dex compilation in the current dex file. const BitVector* current_dex_to_dex_methods_; friend class CompileClassVisitor; friend class DexToDexDecompilerTest; friend class verifier::VerifierDepsTest; DISALLOW_COPY_AND_ASSIGN(CompilerDriver); }; } // namespace art #endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_H_ android-platform-art-8.1.0+r23/compiler/driver/compiler_driver_test.cc000066400000000000000000000375731336577252300260440ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "driver/compiler_driver.h" #include #include #include #include "art_method-inl.h" #include "class_linker-inl.h" #include "common_compiler_test.h" #include "compiler_callbacks.h" #include "dex_file.h" #include "dex_file_types.h" #include "gc/heap.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" #include "mirror/dex_cache-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "handle_scope-inl.h" #include "jit/profile_compilation_info.h" #include "scoped_thread_state_change-inl.h" namespace art { class CompilerDriverTest : public CommonCompilerTest { protected: void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) { TimingLogger timings("CompilerDriverTest::CompileAll", false, false); TimingLogger::ScopedTiming t(__FUNCTION__, &timings); dex_files_ = GetDexFiles(class_loader); compiler_driver_->SetDexFilesForOatFile(dex_files_);; compiler_driver_->CompileAll(class_loader, dex_files_, &timings); t.NewTiming("MakeAllExecutable"); MakeAllExecutable(class_loader); } void EnsureCompiled(jobject class_loader, const char* class_name, const char* method, const char* signature, bool is_virtual) REQUIRES(!Locks::mutator_lock_) { CompileAll(class_loader); Thread::Current()->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); CHECK(started); env_ = Thread::Current()->GetJniEnv(); class_ = env_->FindClass(class_name); CHECK(class_ != nullptr) << "Class not found: " << class_name; if (is_virtual) { mid_ = env_->GetMethodID(class_, method, signature); } else { mid_ = env_->GetStaticMethodID(class_, method, signature); } CHECK(mid_ != nullptr) << "Method not found: " << class_name << "." << method << signature; } void MakeAllExecutable(jobject class_loader) { const std::vector class_path = GetDexFiles(class_loader); for (size_t i = 0; i != class_path.size(); ++i) { const DexFile* dex_file = class_path[i]; CHECK(dex_file != nullptr); MakeDexFileExecutable(class_loader, *dex_file); } } void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); for (size_t i = 0; i < dex_file.NumClassDefs(); i++) { const DexFile::ClassDef& class_def = dex_file.GetClassDef(i); const char* descriptor = dex_file.GetClassDescriptor(class_def); ScopedObjectAccess soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); Handle loader( hs.NewHandle(soa.Decode(class_loader))); mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader); CHECK(c != nullptr); const auto pointer_size = class_linker->GetImagePointerSize(); for (auto& m : c->GetMethods(pointer_size)) { MakeExecutable(&m); } } } JNIEnv* env_; jclass class_; jmethodID mid_; std::vector dex_files_; }; // Disabled due to 10 second runtime on host // TODO: Update the test for hash-based dex cache arrays. 
Bug: 30627598 TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) { CompileAll(nullptr); // All libcore references should resolve ScopedObjectAccess soa(Thread::Current()); ASSERT_TRUE(java_lang_dex_file_ != nullptr); const DexFile& dex = *java_lang_dex_file_; ObjPtr dex_cache = class_linker_->FindDexCache(soa.Self(), dex); EXPECT_EQ(dex.NumStringIds(), dex_cache->NumStrings()); for (size_t i = 0; i < dex_cache->NumStrings(); i++) { const mirror::String* string = dex_cache->GetResolvedString(dex::StringIndex(i)); EXPECT_TRUE(string != nullptr) << "string_idx=" << i; } EXPECT_EQ(dex.NumTypeIds(), dex_cache->NumResolvedTypes()); for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { mirror::Class* type = dex_cache->GetResolvedType(dex::TypeIndex(i)); EXPECT_TRUE(type != nullptr) << "type_idx=" << i << " " << dex.GetTypeDescriptor(dex.GetTypeId(dex::TypeIndex(i))); } EXPECT_TRUE(dex_cache->StaticMethodSize() == dex_cache->NumResolvedMethods() || dex.NumMethodIds() == dex_cache->NumResolvedMethods()); auto* cl = Runtime::Current()->GetClassLinker(); auto pointer_size = cl->GetImagePointerSize(); for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { // FIXME: This is outdated for hash-based method array. ArtMethod* method = dex_cache->GetResolvedMethod(i, pointer_size); EXPECT_TRUE(method != nullptr) << "method_idx=" << i << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " " << dex.GetMethodName(dex.GetMethodId(i)); EXPECT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr) << "method_idx=" << i << " " << dex.GetMethodDeclaringClassDescriptor(dex.GetMethodId(i)) << " " << dex.GetMethodName(dex.GetMethodId(i)); } EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields() || dex.NumFieldIds() == dex_cache->NumResolvedFields()); for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { // FIXME: This is outdated for hash-based field array. ArtField* field = dex_cache->GetResolvedField(i, cl->GetImagePointerSize()); EXPECT_TRUE(field != nullptr) << "field_idx=" << i << " " << dex.GetFieldDeclaringClassDescriptor(dex.GetFieldId(i)) << " " << dex.GetFieldName(dex.GetFieldId(i)); } // TODO check Class::IsVerified for all classes // TODO: check that all Method::GetCode() values are non-null } TEST_F(CompilerDriverTest, AbstractMethodErrorStub) { jobject class_loader; { ScopedObjectAccess soa(Thread::Current()); class_loader = LoadDex("AbstractMethod"); } ASSERT_TRUE(class_loader != nullptr); EnsureCompiled(class_loader, "AbstractClass", "foo", "()V", true); // Create a jobj_ of ConcreteClass, NOT AbstractClass. jclass c_class = env_->FindClass("ConcreteClass"); jmethodID constructor = env_->GetMethodID(c_class, "", "()V"); jobject jobj_ = env_->NewObject(c_class, constructor); ASSERT_TRUE(jobj_ != nullptr); // Force non-virtual call to AbstractClass foo, will throw AbstractMethodError exception. 
env_->CallNonvirtualVoidMethod(jobj_, class_, mid_); EXPECT_EQ(env_->ExceptionCheck(), JNI_TRUE); jthrowable exception = env_->ExceptionOccurred(); env_->ExceptionClear(); jclass jlame = env_->FindClass("java/lang/AbstractMethodError"); EXPECT_TRUE(env_->IsInstanceOf(exception, jlame)); { ScopedObjectAccess soa(Thread::Current()); Thread::Current()->ClearException(); } } class CompilerDriverMethodsTest : public CompilerDriverTest { protected: std::unordered_set* GetCompiledMethods() OVERRIDE { return new std::unordered_set({ "byte StaticLeafMethods.identity(byte)", "int StaticLeafMethods.sum(int, int, int)", "double StaticLeafMethods.sum(double, double, double, double)" }); } }; TEST_F(CompilerDriverMethodsTest, Selection) { Thread* self = Thread::Current(); jobject class_loader; { ScopedObjectAccess soa(self); class_loader = LoadDex("StaticLeafMethods"); } ASSERT_NE(class_loader, nullptr); // Need to enable dex-file writability. Methods rejected to be compiled will run through the // dex-to-dex compiler. for (const DexFile* dex_file : GetDexFiles(class_loader)) { ASSERT_TRUE(dex_file->EnableWrite()); } CompileAll(class_loader); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ScopedObjectAccess soa(self); StackHandleScope<1> hs(self); Handle h_loader( hs.NewHandle(soa.Decode(class_loader))); mirror::Class* klass = class_linker->FindClass(self, "LStaticLeafMethods;", h_loader); ASSERT_NE(klass, nullptr); std::unique_ptr> expected(GetCompiledMethods()); const auto pointer_size = class_linker->GetImagePointerSize(); for (auto& m : klass->GetDirectMethods(pointer_size)) { std::string name = m.PrettyMethod(true); const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); ASSERT_NE(code, nullptr); if (expected->find(name) != expected->end()) { expected->erase(name); EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code)); } else { EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code)); } } EXPECT_TRUE(expected->empty()); } class CompilerDriverProfileTest : public CompilerDriverTest { protected: ProfileCompilationInfo* GetProfileCompilationInfo() OVERRIDE { ScopedObjectAccess soa(Thread::Current()); std::vector> dex_files = OpenTestDexFiles("ProfileTestMultiDex"); ProfileCompilationInfo info; for (const std::unique_ptr& dex_file : dex_files) { profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot, MethodReference(dex_file.get(), 1)); profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot, MethodReference(dex_file.get(), 2)); } return &profile_info_; } CompilerFilter::Filter GetCompilerFilter() const OVERRIDE { // Use a profile based filter. 
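// A minimal sketch of what drives this selection (mirroring the profile built in
// GetProfileCompilationInfo() above, which marks method indices 1 and 2 of each
// dex file as hot):
//
//   ProfileCompilationInfo info;
//   info.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot,
//                       MethodReference(dex_file, /* method_idx */ 1));
//   // With kSpeedProfile, only methods recorded in `info` receive compiled code;
//   // the rest fall back to the quick-to-interpreter bridge.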
return CompilerFilter::kSpeedProfile; } std::unordered_set GetExpectedMethodsForClass(const std::string& clazz) { if (clazz == "Main") { return std::unordered_set({ "java.lang.String Main.getA()", "java.lang.String Main.getB()"}); } else if (clazz == "Second") { return std::unordered_set({ "java.lang.String Second.getX()", "java.lang.String Second.getY()"}); } else { return std::unordered_set(); } } void CheckCompiledMethods(jobject class_loader, const std::string& clazz, const std::unordered_set& expected_methods) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Thread* self = Thread::Current(); ScopedObjectAccess soa(self); StackHandleScope<1> hs(self); Handle h_loader( hs.NewHandle(soa.Decode(class_loader))); mirror::Class* klass = class_linker->FindClass(self, clazz.c_str(), h_loader); ASSERT_NE(klass, nullptr); const auto pointer_size = class_linker->GetImagePointerSize(); size_t number_of_compiled_methods = 0; for (auto& m : klass->GetVirtualMethods(pointer_size)) { std::string name = m.PrettyMethod(true); const void* code = m.GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); ASSERT_NE(code, nullptr); if (expected_methods.find(name) != expected_methods.end()) { number_of_compiled_methods++; EXPECT_FALSE(class_linker->IsQuickToInterpreterBridge(code)); } else { EXPECT_TRUE(class_linker->IsQuickToInterpreterBridge(code)); } } EXPECT_EQ(expected_methods.size(), number_of_compiled_methods); } private: ProfileCompilationInfo profile_info_; }; TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) { Thread* self = Thread::Current(); jobject class_loader; { ScopedObjectAccess soa(self); class_loader = LoadDex("ProfileTestMultiDex"); } ASSERT_NE(class_loader, nullptr); // Need to enable dex-file writability. Methods rejected to be compiled will run through the // dex-to-dex compiler. for (const DexFile* dex_file : GetDexFiles(class_loader)) { ASSERT_TRUE(dex_file->EnableWrite()); } CompileAll(class_loader); std::unordered_set m = GetExpectedMethodsForClass("Main"); std::unordered_set s = GetExpectedMethodsForClass("Second"); CheckCompiledMethods(class_loader, "LMain;", m); CheckCompiledMethods(class_loader, "LSecond;", s); } // Test that a verify only compiler filter updates the CompiledClass map, // which will be used for OatClass. 
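// A rough sketch of how that map is consulted (the CheckVerifiedClass() helper
// below does exactly this; `ref` names a class by dex file and class def index):
//
//   mirror::Class::Status status;
//   if (compiler_driver_->GetCompiledClass(ref, &status)) {
//     EXPECT_EQ(status, mirror::Class::kStatusVerified);
//   }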
class CompilerDriverVerifyTest : public CompilerDriverTest { protected: CompilerFilter::Filter GetCompilerFilter() const OVERRIDE { return CompilerFilter::kVerify; } void CheckVerifiedClass(jobject class_loader, const std::string& clazz) const { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Thread* self = Thread::Current(); ScopedObjectAccess soa(self); StackHandleScope<1> hs(self); Handle h_loader( hs.NewHandle(soa.Decode(class_loader))); mirror::Class* klass = class_linker->FindClass(self, clazz.c_str(), h_loader); ASSERT_NE(klass, nullptr); EXPECT_TRUE(klass->IsVerified()); mirror::Class::Status status; bool found = compiler_driver_->GetCompiledClass( ClassReference(&klass->GetDexFile(), klass->GetDexTypeIndex().index_), &status); ASSERT_TRUE(found); EXPECT_EQ(status, mirror::Class::kStatusVerified); } }; TEST_F(CompilerDriverVerifyTest, VerifyCompilation) { Thread* self = Thread::Current(); jobject class_loader; { ScopedObjectAccess soa(self); class_loader = LoadDex("ProfileTestMultiDex"); } ASSERT_NE(class_loader, nullptr); CompileAll(class_loader); CheckVerifiedClass(class_loader, "LMain;"); CheckVerifiedClass(class_loader, "LSecond;"); } // Test that a class of status kStatusRetryVerificationAtRuntime is indeed recorded that way in the // driver. // Test that checks that classes can be assumed as verified if unloading mode is enabled and // the class status is at least verified. TEST_F(CompilerDriverVerifyTest, RetryVerifcationStatusCheckVerified) { Thread* const self = Thread::Current(); jobject class_loader; std::vector dex_files; const DexFile* dex_file = nullptr; { ScopedObjectAccess soa(self); class_loader = LoadDex("ProfileTestMultiDex"); ASSERT_NE(class_loader, nullptr); dex_files = GetDexFiles(class_loader); ASSERT_GT(dex_files.size(), 0u); dex_file = dex_files.front(); } compiler_driver_->SetDexFilesForOatFile(dex_files); callbacks_->SetDoesClassUnloading(true, compiler_driver_.get()); ClassReference ref(dex_file, 0u); // Test that the status is read from the compiler driver as expected. for (size_t i = mirror::Class::kStatusRetryVerificationAtRuntime; i < mirror::Class::kStatusMax; ++i) { const mirror::Class::Status expected_status = static_cast(i); // Skip unsupported status that are not supposed to be ever recorded. if (expected_status == mirror::Class::kStatusVerifyingAtRuntime || expected_status == mirror::Class::kStatusInitializing) { continue; } compiler_driver_->RecordClassStatus(ref, expected_status); mirror::Class::Status status = {}; ASSERT_TRUE(compiler_driver_->GetCompiledClass(ref, &status)); EXPECT_EQ(status, expected_status); // Check that we can assume verified if we are a status that is at least verified. if (status >= mirror::Class::kStatusVerified) { // Check that the class can be assumed as verified in the compiler driver. EXPECT_TRUE(callbacks_->CanAssumeVerified(ref)) << status; } } } // TODO: need check-cast test (when stub complete & we can throw/catch } // namespace art android-platform-art-8.1.0+r23/compiler/driver/compiler_options.cc000066400000000000000000000161421336577252300251720ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "compiler_options.h" #include namespace art { CompilerOptions::CompilerOptions() : compiler_filter_(CompilerFilter::kDefaultCompilerFilter), huge_method_threshold_(kDefaultHugeMethodThreshold), large_method_threshold_(kDefaultLargeMethodThreshold), small_method_threshold_(kDefaultSmallMethodThreshold), tiny_method_threshold_(kDefaultTinyMethodThreshold), num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold), inline_max_code_units_(kUnsetInlineMaxCodeUnits), no_inline_from_(nullptr), boot_image_(false), app_image_(false), top_k_profile_threshold_(kDefaultTopKProfileThreshold), debuggable_(false), generate_debug_info_(kDefaultGenerateDebugInfo), generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo), generate_build_id_(false), implicit_null_checks_(true), implicit_so_checks_(true), implicit_suspend_checks_(false), compile_pic_(false), verbose_methods_(), abort_on_hard_verifier_failure_(false), init_failure_output_(nullptr), dump_cfg_file_name_(""), dump_cfg_append_(false), force_determinism_(false), register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault), passes_to_run_(nullptr) { } CompilerOptions::~CompilerOptions() { // The destructor looks empty but it destroys a PassManagerOptions object. We keep it here // because we don't want to include the PassManagerOptions definition from the header file. } void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--huge-method-max", &huge_method_threshold_, Usage); } void CompilerOptions::ParseLargeMethodMax(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--large-method-max", &large_method_threshold_, Usage); } void CompilerOptions::ParseSmallMethodMax(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--small-method-max", &small_method_threshold_, Usage); } void CompilerOptions::ParseTinyMethodMax(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--tiny-method-max", &tiny_method_threshold_, Usage); } void CompilerOptions::ParseNumDexMethods(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--num-dex-methods", &num_dex_methods_threshold_, Usage); } void CompilerOptions::ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage) { ParseUintOption(option, "--inline-max-code-units", &inline_max_code_units_, Usage); } void CompilerOptions::ParseDumpInitFailures(const StringPiece& option, UsageFn Usage ATTRIBUTE_UNUSED) { DCHECK(option.starts_with("--dump-init-failures=")); std::string file_name = option.substr(strlen("--dump-init-failures=")).data(); init_failure_output_.reset(new std::ofstream(file_name)); if (init_failure_output_.get() == nullptr) { LOG(ERROR) << "Failed to allocate ofstream"; } else if (init_failure_output_->fail()) { LOG(ERROR) << "Failed to open " << file_name << " for writing the initialization " << "failures."; init_failure_output_.reset(); } } void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage) { DCHECK(option.starts_with("--register-allocation-strategy=")); StringPiece choice = 
option.substr(strlen("--register-allocation-strategy=")).data(); if (choice == "linear-scan") { register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan; } else if (choice == "graph-color") { register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor; } else { Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color."); } } bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) { if (option.starts_with("--compiler-filter=")) { const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, &compiler_filter_)) { Usage("Unknown --compiler-filter value %s", compiler_filter_string); } } else if (option == "--compile-pic") { compile_pic_ = true; } else if (option.starts_with("--huge-method-max=")) { ParseHugeMethodMax(option, Usage); } else if (option.starts_with("--large-method-max=")) { ParseLargeMethodMax(option, Usage); } else if (option.starts_with("--small-method-max=")) { ParseSmallMethodMax(option, Usage); } else if (option.starts_with("--tiny-method-max=")) { ParseTinyMethodMax(option, Usage); } else if (option.starts_with("--num-dex-methods=")) { ParseNumDexMethods(option, Usage); } else if (option.starts_with("--inline-max-code-units=")) { ParseInlineMaxCodeUnits(option, Usage); } else if (option == "--generate-debug-info" || option == "-g") { generate_debug_info_ = true; } else if (option == "--no-generate-debug-info") { generate_debug_info_ = false; } else if (option == "--generate-mini-debug-info") { generate_mini_debug_info_ = true; } else if (option == "--no-generate-mini-debug-info") { generate_mini_debug_info_ = false; } else if (option == "--generate-build-id") { generate_build_id_ = true; } else if (option == "--no-generate-build-id") { generate_build_id_ = false; } else if (option == "--debuggable") { debuggable_ = true; } else if (option.starts_with("--top-k-profile-threshold=")) { ParseDouble(option.data(), '=', 0.0, 100.0, &top_k_profile_threshold_, Usage); } else if (option == "--abort-on-hard-verifier-error") { abort_on_hard_verifier_failure_ = true; } else if (option.starts_with("--dump-init-failures=")) { ParseDumpInitFailures(option, Usage); } else if (option.starts_with("--dump-cfg=")) { dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data(); } else if (option == "--dump-cfg-append") { dump_cfg_append_ = true; } else if (option.starts_with("--register-allocation-strategy=")) { ParseRegisterAllocationStrategy(option, Usage); } else if (option.starts_with("--verbose-methods=")) { // TODO: rather than switch off compiler logging, make all VLOG(compiler) messages // conditional on having verbose methods. gLogVerbosity.compiler = false; Split(option.substr(strlen("--verbose-methods=")).ToString(), ',', &verbose_methods_); } else { // Option not recognized. return false; } return true; } } // namespace art android-platform-art-8.1.0+r23/compiler/driver/compiler_options.h000066400000000000000000000221731336577252300250350ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ #define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ #include #include #include #include "base/macros.h" #include "compiler_filter.h" #include "globals.h" #include "optimizing/register_allocator.h" #include "utils.h" namespace art { namespace verifier { class VerifierDepsTest; } // namespace verifier class DexFile; class CompilerOptions FINAL { public: // Guide heuristics to determine whether to compile method if profile data not available. static const size_t kDefaultHugeMethodThreshold = 10000; static const size_t kDefaultLargeMethodThreshold = 600; static const size_t kDefaultSmallMethodThreshold = 60; static const size_t kDefaultTinyMethodThreshold = 20; static const size_t kDefaultNumDexMethodsThreshold = 900; static constexpr double kDefaultTopKProfileThreshold = 90.0; static const bool kDefaultGenerateDebugInfo = false; static const bool kDefaultGenerateMiniDebugInfo = false; static const size_t kDefaultInlineMaxCodeUnits = 32; static constexpr size_t kUnsetInlineMaxCodeUnits = -1; CompilerOptions(); ~CompilerOptions(); CompilerFilter::Filter GetCompilerFilter() const { return compiler_filter_; } void SetCompilerFilter(CompilerFilter::Filter compiler_filter) { compiler_filter_ = compiler_filter; } bool IsAotCompilationEnabled() const { return CompilerFilter::IsAotCompilationEnabled(compiler_filter_); } bool IsJniCompilationEnabled() const { return CompilerFilter::IsJniCompilationEnabled(compiler_filter_); } bool IsQuickeningCompilationEnabled() const { return CompilerFilter::IsQuickeningCompilationEnabled(compiler_filter_); } bool IsVerificationEnabled() const { return CompilerFilter::IsVerificationEnabled(compiler_filter_); } bool AssumeClassesAreVerified() const { return compiler_filter_ == CompilerFilter::kAssumeVerified; } bool VerifyAtRuntime() const { return compiler_filter_ == CompilerFilter::kExtract; } bool IsAnyCompilationEnabled() const { return CompilerFilter::IsAnyCompilationEnabled(compiler_filter_); } size_t GetHugeMethodThreshold() const { return huge_method_threshold_; } size_t GetLargeMethodThreshold() const { return large_method_threshold_; } size_t GetSmallMethodThreshold() const { return small_method_threshold_; } size_t GetTinyMethodThreshold() const { return tiny_method_threshold_; } bool IsHugeMethod(size_t num_dalvik_instructions) const { return num_dalvik_instructions > huge_method_threshold_; } bool IsLargeMethod(size_t num_dalvik_instructions) const { return num_dalvik_instructions > large_method_threshold_; } bool IsSmallMethod(size_t num_dalvik_instructions) const { return num_dalvik_instructions > small_method_threshold_; } bool IsTinyMethod(size_t num_dalvik_instructions) const { return num_dalvik_instructions > tiny_method_threshold_; } size_t GetNumDexMethodsThreshold() const { return num_dex_methods_threshold_; } size_t GetInlineMaxCodeUnits() const { return inline_max_code_units_; } void SetInlineMaxCodeUnits(size_t units) { inline_max_code_units_ = units; } double GetTopKProfileThreshold() const { return top_k_profile_threshold_; } bool GetDebuggable() const { return debuggable_; } void 
SetDebuggable(bool value) { debuggable_ = value; } bool GetNativeDebuggable() const { return GetDebuggable() && GetGenerateDebugInfo(); } // This flag controls whether the compiler collects debugging information. // The other flags control how the information is written to disk. bool GenerateAnyDebugInfo() const { return GetGenerateDebugInfo() || GetGenerateMiniDebugInfo(); } bool GetGenerateDebugInfo() const { return generate_debug_info_; } bool GetGenerateMiniDebugInfo() const { return generate_mini_debug_info_; } bool GetGenerateBuildId() const { return generate_build_id_; } bool GetImplicitNullChecks() const { return implicit_null_checks_; } bool GetImplicitStackOverflowChecks() const { return implicit_so_checks_; } bool GetImplicitSuspendChecks() const { return implicit_suspend_checks_; } bool IsBootImage() const { return boot_image_; } bool IsAppImage() const { return app_image_; } void DisableAppImage() { app_image_ = false; } // Should the code be compiled as position independent? bool GetCompilePic() const { return compile_pic_; } bool HasVerboseMethods() const { return !verbose_methods_.empty(); } bool IsVerboseMethod(const std::string& pretty_method) const { for (const std::string& cur_method : verbose_methods_) { if (pretty_method.find(cur_method) != std::string::npos) { return true; } } return false; } std::ostream* GetInitFailureOutput() const { return init_failure_output_.get(); } bool AbortOnHardVerifierFailure() const { return abort_on_hard_verifier_failure_; } const std::vector* GetNoInlineFromDexFile() const { return no_inline_from_; } bool ParseCompilerOption(const StringPiece& option, UsageFn Usage); void SetNonPic() { compile_pic_ = false; } const std::string& GetDumpCfgFileName() const { return dump_cfg_file_name_; } bool GetDumpCfgAppend() const { return dump_cfg_append_; } bool IsForceDeterminism() const { return force_determinism_; } RegisterAllocator::Strategy GetRegisterAllocationStrategy() const { return register_allocation_strategy_; } const std::vector* GetPassesToRun() const { return passes_to_run_; } private: void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage); void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage); void ParseInlineMaxCodeUnits(const StringPiece& option, UsageFn Usage); void ParseNumDexMethods(const StringPiece& option, UsageFn Usage); void ParseTinyMethodMax(const StringPiece& option, UsageFn Usage); void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage); void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage); void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage); void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage); CompilerFilter::Filter compiler_filter_; size_t huge_method_threshold_; size_t large_method_threshold_; size_t small_method_threshold_; size_t tiny_method_threshold_; size_t num_dex_methods_threshold_; size_t inline_max_code_units_; // Dex files from which we should not inline code. // This is usually a very short list (i.e. a single dex file), so we // prefer vector<> over a lookup-oriented container, such as set<>. const std::vector* no_inline_from_; bool boot_image_; bool app_image_; // When using a profile file only the top K% of the profiled samples will be compiled. 
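// Illustrative example (an interpretation, not taken from this file): with the
// default threshold of 90.0 and a profile where methods A/B/C account for
// 70%/20%/10% of the samples, A and B together cover the top 90% and are
// compiled, while C is left to the interpreter/JIT.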
double top_k_profile_threshold_; bool debuggable_; bool generate_debug_info_; bool generate_mini_debug_info_; bool generate_build_id_; bool implicit_null_checks_; bool implicit_so_checks_; bool implicit_suspend_checks_; bool compile_pic_; // Vector of methods to have verbose output enabled for. std::vector verbose_methods_; // Abort compilation with an error if we find a class that fails verification with a hard // failure. bool abort_on_hard_verifier_failure_; // Log initialization of initialization failures to this stream if not null. std::unique_ptr init_failure_output_; std::string dump_cfg_file_name_; bool dump_cfg_append_; // Whether the compiler should trade performance for determinism to guarantee exactly reproducible // outcomes. bool force_determinism_; RegisterAllocator::Strategy register_allocation_strategy_; // If not null, specifies optimization passes which will be run instead of defaults. // Note that passes_to_run_ is not checked for correctness and providing an incorrect // list of passes can lead to unexpected compiler behaviour. This is caused by dependencies // between passes. Failing to satisfy them can for example lead to compiler crashes. // Passing pass names which are not recognized by the compiler will result in // compiler-dependant behavior. const std::vector* passes_to_run_; friend class Dex2Oat; friend class DexToDexDecompilerTest; friend class CommonCompilerTest; friend class verifier::VerifierDepsTest; DISALLOW_COPY_AND_ASSIGN(CompilerOptions); }; } // namespace art #endif // ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_ android-platform-art-8.1.0+r23/compiler/driver/dex_compilation_unit.cc000066400000000000000000000035171336577252300260240ustar00rootroot00000000000000/* * Copyright (C) 2013 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "dex_compilation_unit.h" #include "mirror/dex_cache.h" #include "utils.h" namespace art { DexCompilationUnit::DexCompilationUnit(Handle class_loader, ClassLinker* class_linker, const DexFile& dex_file, const DexFile::CodeItem* code_item, uint16_t class_def_idx, uint32_t method_idx, uint32_t access_flags, const VerifiedMethod* verified_method, Handle dex_cache) : class_loader_(class_loader), class_linker_(class_linker), dex_file_(&dex_file), code_item_(code_item), class_def_idx_(class_def_idx), dex_method_idx_(method_idx), access_flags_(access_flags), verified_method_(verified_method), dex_cache_(dex_cache) { } const std::string& DexCompilationUnit::GetSymbol() { if (symbol_.empty()) { symbol_ = "dex_"; symbol_ += MangleForJni(dex_file_->PrettyMethod(dex_method_idx_)); } return symbol_; } } // namespace art android-platform-art-8.1.0+r23/compiler/driver/dex_compilation_unit.h000066400000000000000000000066241336577252300256700ustar00rootroot00000000000000/* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ #define ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ #include #include "base/arena_object.h" #include "dex_file.h" #include "handle.h" #include "jni.h" namespace art { namespace mirror { class ClassLoader; class DexCache; } // namespace mirror class ClassLinker; class VerifiedMethod; class DexCompilationUnit : public DeletableArenaObject { public: DexCompilationUnit(Handle class_loader, ClassLinker* class_linker, const DexFile& dex_file, const DexFile::CodeItem* code_item, uint16_t class_def_idx, uint32_t method_idx, uint32_t access_flags, const VerifiedMethod* verified_method, Handle dex_cache); Handle GetClassLoader() const { return class_loader_; } ClassLinker* GetClassLinker() const { return class_linker_; } const DexFile* GetDexFile() const { return dex_file_; } uint16_t GetClassDefIndex() const { return class_def_idx_; } uint32_t GetDexMethodIndex() const { return dex_method_idx_; } const DexFile::CodeItem* GetCodeItem() const { return code_item_; } const char* GetShorty() const { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); return dex_file_->GetMethodShorty(method_id); } const char* GetShorty(uint32_t* shorty_len) const { const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_); return dex_file_->GetMethodShorty(method_id, shorty_len); } uint32_t GetAccessFlags() const { return access_flags_; } bool IsConstructor() const { return ((access_flags_ & kAccConstructor) != 0); } bool IsNative() const { return ((access_flags_ & kAccNative) != 0); } bool IsStatic() const { return ((access_flags_ & kAccStatic) != 0); } bool IsSynchronized() const { return ((access_flags_ & kAccSynchronized) != 0); } const VerifiedMethod* GetVerifiedMethod() const { return verified_method_; } void ClearVerifiedMethod() { verified_method_ = nullptr; } const std::string& GetSymbol(); Handle GetDexCache() const { return dex_cache_; } private: const Handle class_loader_; ClassLinker* const class_linker_; const DexFile* const dex_file_; const DexFile::CodeItem* const code_item_; const uint16_t class_def_idx_; const uint32_t dex_method_idx_; const uint32_t access_flags_; const VerifiedMethod* verified_method_; const Handle dex_cache_; std::string symbol_; }; } // namespace art #endif // ART_COMPILER_DRIVER_DEX_COMPILATION_UNIT_H_ android-platform-art-8.1.0+r23/compiler/elf_builder.h000066400000000000000000001142351336577252300224320ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_ELF_BUILDER_H_ #define ART_COMPILER_ELF_BUILDER_H_ #include #include "arch/instruction_set.h" #include "arch/mips/instruction_set_features_mips.h" #include "base/array_ref.h" #include "base/bit_utils.h" #include "base/casts.h" #include "base/unix_file/fd_file.h" #include "elf_utils.h" #include "leb128.h" #include "linker/error_delaying_output_stream.h" namespace art { // Writes ELF file. // // The basic layout of the elf file: // Elf_Ehdr - The ELF header. // Elf_Phdr[] - Program headers for the linker. // .note.gnu.build-id - Optional build ID section (SHA-1 digest). // .rodata - DEX files and oat metadata. // .text - Compiled code. // .bss - Zero-initialized writeable section. // .MIPS.abiflags - MIPS specific section. // .dynstr - Names for .dynsym. // .dynsym - A few oat-specific dynamic symbols. // .hash - Hash-table for .dynsym. // .dynamic - Tags which let the linker locate .dynsym. // .strtab - Names for .symtab. // .symtab - Debug symbols. // .eh_frame - Unwind information (CFI). // .eh_frame_hdr - Index of .eh_frame. // .debug_frame - Unwind information (CFI). // .debug_frame.oat_patches - Addresses for relocation. // .debug_info - Debug information. // .debug_info.oat_patches - Addresses for relocation. // .debug_abbrev - Decoding information for .debug_info. // .debug_str - Strings for .debug_info. // .debug_line - Line number tables. // .debug_line.oat_patches - Addresses for relocation. // .text.oat_patches - Addresses for relocation. // .shstrtab - Names of ELF sections. // Elf_Shdr[] - Section headers. // // Some section are optional (the debug sections in particular). // // We try write the section data directly into the file without much // in-memory buffering. This means we generally write sections based on the // dependency order (e.g. .dynamic points to .dynsym which points to .text). // // In the cases where we need to buffer, we write the larger section first // and buffer the smaller one (e.g. .strtab is bigger than .symtab). // // The debug sections are written last for easier stripping. // template class ElfBuilder FINAL { public: static constexpr size_t kMaxProgramHeaders = 16; // SHA-1 digest. Not using SHA_DIGEST_LENGTH from openssl/sha.h to avoid // spreading this header dependency for just this single constant. static constexpr size_t kBuildIdLen = 20; using Elf_Addr = typename ElfTypes::Addr; using Elf_Off = typename ElfTypes::Off; using Elf_Word = typename ElfTypes::Word; using Elf_Sword = typename ElfTypes::Sword; using Elf_Ehdr = typename ElfTypes::Ehdr; using Elf_Shdr = typename ElfTypes::Shdr; using Elf_Sym = typename ElfTypes::Sym; using Elf_Phdr = typename ElfTypes::Phdr; using Elf_Dyn = typename ElfTypes::Dyn; // Base class of all sections. class Section : public OutputStream { public: Section(ElfBuilder* owner, const std::string& name, Elf_Word type, Elf_Word flags, const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize) : OutputStream(name), owner_(owner), header_(), section_index_(0), name_(name), link_(link), started_(false), finished_(false), phdr_flags_(PF_R), phdr_type_(0) { DCHECK_GE(align, 1u); header_.sh_type = type; header_.sh_flags = flags; header_.sh_info = info; header_.sh_addralign = align; header_.sh_entsize = entsize; } // Start writing of this section. void Start() { CHECK(!started_); CHECK(!finished_); started_ = true; auto& sections = owner_->sections_; // Check that the previous section is complete. CHECK(sections.empty() || sections.back()->finished_); // The first ELF section index is 1. 
Index 0 is reserved for NULL. section_index_ = sections.size() + 1; // Page-align if we switch between allocated and non-allocated sections, // or if we change the type of allocation (e.g. executable vs non-executable). if (!sections.empty()) { if (header_.sh_flags != sections.back()->header_.sh_flags) { header_.sh_addralign = kPageSize; } } // Align file position. if (header_.sh_type != SHT_NOBITS) { header_.sh_offset = owner_->AlignFileOffset(header_.sh_addralign); } else { header_.sh_offset = 0; } // Align virtual memory address. if ((header_.sh_flags & SHF_ALLOC) != 0) { header_.sh_addr = owner_->AlignVirtualAddress(header_.sh_addralign); } else { header_.sh_addr = 0; } // Push this section on the list of written sections. sections.push_back(this); } // Finish writing of this section. void End() { CHECK(started_); CHECK(!finished_); finished_ = true; if (header_.sh_type == SHT_NOBITS) { CHECK_GT(header_.sh_size, 0u); } else { // Use the current file position to determine section size. off_t file_offset = owner_->stream_.Seek(0, kSeekCurrent); CHECK_GE(file_offset, (off_t)header_.sh_offset); header_.sh_size = file_offset - header_.sh_offset; } if ((header_.sh_flags & SHF_ALLOC) != 0) { owner_->virtual_address_ += header_.sh_size; } } // Get the location of this section in virtual memory. Elf_Addr GetAddress() const { CHECK(started_); return header_.sh_addr; } // Returns the size of the content of this section. Elf_Word GetSize() const { if (finished_) { return header_.sh_size; } else { CHECK(started_); CHECK_NE(header_.sh_type, (Elf_Word)SHT_NOBITS); return owner_->stream_.Seek(0, kSeekCurrent) - header_.sh_offset; } } // Write this section as "NOBITS" section. (used for the .bss section) // This means that the ELF file does not contain the initial data for this section // and it will be zero-initialized when the ELF file is loaded in the running program. void WriteNoBitsSection(Elf_Word size) { DCHECK_NE(header_.sh_flags & SHF_ALLOC, 0u); header_.sh_type = SHT_NOBITS; Start(); header_.sh_size = size; End(); } // This function always succeeds to simplify code. // Use builder's Good() to check the actual status. bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE { CHECK(started_); CHECK(!finished_); return owner_->stream_.WriteFully(buffer, byte_count); } // This function always succeeds to simplify code. // Use builder's Good() to check the actual status. off_t Seek(off_t offset, Whence whence) OVERRIDE { // Forward the seek as-is and trust the caller to use it reasonably. return owner_->stream_.Seek(offset, whence); } // This function flushes the output and returns whether it succeeded. // If there was a previous failure, this does nothing and returns false, i.e. failed. 
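// A sketch of the intended usage pattern: issue all writes without checking each
// one, then ask the builder once at the end whether anything failed:
//
//   section->Start();
//   section->WriteFully(data, size);  // "succeeds" even after an earlier error
//   section->End();
//   CHECK(builder->Good());           // the single point where errors surface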
bool Flush() OVERRIDE { return owner_->stream_.Flush(); } Elf_Word GetSectionIndex() const { DCHECK(started_); DCHECK_NE(section_index_, 0u); return section_index_; } private: ElfBuilder* owner_; Elf_Shdr header_; Elf_Word section_index_; const std::string name_; const Section* const link_; bool started_; bool finished_; Elf_Word phdr_flags_; Elf_Word phdr_type_; friend class ElfBuilder; DISALLOW_COPY_AND_ASSIGN(Section); }; class CachedSection : public Section { public: CachedSection(ElfBuilder* owner, const std::string& name, Elf_Word type, Elf_Word flags, const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize) : Section(owner, name, type, flags, link, info, align, entsize), cache_() { } Elf_Word Add(const void* data, size_t length) { Elf_Word offset = cache_.size(); const uint8_t* d = reinterpret_cast(data); cache_.insert(cache_.end(), d, d + length); return offset; } Elf_Word GetCacheSize() { return cache_.size(); } void Write() { this->WriteFully(cache_.data(), cache_.size()); cache_.clear(); cache_.shrink_to_fit(); } void WriteCachedSection() { this->Start(); Write(); this->End(); } private: std::vector cache_; }; // Writer of .dynstr section. class CachedStringSection FINAL : public CachedSection { public: CachedStringSection(ElfBuilder* owner, const std::string& name, Elf_Word flags, Elf_Word align) : CachedSection(owner, name, SHT_STRTAB, flags, /* link */ nullptr, /* info */ 0, align, /* entsize */ 0) { } Elf_Word Add(const std::string& name) { if (CachedSection::GetCacheSize() == 0u) { DCHECK(name.empty()); } return CachedSection::Add(name.c_str(), name.length() + 1); } }; // Writer of .strtab and .shstrtab sections. class StringSection FINAL : public Section { public: StringSection(ElfBuilder* owner, const std::string& name, Elf_Word flags, Elf_Word align) : Section(owner, name, SHT_STRTAB, flags, /* link */ nullptr, /* info */ 0, align, /* entsize */ 0), current_offset_(0) { } Elf_Word Write(const std::string& name) { if (current_offset_ == 0) { DCHECK(name.empty()); } Elf_Word offset = current_offset_; this->WriteFully(name.c_str(), name.length() + 1); current_offset_ += name.length() + 1; return offset; } private: Elf_Word current_offset_; }; // Writer of .dynsym and .symtab sections. class SymbolSection FINAL : public CachedSection { public: SymbolSection(ElfBuilder* owner, const std::string& name, Elf_Word type, Elf_Word flags, Section* strtab) : CachedSection(owner, name, type, flags, strtab, /* info */ 0, sizeof(Elf_Off), sizeof(Elf_Sym)) { // The symbol table always has to start with NULL symbol. Elf_Sym null_symbol = Elf_Sym(); CachedSection::Add(&null_symbol, sizeof(null_symbol)); } // Buffer symbol for this section. It will be written later. // If the symbol's section is null, it will be considered absolute (SHN_ABS). 
// (we use this in JIT to reference code which is stored outside the debug ELF file) void Add(Elf_Word name, const Section* section, Elf_Addr addr, Elf_Word size, uint8_t binding, uint8_t type) { Elf_Word section_index; if (section != nullptr) { DCHECK_LE(section->GetAddress(), addr); DCHECK_LE(addr, section->GetAddress() + section->GetSize()); section_index = section->GetSectionIndex(); } else { section_index = static_cast(SHN_ABS); } Add(name, section_index, addr, size, binding, type); } void Add(Elf_Word name, Elf_Word section_index, Elf_Addr addr, Elf_Word size, uint8_t binding, uint8_t type) { Elf_Sym sym = Elf_Sym(); sym.st_name = name; sym.st_value = addr; sym.st_size = size; sym.st_other = 0; sym.st_shndx = section_index; sym.st_info = (binding << 4) + (type & 0xf); CachedSection::Add(&sym, sizeof(sym)); } }; class AbiflagsSection FINAL : public Section { public: // Section with Mips abiflag info. static constexpr uint8_t MIPS_AFL_REG_NONE = 0; // no registers static constexpr uint8_t MIPS_AFL_REG_32 = 1; // 32-bit registers static constexpr uint8_t MIPS_AFL_REG_64 = 2; // 64-bit registers static constexpr uint32_t MIPS_AFL_FLAGS1_ODDSPREG = 1; // Uses odd single-prec fp regs static constexpr uint8_t MIPS_ABI_FP_DOUBLE = 1; // -mdouble-float static constexpr uint8_t MIPS_ABI_FP_XX = 5; // -mfpxx static constexpr uint8_t MIPS_ABI_FP_64A = 7; // -mips32r* -mfp64 -mno-odd-spreg AbiflagsSection(ElfBuilder* owner, const std::string& name, Elf_Word type, Elf_Word flags, const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize, InstructionSet isa, const InstructionSetFeatures* features) : Section(owner, name, type, flags, link, info, align, entsize) { if (isa == kMips || isa == kMips64) { bool fpu32 = false; // assume mips64 values uint8_t isa_rev = 6; // assume mips64 values if (isa == kMips) { // adjust for mips32 values fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint(); isa_rev = features->AsMipsInstructionSetFeatures()->IsR6() ? 6 : features->AsMipsInstructionSetFeatures()->IsMipsIsaRevGreaterThanEqual2() ? (fpu32 ? 2 : 5) : 1; } abiflags_.version = 0; // version of flags structure abiflags_.isa_level = (isa == kMips) ? 32 : 64; abiflags_.isa_rev = isa_rev; abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; abiflags_.cpr2_size = MIPS_AFL_REG_NONE; // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6). // Otherwise set to MIPS_ABI_FP_DOUBLE. abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE; abiflags_.isa_ext = 0; abiflags_.ases = 0; // To keep the code simple, we are not using odd FP reg for single floats for both // mips32 and mips64 ART. Therefore we are not setting the MIPS_AFL_FLAGS1_ODDSPREG bit. 
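// (If odd single-precision FP regs were ever used, this would amount to
// `abiflags_.flags1 |= MIPS_AFL_FLAGS1_ODDSPREG;` after the reset below.)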
abiflags_.flags1 = 0; abiflags_.flags2 = 0; } } Elf_Word GetSize() const { return sizeof(abiflags_); } void Write() { this->WriteFully(&abiflags_, sizeof(abiflags_)); } private: struct { uint16_t version; // version of this structure uint8_t isa_level, isa_rev, gpr_size, cpr1_size, cpr2_size; uint8_t fp_abi; uint32_t isa_ext, ases, flags1, flags2; } abiflags_; }; class BuildIdSection FINAL : public Section { public: BuildIdSection(ElfBuilder* owner, const std::string& name, Elf_Word type, Elf_Word flags, const Section* link, Elf_Word info, Elf_Word align, Elf_Word entsize) : Section(owner, name, type, flags, link, info, align, entsize), digest_start_(-1) { } void Write() { // The size fields are 32-bit on both 32-bit and 64-bit systems, confirmed // with the 64-bit linker and libbfd code. The size of name and desc must // be a multiple of 4 and it currently is. this->WriteUint32(4); // namesz. this->WriteUint32(kBuildIdLen); // descsz. this->WriteUint32(3); // type = NT_GNU_BUILD_ID. this->WriteFully("GNU", 4); // name. digest_start_ = this->Seek(0, kSeekCurrent); static_assert(kBuildIdLen % 4 == 0, "expecting a mutliple of 4 for build ID length"); this->WriteFully(std::string(kBuildIdLen, '\0').c_str(), kBuildIdLen); // desc. } off_t GetDigestStart() { CHECK_GT(digest_start_, 0); return digest_start_; } private: bool WriteUint32(uint32_t v) { return this->WriteFully(&v, sizeof(v)); } // File offset where the build ID digest starts. // Populated with zeros first, then updated with the actual value as the // very last thing in the output file creation. off_t digest_start_; }; ElfBuilder(InstructionSet isa, const InstructionSetFeatures* features, OutputStream* output) : isa_(isa), features_(features), stream_(output), rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0), text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0), bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0), dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize), dynsym_(this, ".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_), hash_(this, ".hash", SHT_HASH, SHF_ALLOC, &dynsym_, 0, sizeof(Elf_Word), sizeof(Elf_Word)), dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)), eh_frame_(this, ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0), eh_frame_hdr_(this, ".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0), strtab_(this, ".strtab", 0, 1), symtab_(this, ".symtab", SHT_SYMTAB, 0, &strtab_), debug_frame_(this, ".debug_frame", SHT_PROGBITS, 0, nullptr, 0, sizeof(Elf_Addr), 0), debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0), debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0), shstrtab_(this, ".shstrtab", 0, 1), abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0, isa, features), build_id_(this, ".note.gnu.build-id", SHT_NOTE, SHF_ALLOC, nullptr, 0, 4, 0), started_(false), write_program_headers_(false), loaded_size_(0u), virtual_address_(0) { text_.phdr_flags_ = PF_R | PF_X; bss_.phdr_flags_ = PF_R | PF_W; dynamic_.phdr_flags_ = PF_R | PF_W; dynamic_.phdr_type_ = PT_DYNAMIC; eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME; abiflags_.phdr_type_ = PT_MIPS_ABIFLAGS; build_id_.phdr_type_ = PT_NOTE; } ~ElfBuilder() {} InstructionSet GetIsa() { return isa_; } Section* GetRoData() { return &rodata_; } Section* GetText() { return &text_; } Section* GetBss() { return &bss_; } StringSection* GetStrTab() { return &strtab_; } SymbolSection* GetSymTab() { 
return &symtab_; } Section* GetEhFrame() { return &eh_frame_; } Section* GetEhFrameHdr() { return &eh_frame_hdr_; } Section* GetDebugFrame() { return &debug_frame_; } Section* GetDebugInfo() { return &debug_info_; } Section* GetDebugLine() { return &debug_line_; } // Encode patch locations as LEB128 list of deltas between consecutive addresses. // (exposed publicly for tests) static void EncodeOatPatches(const ArrayRef& locations, std::vector* buffer) { buffer->reserve(buffer->size() + locations.size() * 2); // guess 2 bytes per ULEB128. uintptr_t address = 0; // relative to start of section. for (uintptr_t location : locations) { DCHECK_GE(location, address) << "Patch locations are not in sorted order"; EncodeUnsignedLeb128(buffer, dchecked_integral_cast(location - address)); address = location; } } void WritePatches(const char* name, const ArrayRef& patch_locations) { std::vector buffer; EncodeOatPatches(patch_locations, &buffer); std::unique_ptr
s(new Section(this, name, SHT_OAT_PATCH, 0, nullptr, 0, 1, 0)); s->Start(); s->WriteFully(buffer.data(), buffer.size()); s->End(); other_sections_.push_back(std::move(s)); } void WriteSection(const char* name, const std::vector* buffer) { std::unique_ptr
s(new Section(this, name, SHT_PROGBITS, 0, nullptr, 0, 1, 0)); s->Start(); s->WriteFully(buffer->data(), buffer->size()); s->End(); other_sections_.push_back(std::move(s)); } // Reserve space for ELF header and program headers. // We do not know the number of headers until later, so // it is easiest to just reserve a fixed amount of space. // Program headers are required for loading by the linker. // It is possible to omit them for ELF files used for debugging. void Start(bool write_program_headers = true) { int size = sizeof(Elf_Ehdr); if (write_program_headers) { size += sizeof(Elf_Phdr) * kMaxProgramHeaders; } stream_.Seek(size, kSeekSet); started_ = true; virtual_address_ += size; write_program_headers_ = write_program_headers; } void End() { DCHECK(started_); // Note: loaded_size_ == 0 for tests that don't write .rodata, .text, .bss, // .dynstr, dynsym, .hash and .dynamic. These tests should not read loaded_size_. // TODO: Either refactor the .eh_frame creation so that it counts towards loaded_size_, // or remove all support for .eh_frame. (The currently unused .eh_frame counts towards // the virtual_address_ but we don't consider it for loaded_size_.) CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize)) << loaded_size_ << " " << virtual_address_; // Write section names and finish the section headers. shstrtab_.Start(); shstrtab_.Write(""); for (auto* section : sections_) { section->header_.sh_name = shstrtab_.Write(section->name_); if (section->link_ != nullptr) { section->header_.sh_link = section->link_->GetSectionIndex(); } } shstrtab_.End(); // Write section headers at the end of the ELF file. std::vector shdrs; shdrs.reserve(1u + sections_.size()); shdrs.push_back(Elf_Shdr()); // NULL at index 0. for (auto* section : sections_) { shdrs.push_back(section->header_); } Elf_Off section_headers_offset; section_headers_offset = AlignFileOffset(sizeof(Elf_Off)); stream_.WriteFully(shdrs.data(), shdrs.size() * sizeof(shdrs[0])); // Flush everything else before writing the program headers. This should prevent // the OS from reordering writes, so that we don't end up with valid headers // and partially written data if we suddenly lose power, for example. stream_.Flush(); // The main ELF header. Elf_Ehdr elf_header = MakeElfHeader(isa_, features_); elf_header.e_shoff = section_headers_offset; elf_header.e_shnum = shdrs.size(); elf_header.e_shstrndx = shstrtab_.GetSectionIndex(); // Program headers (i.e. mmap instructions). std::vector phdrs; if (write_program_headers_) { phdrs = MakeProgramHeaders(); CHECK_LE(phdrs.size(), kMaxProgramHeaders); elf_header.e_phoff = sizeof(Elf_Ehdr); elf_header.e_phnum = phdrs.size(); } stream_.Seek(0, kSeekSet); stream_.WriteFully(&elf_header, sizeof(elf_header)); stream_.WriteFully(phdrs.data(), phdrs.size() * sizeof(phdrs[0])); stream_.Flush(); } // The running program does not have access to section headers // and the loader is not supposed to use them either. // The dynamic sections therefore replicates some of the layout // information like the address and size of .rodata and .text. // It also contains other metadata like the SONAME. // The .dynamic section is found using the PT_DYNAMIC program header. 
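// Sketch of the resulting .dynamic contents (the tags emitted below; addresses
// and sizes are computed by PrepareDynamicSection):
//
//   DT_HASH   -> address of .hash      DT_STRTAB -> address of .dynstr
//   DT_SYMTAB -> address of .dynsym    DT_SYMENT -> sizeof(Elf_Sym)
//   DT_STRSZ  -> size of .dynstr       DT_SONAME -> soname offset in .dynstr
//   DT_NULL   -> terminator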
void PrepareDynamicSection(const std::string& elf_file_path, Elf_Word rodata_size, Elf_Word text_size, Elf_Word bss_size, Elf_Word bss_methods_offset, Elf_Word bss_roots_offset) { std::string soname(elf_file_path); size_t directory_separator_pos = soname.rfind('/'); if (directory_separator_pos != std::string::npos) { soname = soname.substr(directory_separator_pos + 1); } // Calculate addresses of .text, .bss and .dynstr. DCHECK_EQ(rodata_.header_.sh_addralign, static_cast(kPageSize)); DCHECK_EQ(text_.header_.sh_addralign, static_cast(kPageSize)); DCHECK_EQ(bss_.header_.sh_addralign, static_cast(kPageSize)); DCHECK_EQ(dynstr_.header_.sh_addralign, static_cast(kPageSize)); Elf_Word rodata_address = rodata_.GetAddress(); Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize); Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize); Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize); Elf_Word abiflags_size = 0; if (isa_ == kMips || isa_ == kMips64) { abiflags_size = abiflags_.GetSize(); } Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize); // Cache .dynstr, .dynsym and .hash data. dynstr_.Add(""); // dynstr should start with empty string. Elf_Word rodata_index = rodata_.GetSectionIndex(); Elf_Word oatdata = dynstr_.Add("oatdata"); dynsym_.Add(oatdata, rodata_index, rodata_address, rodata_size, STB_GLOBAL, STT_OBJECT); if (text_size != 0u) { Elf_Word text_index = rodata_index + 1u; Elf_Word oatexec = dynstr_.Add("oatexec"); dynsym_.Add(oatexec, text_index, text_address, text_size, STB_GLOBAL, STT_OBJECT); Elf_Word oatlastword = dynstr_.Add("oatlastword"); Elf_Word oatlastword_address = text_address + text_size - 4; dynsym_.Add(oatlastword, text_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); } else if (rodata_size != 0) { // rodata_ can be size 0 for dwarf_test. Elf_Word oatlastword = dynstr_.Add("oatlastword"); Elf_Word oatlastword_address = rodata_address + rodata_size - 4; dynsym_.Add(oatlastword, rodata_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); } DCHECK_LE(bss_roots_offset, bss_size); if (bss_size != 0u) { Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u); Elf_Word oatbss = dynstr_.Add("oatbss"); dynsym_.Add(oatbss, bss_index, bss_address, bss_roots_offset, STB_GLOBAL, STT_OBJECT); DCHECK_LE(bss_methods_offset, bss_roots_offset); DCHECK_LE(bss_roots_offset, bss_size); // Add a symbol marking the start of the methods part of the .bss, if not empty. if (bss_methods_offset != bss_roots_offset) { Elf_Word bss_methods_address = bss_address + bss_methods_offset; Elf_Word bss_methods_size = bss_roots_offset - bss_methods_offset; Elf_Word oatbssroots = dynstr_.Add("oatbssmethods"); dynsym_.Add( oatbssroots, bss_index, bss_methods_address, bss_methods_size, STB_GLOBAL, STT_OBJECT); } // Add a symbol marking the start of the GC roots part of the .bss, if not empty. if (bss_roots_offset != bss_size) { Elf_Word bss_roots_address = bss_address + bss_roots_offset; Elf_Word bss_roots_size = bss_size - bss_roots_offset; Elf_Word oatbssroots = dynstr_.Add("oatbssroots"); dynsym_.Add( oatbssroots, bss_index, bss_roots_address, bss_roots_size, STB_GLOBAL, STT_OBJECT); } Elf_Word oatbsslastword = dynstr_.Add("oatbsslastword"); Elf_Word bsslastword_address = bss_address + bss_size - 4; dynsym_.Add(oatbsslastword, bss_index, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT); } Elf_Word soname_offset = dynstr_.Add(soname); // We do not really need a hash-table since there is so few entries. 
// However, the hash-table is the only way the linker can actually // determine the number of symbols in .dynsym so it is required. int count = dynsym_.GetCacheSize() / sizeof(Elf_Sym); // Includes NULL. std::vector hash; hash.push_back(1); // Number of buckets. hash.push_back(count); // Number of chains. // Buckets. Having just one makes it linear search. hash.push_back(1); // Point to first non-NULL symbol. // Chains. This creates linked list of symbols. hash.push_back(0); // Dummy entry for the NULL symbol. for (int i = 1; i < count - 1; i++) { hash.push_back(i + 1); // Each symbol points to the next one. } hash.push_back(0); // Last symbol terminates the chain. hash_.Add(hash.data(), hash.size() * sizeof(hash[0])); // Calculate addresses of .dynsym, .hash and .dynamic. DCHECK_EQ(dynstr_.header_.sh_flags, dynsym_.header_.sh_flags); DCHECK_EQ(dynsym_.header_.sh_flags, hash_.header_.sh_flags); Elf_Word dynsym_address = RoundUp(dynstr_address + dynstr_.GetCacheSize(), dynsym_.header_.sh_addralign); Elf_Word hash_address = RoundUp(dynsym_address + dynsym_.GetCacheSize(), hash_.header_.sh_addralign); DCHECK_EQ(dynamic_.header_.sh_addralign, static_cast(kPageSize)); Elf_Word dynamic_address = RoundUp(hash_address + dynsym_.GetCacheSize(), kPageSize); Elf_Dyn dyns[] = { { DT_HASH, { hash_address } }, { DT_STRTAB, { dynstr_address } }, { DT_SYMTAB, { dynsym_address } }, { DT_SYMENT, { sizeof(Elf_Sym) } }, { DT_STRSZ, { dynstr_.GetCacheSize() } }, { DT_SONAME, { soname_offset } }, { DT_NULL, { 0 } }, }; dynamic_.Add(&dyns, sizeof(dyns)); loaded_size_ = RoundUp(dynamic_address + dynamic_.GetCacheSize(), kPageSize); } void WriteDynamicSection() { dynstr_.WriteCachedSection(); dynsym_.WriteCachedSection(); hash_.WriteCachedSection(); dynamic_.WriteCachedSection(); CHECK_EQ(loaded_size_, RoundUp(dynamic_.GetAddress() + dynamic_.GetSize(), kPageSize)); } Elf_Word GetLoadedSize() { CHECK_NE(loaded_size_, 0u); return loaded_size_; } void WriteMIPSabiflagsSection() { abiflags_.Start(); abiflags_.Write(); abiflags_.End(); } void WriteBuildIdSection() { build_id_.Start(); build_id_.Write(); build_id_.End(); } void WriteBuildId(uint8_t build_id[kBuildIdLen]) { stream_.Seek(build_id_.GetDigestStart(), kSeekSet); stream_.WriteFully(build_id, kBuildIdLen); } // Returns true if all writes and seeks on the output stream succeeded. bool Good() { return stream_.Good(); } // Returns the builder's internal stream. OutputStream* GetStream() { return &stream_; } off_t AlignFileOffset(size_t alignment) { return stream_.Seek(RoundUp(stream_.Seek(0, kSeekCurrent), alignment), kSeekSet); } Elf_Addr AlignVirtualAddress(size_t alignment) { return virtual_address_ = RoundUp(virtual_address_, alignment); } private: static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) { Elf_Ehdr elf_header = Elf_Ehdr(); switch (isa) { case kArm: // Fall through. case kThumb2: { elf_header.e_machine = EM_ARM; elf_header.e_flags = EF_ARM_EABI_VER5; break; } case kArm64: { elf_header.e_machine = EM_AARCH64; elf_header.e_flags = 0; break; } case kX86: { elf_header.e_machine = EM_386; elf_header.e_flags = 0; break; } case kX86_64: { elf_header.e_machine = EM_X86_64; elf_header.e_flags = 0; break; } case kMips: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | EF_MIPS_PIC | EF_MIPS_CPIC | EF_MIPS_ABI_O32 | (features->AsMipsInstructionSetFeatures()->IsR6() ? 
EF_MIPS_ARCH_32R6 : EF_MIPS_ARCH_32R2)); break; } case kMips64: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | EF_MIPS_PIC | EF_MIPS_CPIC | EF_MIPS_ARCH_64R6); break; } case kNone: { LOG(FATAL) << "No instruction set"; break; } default: { LOG(FATAL) << "Unknown instruction set " << isa; } } elf_header.e_ident[EI_MAG0] = ELFMAG0; elf_header.e_ident[EI_MAG1] = ELFMAG1; elf_header.e_ident[EI_MAG2] = ELFMAG2; elf_header.e_ident[EI_MAG3] = ELFMAG3; elf_header.e_ident[EI_CLASS] = (sizeof(Elf_Addr) == sizeof(Elf32_Addr)) ? ELFCLASS32 : ELFCLASS64; elf_header.e_ident[EI_DATA] = ELFDATA2LSB; elf_header.e_ident[EI_VERSION] = EV_CURRENT; elf_header.e_ident[EI_OSABI] = ELFOSABI_LINUX; elf_header.e_ident[EI_ABIVERSION] = 0; elf_header.e_type = ET_DYN; elf_header.e_version = 1; elf_header.e_entry = 0; elf_header.e_ehsize = sizeof(Elf_Ehdr); elf_header.e_phentsize = sizeof(Elf_Phdr); elf_header.e_shentsize = sizeof(Elf_Shdr); elf_header.e_phoff = sizeof(Elf_Ehdr); return elf_header; } // Create program headers based on written sections. std::vector MakeProgramHeaders() { CHECK(!sections_.empty()); std::vector phdrs; { // The program headers must start with PT_PHDR which is used in // loaded process to determine the number of program headers. Elf_Phdr phdr = Elf_Phdr(); phdr.p_type = PT_PHDR; phdr.p_flags = PF_R; phdr.p_offset = phdr.p_vaddr = phdr.p_paddr = sizeof(Elf_Ehdr); phdr.p_filesz = phdr.p_memsz = 0; // We need to fill this later. phdr.p_align = sizeof(Elf_Off); phdrs.push_back(phdr); // Tell the linker to mmap the start of file to memory. Elf_Phdr load = Elf_Phdr(); load.p_type = PT_LOAD; load.p_flags = PF_R; load.p_offset = load.p_vaddr = load.p_paddr = 0; load.p_filesz = load.p_memsz = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * kMaxProgramHeaders; load.p_align = kPageSize; phdrs.push_back(load); } // Create program headers for sections. for (auto* section : sections_) { const Elf_Shdr& shdr = section->header_; if ((shdr.sh_flags & SHF_ALLOC) != 0 && shdr.sh_size != 0) { // PT_LOAD tells the linker to mmap part of the file. // The linker can only mmap page-aligned sections. // Single PT_LOAD may contain several ELF sections. Elf_Phdr& prev = phdrs.back(); Elf_Phdr load = Elf_Phdr(); load.p_type = PT_LOAD; load.p_flags = section->phdr_flags_; load.p_offset = shdr.sh_offset; load.p_vaddr = load.p_paddr = shdr.sh_addr; load.p_filesz = (shdr.sh_type != SHT_NOBITS ? shdr.sh_size : 0u); load.p_memsz = shdr.sh_size; load.p_align = shdr.sh_addralign; if (prev.p_type == load.p_type && prev.p_flags == load.p_flags && prev.p_filesz == prev.p_memsz && // Do not merge .bss load.p_filesz == load.p_memsz) { // Do not merge .bss // Merge this PT_LOAD with the previous one. Elf_Word size = shdr.sh_offset + shdr.sh_size - prev.p_offset; prev.p_filesz = size; prev.p_memsz = size; } else { // If we are adding new load, it must be aligned. CHECK_EQ(shdr.sh_addralign, (Elf_Word)kPageSize); phdrs.push_back(load); } } } for (auto* section : sections_) { const Elf_Shdr& shdr = section->header_; if ((shdr.sh_flags & SHF_ALLOC) != 0 && shdr.sh_size != 0) { // Other PT_* types allow the program to locate interesting // parts of memory at runtime. They must overlap with PT_LOAD. 
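// For example, .dynamic is covered by a PT_LOAD entry created above (so it gets
// mapped) and additionally receives a PT_DYNAMIC entry here over the same
// file/memory range (so the dynamic linker can locate it).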
if (section->phdr_type_ != 0) { Elf_Phdr phdr = Elf_Phdr(); phdr.p_type = section->phdr_type_; phdr.p_flags = section->phdr_flags_; phdr.p_offset = shdr.sh_offset; phdr.p_vaddr = phdr.p_paddr = shdr.sh_addr; phdr.p_filesz = phdr.p_memsz = shdr.sh_size; phdr.p_align = shdr.sh_addralign; phdrs.push_back(phdr); } } } // Set the size of the initial PT_PHDR. CHECK_EQ(phdrs[0].p_type, (Elf_Word)PT_PHDR); phdrs[0].p_filesz = phdrs[0].p_memsz = phdrs.size() * sizeof(Elf_Phdr); return phdrs; } InstructionSet isa_; const InstructionSetFeatures* features_; ErrorDelayingOutputStream stream_; Section rodata_; Section text_; Section bss_; CachedStringSection dynstr_; SymbolSection dynsym_; CachedSection hash_; CachedSection dynamic_; Section eh_frame_; Section eh_frame_hdr_; StringSection strtab_; SymbolSection symtab_; Section debug_frame_; Section debug_info_; Section debug_line_; StringSection shstrtab_; AbiflagsSection abiflags_; BuildIdSection build_id_; std::vector> other_sections_; // List of used section in the order in which they were written. std::vector sections_; bool started_; bool write_program_headers_; // The size of the memory taken by the ELF file when loaded. size_t loaded_size_; // Used for allocation of virtual address space. Elf_Addr virtual_address_; DISALLOW_COPY_AND_ASSIGN(ElfBuilder); }; } // namespace art #endif // ART_COMPILER_ELF_BUILDER_H_ android-platform-art-8.1.0+r23/compiler/elf_writer.cc000066400000000000000000000044051336577252300224530ustar00rootroot00000000000000/* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "elf_writer.h" #include "base/unix_file/fd_file.h" #include "elf_file.h" namespace art { uintptr_t ElfWriter::GetOatDataAddress(ElfFile* elf_file) { uintptr_t oatdata_address = elf_file->FindSymbolAddress(SHT_DYNSYM, "oatdata", false); CHECK_NE(0U, oatdata_address); return oatdata_address; } void ElfWriter::GetOatElfInformation(File* file, size_t* oat_loaded_size, size_t* oat_data_offset) { std::string error_msg; std::unique_ptr elf_file(ElfFile::Open(file, false, false, /*low_4gb*/false, &error_msg)); CHECK(elf_file.get() != nullptr) << error_msg; bool success = elf_file->GetLoadedSize(oat_loaded_size, &error_msg); CHECK(success) << error_msg; CHECK_NE(0U, *oat_loaded_size); *oat_data_offset = GetOatDataAddress(elf_file.get()); CHECK_NE(0U, *oat_data_offset); } bool ElfWriter::Fixup(File* file, uintptr_t oat_data_begin) { std::string error_msg; std::unique_ptr elf_file(ElfFile::Open(file, true, false, /*low_4gb*/false, &error_msg)); CHECK(elf_file.get() != nullptr) << error_msg; // Lookup "oatdata" symbol address. 
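// (Illustrative note, not part of the original AOSP sources; the numbers are
// hypothetical.) Fixup() rebases every absolute address recorded in the ELF
// file. If "oatdata" was linked at virtual address 0x1000 but the image
// writer wants the oat data at 0x71001000, then
//   base_address = 0x71001000 - 0x1000 = 0x71000000
// and ElfFile::Fixup() adds that delta to each recorded patch location.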
  uintptr_t oatdata_address = ElfWriter::GetOatDataAddress(elf_file.get());
  uintptr_t base_address = oat_data_begin - oatdata_address;
  return elf_file->Fixup(base_address);
}

}  // namespace art
android-platform-art-8.1.0+r23/compiler/elf_writer.h000066400000000000000000000053411336577252300223150ustar00rootroot00000000000000/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_ELF_WRITER_H_
#define ART_COMPILER_ELF_WRITER_H_

#include <stdint.h>
#include <cstddef>
#include <string>
#include <vector>

#include "base/array_ref.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "os.h"

namespace art {

class ElfFile;
class OutputStream;

namespace debug {
struct MethodDebugInfo;
}  // namespace debug

class ElfWriter {
 public:
  // Looks up information about the location of the oat file inside the elf file container.
  // Used by ImageWriter to perform memory layout.
  static void GetOatElfInformation(File* file,
                                   size_t* oat_loaded_size,
                                   size_t* oat_data_offset);

  // Returns the runtime oat_data address for an opened ElfFile.
  static uintptr_t GetOatDataAddress(ElfFile* elf_file);

  static bool Fixup(File* file, uintptr_t oat_data_begin);

  virtual ~ElfWriter() {}

  virtual void Start() = 0;
  virtual void PrepareDynamicSection(size_t rodata_size,
                                     size_t text_size,
                                     size_t bss_size,
                                     size_t bss_methods_offset,
                                     size_t bss_roots_offset) = 0;
  virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
  virtual OutputStream* StartRoData() = 0;
  virtual void EndRoData(OutputStream* rodata) = 0;
  virtual OutputStream* StartText() = 0;
  virtual void EndText(OutputStream* text) = 0;
  virtual void WriteDynamicSection() = 0;
  virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
  virtual bool End() = 0;

  // Get the ELF writer's stream. This stream can be used for writing data directly
  // to a section after the section has been finished. When that's done, the user
  // should Seek() back to the position where the stream was before this operation.
  virtual OutputStream* GetStream() = 0;

  // Get the size that the loaded ELF file will occupy in memory.
  virtual size_t GetLoadedSize() = 0;

 protected:
  ElfWriter() = default;
};

}  // namespace art

#endif  // ART_COMPILER_ELF_WRITER_H_
android-platform-art-8.1.0+r23/compiler/elf_writer_quick.cc000066400000000000000000000260471336577252300236550ustar00rootroot00000000000000/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ #include "elf_writer_quick.h" #include #include #include #include "base/casts.h" #include "base/logging.h" #include "compiled_method.h" #include "debug/elf_debug_writer.h" #include "debug/method_debug_info.h" #include "driver/compiler_options.h" #include "elf.h" #include "elf_builder.h" #include "elf_utils.h" #include "globals.h" #include "leb128.h" #include "linker/buffered_output_stream.h" #include "linker/file_output_stream.h" #include "thread-current-inl.h" #include "thread_pool.h" #include "utils.h" namespace art { // .eh_frame and .debug_frame are almost identical. // Except for some minor formatting differences, the main difference // is that .eh_frame is allocated within the running program because // it is used by C++ exception handling (which we do not use so we // can choose either). C++ compilers generally tend to use .eh_frame // because if they need it sometimes, they might as well always use it. // Let's use .debug_frame because it is easier to strip or compress. constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT; class DebugInfoTask : public Task { public: DebugInfoTask(InstructionSet isa, const InstructionSetFeatures* features, size_t rodata_section_size, size_t text_section_size, const ArrayRef& method_infos) : isa_(isa), instruction_set_features_(features), rodata_section_size_(rodata_section_size), text_section_size_(text_section_size), method_infos_(method_infos) { } void Run(Thread*) { result_ = debug::MakeMiniDebugInfo(isa_, instruction_set_features_, rodata_section_size_, text_section_size_, method_infos_); } std::vector* GetResult() { return &result_; } private: InstructionSet isa_; const InstructionSetFeatures* instruction_set_features_; size_t rodata_section_size_; size_t text_section_size_; const ArrayRef method_infos_; std::vector result_; }; template class ElfWriterQuick FINAL : public ElfWriter { public: ElfWriterQuick(InstructionSet instruction_set, const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file); ~ElfWriterQuick(); void Start() OVERRIDE; void PrepareDynamicSection(size_t rodata_size, size_t text_size, size_t bss_size, size_t bss_methods_offset, size_t bss_roots_offset) OVERRIDE; void PrepareDebugInfo(const ArrayRef& method_infos) OVERRIDE; OutputStream* StartRoData() OVERRIDE; void EndRoData(OutputStream* rodata) OVERRIDE; OutputStream* StartText() OVERRIDE; void EndText(OutputStream* text) OVERRIDE; void WriteDynamicSection() OVERRIDE; void WriteDebugInfo(const ArrayRef& method_infos) OVERRIDE; bool End() OVERRIDE; virtual OutputStream* GetStream() OVERRIDE; size_t GetLoadedSize() OVERRIDE; static void EncodeOatPatches(const std::vector& locations, std::vector* buffer); private: const InstructionSetFeatures* instruction_set_features_; const CompilerOptions* const compiler_options_; File* const elf_file_; size_t rodata_size_; size_t text_size_; size_t bss_size_; std::unique_ptr output_stream_; std::unique_ptr> builder_; std::unique_ptr debug_info_task_; std::unique_ptr debug_info_thread_pool_; void ComputeFileBuildId(uint8_t (*build_id)[ElfBuilder::kBuildIdLen]); DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick); }; std::unique_ptr CreateElfWriterQuick(InstructionSet instruction_set, const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file) { if (Is64BitInstructionSet(instruction_set)) { return std::make_unique>(instruction_set, features, compiler_options, elf_file); } else { return std::make_unique>(instruction_set, features, compiler_options, 
elf_file); } } template ElfWriterQuick::ElfWriterQuick(InstructionSet instruction_set, const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file) : ElfWriter(), instruction_set_features_(features), compiler_options_(compiler_options), elf_file_(elf_file), rodata_size_(0u), text_size_(0u), bss_size_(0u), output_stream_( std::make_unique(std::make_unique(elf_file))), builder_(new ElfBuilder(instruction_set, features, output_stream_.get())) {} template ElfWriterQuick::~ElfWriterQuick() {} template void ElfWriterQuick::Start() { builder_->Start(); if (compiler_options_->GetGenerateBuildId()) { builder_->WriteBuildIdSection(); } } template void ElfWriterQuick::PrepareDynamicSection(size_t rodata_size, size_t text_size, size_t bss_size, size_t bss_methods_offset, size_t bss_roots_offset) { DCHECK_EQ(rodata_size_, 0u); rodata_size_ = rodata_size; DCHECK_EQ(text_size_, 0u); text_size_ = text_size; DCHECK_EQ(bss_size_, 0u); bss_size_ = bss_size; builder_->PrepareDynamicSection(elf_file_->GetPath(), rodata_size_, text_size_, bss_size_, bss_methods_offset, bss_roots_offset); } template OutputStream* ElfWriterQuick::StartRoData() { auto* rodata = builder_->GetRoData(); rodata->Start(); return rodata; } template void ElfWriterQuick::EndRoData(OutputStream* rodata) { CHECK_EQ(builder_->GetRoData(), rodata); builder_->GetRoData()->End(); } template OutputStream* ElfWriterQuick::StartText() { auto* text = builder_->GetText(); text->Start(); return text; } template void ElfWriterQuick::EndText(OutputStream* text) { CHECK_EQ(builder_->GetText(), text); builder_->GetText()->End(); } template void ElfWriterQuick::WriteDynamicSection() { if (bss_size_ != 0u) { builder_->GetBss()->WriteNoBitsSection(bss_size_); } if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) { builder_->WriteMIPSabiflagsSection(); } builder_->WriteDynamicSection(); } template void ElfWriterQuick::PrepareDebugInfo( const ArrayRef& method_infos) { if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) { // Prepare the mini-debug-info in background while we do other I/O. Thread* self = Thread::Current(); debug_info_task_ = std::unique_ptr( new DebugInfoTask(builder_->GetIsa(), instruction_set_features_, rodata_size_, text_size_, method_infos)); debug_info_thread_pool_ = std::unique_ptr( new ThreadPool("Mini-debug-info writer", 1)); debug_info_thread_pool_->AddTask(self, debug_info_task_.get()); debug_info_thread_pool_->StartWorkers(self); } } template void ElfWriterQuick::WriteDebugInfo( const ArrayRef& method_infos) { if (!method_infos.empty()) { if (compiler_options_->GetGenerateDebugInfo()) { // Generate all the debug information we can. debug::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat, true /* write_oat_patches */); } if (compiler_options_->GetGenerateMiniDebugInfo()) { // Wait for the mini-debug-info generation to finish and write it to disk. 
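// (Illustrative note, not part of the original AOSP sources.) The wait below
// is the join point of a fork/join pattern set up in PrepareDebugInfo():
// MakeMiniDebugInfo() runs on a one-worker ThreadPool while the caller keeps
// writing other parts of the oat file. Reduced to a sketch, with `task`
// standing in for any Task subclass:
//
//   ThreadPool pool("Mini-debug-info writer", 1);  // Single background worker.
//   pool.AddTask(self, task);                      // Queue work; not yet running.
//   pool.StartWorkers(self);                       // Worker starts draining the queue.
//   // ... overlap unrelated I/O here ...
//   pool.Wait(self, /*do_work*/ true, /*may_hold_locks*/ false);  // Join.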
Thread* self = Thread::Current(); DCHECK(debug_info_thread_pool_ != nullptr); debug_info_thread_pool_->Wait(self, true, false); builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult()); } } } template bool ElfWriterQuick::End() { builder_->End(); if (compiler_options_->GetGenerateBuildId()) { uint8_t build_id[ElfBuilder::kBuildIdLen]; ComputeFileBuildId(&build_id); builder_->WriteBuildId(build_id); } return builder_->Good(); } template void ElfWriterQuick::ComputeFileBuildId( uint8_t (*build_id)[ElfBuilder::kBuildIdLen]) { constexpr int kBufSize = 8192; std::vector buffer(kBufSize); int64_t offset = 0; SHA_CTX ctx; SHA1_Init(&ctx); while (true) { int64_t bytes_read = elf_file_->Read(buffer.data(), kBufSize, offset); CHECK_GE(bytes_read, 0); if (bytes_read == 0) { // End of file. break; } SHA1_Update(&ctx, buffer.data(), bytes_read); offset += bytes_read; } SHA1_Final(*build_id, &ctx); } template OutputStream* ElfWriterQuick::GetStream() { return builder_->GetStream(); } template size_t ElfWriterQuick::GetLoadedSize() { return builder_->GetLoadedSize(); } // Explicit instantiations template class ElfWriterQuick; template class ElfWriterQuick; } // namespace art android-platform-art-8.1.0+r23/compiler/elf_writer_quick.h000066400000000000000000000023511336577252300235070ustar00rootroot00000000000000/* * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_ELF_WRITER_QUICK_H_ #define ART_COMPILER_ELF_WRITER_QUICK_H_ #include #include "arch/instruction_set.h" #include "elf_writer.h" #include "os.h" namespace art { class CompilerOptions; class InstructionSetFeatures; std::unique_ptr CreateElfWriterQuick(InstructionSet instruction_set, const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file); } // namespace art #endif // ART_COMPILER_ELF_WRITER_QUICK_H_ android-platform-art-8.1.0+r23/compiler/elf_writer_test.cc000066400000000000000000000143301336577252300235100ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "elf_file.h" #include "base/unix_file/fd_file.h" #include "common_compiler_test.h" #include "elf_file.h" #include "elf_file_impl.h" #include "elf_builder.h" #include "elf_writer_quick.h" #include "oat.h" #include "utils.h" namespace art { class ElfWriterTest : public CommonCompilerTest { protected: virtual void SetUp() { ReserveImageSpace(); CommonCompilerTest::SetUp(); } }; #define EXPECT_ELF_FILE_ADDRESS(ef, expected_value, symbol_name, build_map) \ do { \ void* addr = reinterpret_cast((ef)->FindSymbolAddress(SHT_DYNSYM, \ symbol_name, \ build_map)); \ EXPECT_NE(nullptr, addr); \ if ((expected_value) == nullptr) { \ (expected_value) = addr; \ } \ EXPECT_EQ(expected_value, addr); \ EXPECT_EQ(expected_value, (ef)->FindDynamicSymbolAddress(symbol_name)); \ } while (false) TEST_F(ElfWriterTest, dlsym) { std::string elf_location = GetCoreOatLocation(); std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA); LOG(INFO) << "elf_filename=" << elf_filename; UnreserveImageSpace(); void* dl_oatdata = nullptr; void* dl_oatexec = nullptr; void* dl_oatlastword = nullptr; std::unique_ptr file(OS::OpenFileForReading(elf_filename.c_str())); ASSERT_TRUE(file.get() != nullptr) << elf_filename; { std::string error_msg; std::unique_ptr ef(ElfFile::Open(file.get(), false, false, /*low_4gb*/false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", false); EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", false); EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", false); } { std::string error_msg; std::unique_ptr ef(ElfFile::Open(file.get(), false, false, /*low_4gb*/false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_ELF_FILE_ADDRESS(ef, dl_oatdata, "oatdata", true); EXPECT_ELF_FILE_ADDRESS(ef, dl_oatexec, "oatexec", true); EXPECT_ELF_FILE_ADDRESS(ef, dl_oatlastword, "oatlastword", true); } { uint8_t* base = reinterpret_cast(ART_BASE_ADDRESS); std::string error_msg; std::unique_ptr ef(ElfFile::Open(file.get(), false, true, /*low_4gb*/false, &error_msg, base)); CHECK(ef.get() != nullptr) << error_msg; CHECK(ef->Load(file.get(), false, /*low_4gb*/false, &error_msg)) << error_msg; EXPECT_EQ(reinterpret_cast(dl_oatdata) + reinterpret_cast(base), reinterpret_cast(ef->FindDynamicSymbolAddress("oatdata"))); EXPECT_EQ(reinterpret_cast(dl_oatexec) + reinterpret_cast(base), reinterpret_cast(ef->FindDynamicSymbolAddress("oatexec"))); EXPECT_EQ(reinterpret_cast(dl_oatlastword) + reinterpret_cast(base), reinterpret_cast(ef->FindDynamicSymbolAddress("oatlastword"))); } } TEST_F(ElfWriterTest, CheckBuildIdPresent) { std::string elf_location = GetCoreOatLocation(); std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA); LOG(INFO) << "elf_filename=" << elf_filename; std::unique_ptr file(OS::OpenFileForReading(elf_filename.c_str())); ASSERT_TRUE(file.get() != nullptr); { std::string error_msg; std::unique_ptr ef(ElfFile::Open(file.get(), false, false, /*low_4gb*/false, &error_msg)); CHECK(ef.get() != nullptr) << error_msg; EXPECT_TRUE(ef->HasSection(".note.gnu.build-id")); } } TEST_F(ElfWriterTest, EncodeDecodeOatPatches) { const std::vector> test_data { { 0, 4, 8, 15, 128, 200 }, { 8, 8 + 127 }, { 8, 8 + 128 }, { }, }; for (const auto& patch_locations : test_data) { constexpr int32_t delta = 0x11235813; // Encode patch locations. std::vector oat_patches; ElfBuilder::EncodeOatPatches(ArrayRef(patch_locations), &oat_patches); // Create buffer to be patched. 
std::vector initial_data(256); for (size_t i = 0; i < initial_data.size(); i++) { initial_data[i] = i; } // Patch manually. std::vector expected = initial_data; for (uintptr_t location : patch_locations) { typedef __attribute__((__aligned__(1))) uint32_t UnalignedAddress; *reinterpret_cast(expected.data() + location) += delta; } // Decode and apply patch locations. std::vector actual = initial_data; ElfFileImpl32::ApplyOatPatches( oat_patches.data(), oat_patches.data() + oat_patches.size(), delta, actual.data(), actual.data() + actual.size()); EXPECT_EQ(expected, actual); } } } // namespace art android-platform-art-8.1.0+r23/compiler/exception_test.cc000066400000000000000000000231101336577252300233400ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "base/arena_allocator.h" #include "base/callee_save_type.h" #include "base/enums.h" #include "class_linker.h" #include "common_runtime_test.h" #include "dex_file.h" #include "dex_file-inl.h" #include "gtest/gtest.h" #include "leb128.h" #include "mirror/class-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/stack_trace_element.h" #include "oat_quick_method_header.h" #include "optimizing/stack_map_stream.h" #include "runtime-inl.h" #include "scoped_thread_state_change-inl.h" #include "handle_scope-inl.h" #include "thread.h" namespace art { class ExceptionTest : public CommonRuntimeTest { protected: virtual void SetUp() { CommonRuntimeTest::SetUp(); ScopedObjectAccess soa(Thread::Current()); StackHandleScope<2> hs(soa.Self()); Handle class_loader( hs.NewHandle(soa.Decode(LoadDex("ExceptionHandle")))); my_klass_ = class_linker_->FindClass(soa.Self(), "LExceptionHandle;", class_loader); ASSERT_TRUE(my_klass_ != nullptr); Handle klass(hs.NewHandle(my_klass_)); class_linker_->EnsureInitialized(soa.Self(), klass, true, true); my_klass_ = klass.Get(); dex_ = my_klass_->GetDexCache()->GetDexFile(); uint32_t code_size = 12; for (size_t i = 0 ; i < code_size; i++) { fake_code_.push_back(0x70 | i); } ArenaPool pool; ArenaAllocator allocator(&pool); StackMapStream stack_maps(&allocator, kRuntimeISA); stack_maps.BeginStackMapEntry(/* dex_pc */ 3u, /* native_pc_offset */ 3u, /* register_mask */ 0u, /* sp_mask */ nullptr, /* num_dex_registers */ 0u, /* inlining_depth */ 0u); stack_maps.EndStackMapEntry(); size_t stack_maps_size = stack_maps.PrepareForFillIn(); size_t stack_maps_offset = stack_maps_size + sizeof(OatQuickMethodHeader); fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size()); MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size); stack_maps.FillInCodeInfo(stack_maps_region); OatQuickMethodHeader method_header(stack_maps_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size); memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header)); std::copy(fake_code_.begin(), fake_code_.end(), fake_header_code_and_maps_.begin() + stack_maps_offset); // Align the 
code. const size_t alignment = GetInstructionSetAlignment(kRuntimeISA); fake_header_code_and_maps_.reserve(fake_header_code_and_maps_.size() + alignment); const void* unaligned_code_ptr = fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size); size_t offset = dchecked_integral_cast(reinterpret_cast(unaligned_code_ptr)); size_t padding = RoundUp(offset, alignment) - offset; // Make sure no resizing takes place. CHECK_GE(fake_header_code_and_maps_.capacity(), fake_header_code_and_maps_.size() + padding); fake_header_code_and_maps_.insert(fake_header_code_and_maps_.begin(), padding, 0); const void* code_ptr = reinterpret_cast(unaligned_code_ptr) + padding; CHECK_EQ(code_ptr, static_cast(fake_header_code_and_maps_.data() + (fake_header_code_and_maps_.size() - code_size))); if (kRuntimeISA == kArm) { // Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer(). CHECK_ALIGNED(stack_maps_offset, 2); } method_f_ = my_klass_->FindClassMethod("f", "()I", kRuntimePointerSize); ASSERT_TRUE(method_f_ != nullptr); ASSERT_FALSE(method_f_->IsDirect()); method_f_->SetEntryPointFromQuickCompiledCode(code_ptr); method_g_ = my_klass_->FindClassMethod("g", "(I)V", kRuntimePointerSize); ASSERT_TRUE(method_g_ != nullptr); ASSERT_FALSE(method_g_->IsDirect()); method_g_->SetEntryPointFromQuickCompiledCode(code_ptr); } const DexFile* dex_; std::vector fake_code_; std::vector fake_header_code_and_maps_; ArtMethod* method_f_; ArtMethod* method_g_; private: mirror::Class* my_klass_; }; TEST_F(ExceptionTest, FindCatchHandler) { ScopedObjectAccess soa(Thread::Current()); const DexFile::CodeItem* code_item = dex_->GetCodeItem(method_f_->GetCodeItemOffset()); ASSERT_TRUE(code_item != nullptr); ASSERT_EQ(2u, code_item->tries_size_); ASSERT_NE(0u, code_item->insns_size_in_code_units_); const DexFile::TryItem *t0, *t1; t0 = dex_->GetTryItems(*code_item, 0); t1 = dex_->GetTryItems(*code_item, 1); EXPECT_LE(t0->start_addr_, t1->start_addr_); { CatchHandlerIterator iter(*code_item, 4 /* Dex PC in the first try block */); EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); ASSERT_TRUE(iter.HasNext()); iter.Next(); EXPECT_STREQ("Ljava/lang/Exception;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); ASSERT_TRUE(iter.HasNext()); iter.Next(); EXPECT_FALSE(iter.HasNext()); } { CatchHandlerIterator iter(*code_item, 8 /* Dex PC in the second try block */); EXPECT_STREQ("Ljava/io/IOException;", dex_->StringByTypeIdx(iter.GetHandlerTypeIndex())); ASSERT_TRUE(iter.HasNext()); iter.Next(); EXPECT_FALSE(iter.HasNext()); } { CatchHandlerIterator iter(*code_item, 11 /* Dex PC not in any try block */); EXPECT_FALSE(iter.HasNext()); } } TEST_F(ExceptionTest, StackTraceElement) { Thread* thread = Thread::Current(); thread->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); CHECK(started); JNIEnv* env = thread->GetJniEnv(); ScopedObjectAccess soa(env); std::vector fake_stack; Runtime* r = Runtime::Current(); r->SetInstructionSet(kRuntimeISA); ArtMethod* save_method = r->CreateCalleeSaveMethod(); r->SetCalleeSaveMethod(save_method, CalleeSaveType::kSaveAllCalleeSaves); QuickMethodFrameInfo frame_info = r->GetRuntimeMethodFrameInfo(save_method); ASSERT_EQ(kStackAlignment, 16U); // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t)); // Create three fake stack frames with mapping data created in SetUp. We map offset 3 in the // code to dex pc 3. 
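// (Illustrative note, not part of the original AOSP sources.) Each fake quick
// frame pushed below mimics the layout the stack walker expects, roughly:
//
//   [ ArtMethod* ]  <- start of frame
//   [ padding    ]
//   [ padding    ]
//   [ return pc  ]  <- used to find the caller and map back to a dex pc
//
// so pushing { method, 0, 0, return_pc } fabricates one 16-byte frame whose
// return pc was produced by ToNativeQuickPc() for dex pc 3.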
const uint32_t dex_pc = 3; // Create the stack frame for the callee save method, expected by the runtime. fake_stack.push_back(reinterpret_cast(save_method)); for (size_t i = 0; i < frame_info.FrameSizeInBytes() - 2 * sizeof(uintptr_t); i += sizeof(uintptr_t)) { fake_stack.push_back(0); } fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( method_g_, dex_pc, /* is_catch_handler */ false)); // return pc // Create/push fake 16byte stack frame for method g fake_stack.push_back(reinterpret_cast(method_g_)); fake_stack.push_back(0); fake_stack.push_back(0); fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc( method_g_, dex_pc, /* is_catch_handler */ false)); // return pc // Create/push fake 16byte stack frame for method f fake_stack.push_back(reinterpret_cast(method_f_)); fake_stack.push_back(0); fake_stack.push_back(0); fake_stack.push_back(0xEBAD6070); // return pc // Push Method* of null to terminate the trace fake_stack.push_back(0); // Push null values which will become null incoming arguments. fake_stack.push_back(0); fake_stack.push_back(0); fake_stack.push_back(0); // Set up thread to appear as if we called out of method_g_ at pc dex 3 thread->SetTopOfStack(reinterpret_cast(&fake_stack[0])); jobject internal = thread->CreateInternalStackTrace(soa); ASSERT_TRUE(internal != nullptr); jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal); ASSERT_TRUE(ste_array != nullptr); auto trace_array = soa.Decode>(ste_array); ASSERT_TRUE(trace_array != nullptr); ASSERT_TRUE(trace_array->Get(0) != nullptr); EXPECT_STREQ("ExceptionHandle", trace_array->Get(0)->GetDeclaringClass()->ToModifiedUtf8().c_str()); EXPECT_STREQ("ExceptionHandle.java", trace_array->Get(0)->GetFileName()->ToModifiedUtf8().c_str()); EXPECT_STREQ("g", trace_array->Get(0)->GetMethodName()->ToModifiedUtf8().c_str()); EXPECT_EQ(37, trace_array->Get(0)->GetLineNumber()); ASSERT_TRUE(trace_array->Get(1) != nullptr); EXPECT_STREQ("ExceptionHandle", trace_array->Get(1)->GetDeclaringClass()->ToModifiedUtf8().c_str()); EXPECT_STREQ("ExceptionHandle.java", trace_array->Get(1)->GetFileName()->ToModifiedUtf8().c_str()); EXPECT_STREQ("f", trace_array->Get(1)->GetMethodName()->ToModifiedUtf8().c_str()); EXPECT_EQ(22, trace_array->Get(1)->GetLineNumber()); thread->SetTopOfStack(nullptr); // Disarm the assertion that no code is running when we detach. } } // namespace art android-platform-art-8.1.0+r23/compiler/generate-operator-out.py000077700000000000000000000000001336577252300327512../tools/generate-operator-out.pyustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/image_test.cc000066400000000000000000000147451336577252300224420ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

#include <memory>
#include <string>

#include "image_test.h"

#include "image.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

namespace art {

TEST_F(ImageTest, TestImageLayout) {
  std::vector<size_t> image_sizes;
  std::vector<size_t> image_sizes_extra;
  // Compile multi-image with ImageLayoutA being the last image.
  {
    CompilationHelper helper;
    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutA", {"LMyClass;"});
    image_sizes = helper.GetImageObjectSectionSizes();
  }
  TearDown();
  runtime_.reset();
  SetUp();
  // Compile multi-image with ImageLayoutB being the last image.
  {
    CompilationHelper helper;
    Compile(ImageHeader::kStorageModeUncompressed, helper, "ImageLayoutB", {"LMyClass;"});
    image_sizes_extra = helper.GetImageObjectSectionSizes();
  }
  // Make sure that the new stuff in the clinit in ImageLayoutB is in the last image and not in the
  // first two images.
  ASSERT_EQ(image_sizes.size(), image_sizes_extra.size());
  // Sizes of the images should be the same. These sizes are for the whole image unrounded.
  for (size_t i = 0; i < image_sizes.size() - 1; ++i) {
    EXPECT_EQ(image_sizes[i], image_sizes_extra[i]);
  }
  // Last image should be larger since it has a hash map and a string.
  EXPECT_LT(image_sizes.back(), image_sizes_extra.back());
}

TEST_F(ImageTest, ImageHeaderIsValid) {
  uint32_t image_begin = ART_BASE_ADDRESS;
  uint32_t image_size_ = 16 * KB;
  uint32_t image_roots = ART_BASE_ADDRESS + (1 * KB);
  uint32_t oat_checksum = 0;
  uint32_t oat_file_begin = ART_BASE_ADDRESS + (4 * KB);  // page aligned
  uint32_t oat_data_begin = ART_BASE_ADDRESS + (8 * KB);  // page aligned
  uint32_t oat_data_end = ART_BASE_ADDRESS + (9 * KB);
  uint32_t oat_file_end = ART_BASE_ADDRESS + (10 * KB);
  ImageSection sections[ImageHeader::kSectionCount];
  ImageHeader image_header(image_begin,
                           image_size_,
                           sections,
                           image_roots,
                           oat_checksum,
                           oat_file_begin,
                           oat_data_begin,
                           oat_data_end,
                           oat_file_end,
                           /*boot_image_begin*/0U,
                           /*boot_image_size*/0U,
                           /*boot_oat_begin*/0U,
                           /*boot_oat_size_*/0U,
                           sizeof(void*),
                           /*compile_pic*/false,
                           /*is_pic*/false,
                           ImageHeader::kDefaultStorageMode,
                           /*data_size*/0u);
  ASSERT_TRUE(image_header.IsValid());
  ASSERT_TRUE(!image_header.IsAppImage());

  char* magic = const_cast<char*>(image_header.GetMagic());
  strcpy(magic, "");  // bad magic
  ASSERT_FALSE(image_header.IsValid());
  strcpy(magic, "art\n000");  // bad version
  ASSERT_FALSE(image_header.IsValid());
}

// Test that the pointer to quick code is the same in
// a default method of an interface and in a copied method
// of a class which implements the interface. This should be true
// only if the copied method and the origin method are located in the
// same oat file.
TEST_F(ImageTest, TestDefaultMethods) {
  CompilationHelper helper;
  Compile(ImageHeader::kStorageModeUncompressed,
          helper,
          "DefaultMethods",
          {"LIface;", "LImpl;", "LIterableBase;"});

  PointerSize pointer_size = class_linker_->GetImagePointerSize();
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);

  // Test that the pointer to quick code is the same in the origin method
  // and in the copied method from the same oat file.
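// (Illustrative note, not part of the original AOSP sources.) A "copied"
// method is the implementing class's own ArtMethod slot that reuses the
// interface default method's code. The assertions below boil down to:
//
//   const void* a = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
//   const void* b = copied->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
//   // Same oat file      => a == b (shared quick code).
//   // Different oat file => b is the quick-to-interpreter bridge instead.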
mirror::Class* iface_klass = class_linker_->LookupClass( self, "LIface;", ObjPtr()); ASSERT_NE(nullptr, iface_klass); ArtMethod* origin = iface_klass->FindInterfaceMethod("defaultMethod", "()V", pointer_size); ASSERT_NE(nullptr, origin); ASSERT_TRUE(origin->GetDeclaringClass() == iface_klass); const void* code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); // The origin method should have a pointer to quick code ASSERT_NE(nullptr, code); ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code)); mirror::Class* impl_klass = class_linker_->LookupClass( self, "LImpl;", ObjPtr()); ASSERT_NE(nullptr, impl_klass); ArtMethod* copied = FindCopiedMethod(origin, impl_klass); ASSERT_NE(nullptr, copied); // the copied method should have pointer to the same quick code as the origin method ASSERT_EQ(code, copied->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)); // Test the origin method has pointer to quick code // but the copied method has pointer to interpreter // because these methods are in different oat files. mirror::Class* iterable_klass = class_linker_->LookupClass( self, "Ljava/lang/Iterable;", ObjPtr()); ASSERT_NE(nullptr, iterable_klass); origin = iterable_klass->FindClassMethod( "forEach", "(Ljava/util/function/Consumer;)V", pointer_size); ASSERT_NE(nullptr, origin); ASSERT_FALSE(origin->IsDirect()); ASSERT_TRUE(origin->GetDeclaringClass() == iterable_klass); code = origin->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); // the origin method should have a pointer to quick code ASSERT_NE(nullptr, code); ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code)); mirror::Class* iterablebase_klass = class_linker_->LookupClass( self, "LIterableBase;", ObjPtr()); ASSERT_NE(nullptr, iterablebase_klass); copied = FindCopiedMethod(origin, iterablebase_klass); ASSERT_NE(nullptr, copied); code = copied->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size); // the copied method should have a pointer to interpreter ASSERT_TRUE(class_linker_->IsQuickToInterpreterBridge(code)); } } // namespace art android-platform-art-8.1.0+r23/compiler/image_test.h000066400000000000000000000505431336577252300223000ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_IMAGE_TEST_H_ #define ART_COMPILER_IMAGE_TEST_H_ #include "image.h" #include #include #include #include "android-base/stringprintf.h" #include "art_method-inl.h" #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" #include "compiler_callbacks.h" #include "common_compiler_test.h" #include "debug/method_debug_info.h" #include "dex/quick_compiler_callbacks.h" #include "driver/compiler_options.h" #include "elf_writer.h" #include "elf_writer_quick.h" #include "gc/space/image_space.h" #include "image_writer.h" #include "linker/buffered_output_stream.h" #include "linker/file_output_stream.h" #include "linker/multi_oat_relative_patcher.h" #include "lock_word.h" #include "mirror/object-inl.h" #include "oat_writer.h" #include "scoped_thread_state_change-inl.h" #include "signal_catcher.h" #include "utils.h" namespace art { static const uintptr_t kRequestedImageBase = ART_BASE_ADDRESS; struct CompilationHelper { std::vector dex_file_locations; std::vector image_locations; std::vector> extra_dex_files; std::vector image_files; std::vector oat_files; std::vector vdex_files; std::string image_dir; void Compile(CompilerDriver* driver, ImageHeader::StorageMode storage_mode); std::vector GetImageObjectSectionSizes(); ~CompilationHelper(); }; class ImageTest : public CommonCompilerTest { protected: virtual void SetUp() { ReserveImageSpace(); CommonCompilerTest::SetUp(); } void TestWriteRead(ImageHeader::StorageMode storage_mode); void Compile(ImageHeader::StorageMode storage_mode, CompilationHelper& out_helper, const std::string& extra_dex = "", const std::initializer_list& image_classes = {}); void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE { CommonCompilerTest::SetUpRuntimeOptions(options); QuickCompilerCallbacks* new_callbacks = new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileBootImage); new_callbacks->SetVerificationResults(verification_results_.get()); callbacks_.reset(new_callbacks); options->push_back(std::make_pair("compilercallbacks", callbacks_.get())); } std::unordered_set* GetImageClasses() OVERRIDE { return new std::unordered_set(image_classes_); } ArtMethod* FindCopiedMethod(ArtMethod* origin, mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) { PointerSize pointer_size = class_linker_->GetImagePointerSize(); for (ArtMethod& m : klass->GetCopiedMethods(pointer_size)) { if (strcmp(origin->GetName(), m.GetName()) == 0 && origin->GetSignature() == m.GetSignature()) { return &m; } } return nullptr; } private: std::unordered_set image_classes_; }; inline CompilationHelper::~CompilationHelper() { for (ScratchFile& image_file : image_files) { image_file.Unlink(); } for (ScratchFile& oat_file : oat_files) { oat_file.Unlink(); } for (ScratchFile& vdex_file : vdex_files) { vdex_file.Unlink(); } const int rmdir_result = rmdir(image_dir.c_str()); CHECK_EQ(0, rmdir_result); } inline std::vector CompilationHelper::GetImageObjectSectionSizes() { std::vector ret; for (ScratchFile& image_file : image_files) { std::unique_ptr file(OS::OpenFileForReading(image_file.GetFilename().c_str())); CHECK(file.get() != nullptr); ImageHeader image_header; CHECK_EQ(file->ReadFully(&image_header, sizeof(image_header)), true); CHECK(image_header.IsValid()); ret.push_back(image_header.GetImageSize()); } return ret; } inline void CompilationHelper::Compile(CompilerDriver* driver, ImageHeader::StorageMode storage_mode) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); std::vector class_path = class_linker->GetBootClassPath(); for 
(const std::unique_ptr& dex_file : extra_dex_files) { { ScopedObjectAccess soa(Thread::Current()); // Inject in boot class path so that the compiler driver can see it. class_linker->AppendToBootClassPath(soa.Self(), *dex_file.get()); } class_path.push_back(dex_file.get()); } // Enable write for dex2dex. for (const DexFile* dex_file : class_path) { dex_file_locations.push_back(dex_file->GetLocation()); if (dex_file->IsReadOnly()) { dex_file->EnableWrite(); } } { // Create a generic tmp file, to be the base of the .art and .oat temporary files. ScratchFile location; for (int i = 0; i < static_cast(class_path.size()); ++i) { std::string cur_location = android::base::StringPrintf("%s-%d.art", location.GetFilename().c_str(), i); image_locations.push_back(ScratchFile(cur_location)); } } std::vector image_filenames; for (ScratchFile& file : image_locations) { std::string image_filename(GetSystemImageFilename(file.GetFilename().c_str(), kRuntimeISA)); image_filenames.push_back(image_filename); size_t pos = image_filename.rfind('/'); CHECK_NE(pos, std::string::npos) << image_filename; if (image_dir.empty()) { image_dir = image_filename.substr(0, pos); int mkdir_result = mkdir(image_dir.c_str(), 0700); CHECK_EQ(0, mkdir_result) << image_dir; } image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str()))); } std::vector oat_filenames; std::vector vdex_filenames; for (const std::string& image_filename : image_filenames) { std::string oat_filename = ReplaceFileExtension(image_filename, "oat"); oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str()))); oat_filenames.push_back(oat_filename); std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex"); vdex_files.push_back(ScratchFile(OS::CreateEmptyFile(vdex_filename.c_str()))); vdex_filenames.push_back(vdex_filename); } std::unordered_map dex_file_to_oat_index_map; std::vector oat_filename_vector; for (const std::string& file : oat_filenames) { oat_filename_vector.push_back(file.c_str()); } std::vector image_filename_vector; for (const std::string& file : image_filenames) { image_filename_vector.push_back(file.c_str()); } size_t image_idx = 0; for (const DexFile* dex_file : class_path) { dex_file_to_oat_index_map.emplace(dex_file, image_idx); ++image_idx; } // TODO: compile_pic should be a test argument. 
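// (Illustrative note, not part of the original AOSP sources.) The block below
// compresses dex2oat's image pipeline into one test; the ordering matters:
//   1. driver->CompileAll()                      - compile all dex methods
//   2. ElfWriter::Start() / StartRoData()        - open the output containers
//   3. OatWriter::WriteAndOpenDexFiles()         - embed dex files in oat/vdex
//   4. ImageWriter::PrepareImageAddressSpace()   - lay out image objects
//   5. OatWriter::PrepareLayout() + WriteRodata()/WriteCode() - emit sections
//   6. ElfWriter::WriteDynamicSection()/End()    - finalize the ELF file
//   7. ImageWriter::Write() + ElfWriter::Fixup() - emit the image and rebase oat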
std::unique_ptr writer(new ImageWriter(*driver, kRequestedImageBase, /*compile_pic*/false, /*compile_app_image*/false, storage_mode, oat_filename_vector, dex_file_to_oat_index_map, /*dirty_image_objects*/nullptr)); { { jobject class_loader = nullptr; TimingLogger timings("ImageTest::WriteRead", false, false); TimingLogger::ScopedTiming t("CompileAll", &timings); driver->SetDexFilesForOatFile(class_path); driver->CompileAll(class_loader, class_path, &timings); t.NewTiming("WriteElf"); SafeMap key_value_store; std::vector dex_filename_vector; for (size_t i = 0; i < class_path.size(); ++i) { dex_filename_vector.push_back(""); } key_value_store.Put(OatHeader::kBootClassPathKey, gc::space::ImageSpace::GetMultiImageBootClassPath( dex_filename_vector, oat_filename_vector, image_filename_vector)); std::vector> elf_writers; std::vector> oat_writers; for (ScratchFile& oat_file : oat_files) { elf_writers.emplace_back(CreateElfWriterQuick(driver->GetInstructionSet(), driver->GetInstructionSetFeatures(), &driver->GetCompilerOptions(), oat_file.GetFile())); elf_writers.back()->Start(); oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true, &timings, /*profile_compilation_info*/nullptr)); } std::vector rodata; std::vector> opened_dex_files_map; std::vector> opened_dex_files; // Now that we have finalized key_value_store_, start writing the oat file. for (size_t i = 0, size = oat_writers.size(); i != size; ++i) { const DexFile* dex_file = class_path[i]; rodata.push_back(elf_writers[i]->StartRoData()); ArrayRef raw_dex_file( reinterpret_cast(&dex_file->GetHeader()), dex_file->GetHeader().file_size_); oat_writers[i]->AddRawDexFileSource(raw_dex_file, dex_file->GetLocation().c_str(), dex_file->GetLocationChecksum()); std::unique_ptr cur_opened_dex_files_map; std::vector> cur_opened_dex_files; bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles( kIsVdexEnabled ? vdex_files[i].GetFile() : oat_files[i].GetFile(), rodata.back(), driver->GetInstructionSet(), driver->GetInstructionSetFeatures(), &key_value_store, /* verify */ false, // Dex files may be dex-to-dex-ed, don't verify. 
/* update_input_vdex */ false, &cur_opened_dex_files_map, &cur_opened_dex_files); ASSERT_TRUE(dex_files_ok); if (cur_opened_dex_files_map != nullptr) { opened_dex_files_map.push_back(std::move(cur_opened_dex_files_map)); for (std::unique_ptr& cur_dex_file : cur_opened_dex_files) { // dex_file_oat_index_map_.emplace(dex_file.get(), i); opened_dex_files.push_back(std::move(cur_dex_file)); } } else { ASSERT_TRUE(cur_opened_dex_files.empty()); } } bool image_space_ok = writer->PrepareImageAddressSpace(); ASSERT_TRUE(image_space_ok); if (kIsVdexEnabled) { for (size_t i = 0, size = vdex_files.size(); i != size; ++i) { std::unique_ptr vdex_out = std::make_unique( std::make_unique(vdex_files[i].GetFile())); oat_writers[i]->WriteVerifierDeps(vdex_out.get(), nullptr); oat_writers[i]->WriteChecksumsAndVdexHeader(vdex_out.get()); } } for (size_t i = 0, size = oat_files.size(); i != size; ++i) { linker::MultiOatRelativePatcher patcher(driver->GetInstructionSet(), driver->GetInstructionSetFeatures()); OatWriter* const oat_writer = oat_writers[i].get(); ElfWriter* const elf_writer = elf_writers[i].get(); std::vector cur_dex_files(1u, class_path[i]); oat_writer->Initialize(driver, writer.get(), cur_dex_files); oat_writer->PrepareLayout(&patcher); size_t rodata_size = oat_writer->GetOatHeader().GetExecutableOffset(); size_t text_size = oat_writer->GetOatSize() - rodata_size; elf_writer->PrepareDynamicSection(rodata_size, text_size, oat_writer->GetBssSize(), oat_writer->GetBssMethodsOffset(), oat_writer->GetBssRootsOffset()); writer->UpdateOatFileLayout(i, elf_writer->GetLoadedSize(), oat_writer->GetOatDataOffset(), oat_writer->GetOatSize()); bool rodata_ok = oat_writer->WriteRodata(rodata[i]); ASSERT_TRUE(rodata_ok); elf_writer->EndRoData(rodata[i]); OutputStream* text = elf_writer->StartText(); bool text_ok = oat_writer->WriteCode(text); ASSERT_TRUE(text_ok); elf_writer->EndText(text); bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(), 0u, 0u, 0u); ASSERT_TRUE(header_ok); writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader()); elf_writer->WriteDynamicSection(); elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo()); bool success = elf_writer->End(); ASSERT_TRUE(success); } } bool success_image = writer->Write(kInvalidFd, image_filename_vector, oat_filename_vector); ASSERT_TRUE(success_image); for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) { const char* oat_filename = oat_filenames[i].c_str(); std::unique_ptr oat_file(OS::OpenFileReadWrite(oat_filename)); ASSERT_TRUE(oat_file != nullptr); bool success_fixup = ElfWriter::Fixup(oat_file.get(), writer->GetOatDataBegin(i)); ASSERT_TRUE(success_fixup); ASSERT_EQ(oat_file->FlushCloseOrErase(), 0) << "Could not flush and close oat file " << oat_filename; } } } inline void ImageTest::Compile(ImageHeader::StorageMode storage_mode, CompilationHelper& helper, const std::string& extra_dex, const std::initializer_list& image_classes) { for (const std::string& image_class : image_classes) { image_classes_.insert(image_class); } CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U); // Set inline filter values. compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits); image_classes_.clear(); if (!extra_dex.empty()) { helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str()); } helper.Compile(compiler_driver_.get(), storage_mode); if (image_classes.begin() != image_classes.end()) { // Make sure the class got initialized. 
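// (Illustrative note, not part of the original AOSP sources.) Classes listed
// in image_classes are expected to have had their class initializers run at
// compile time, so their initialized static state is baked into the .art
// file; the loop below asserts that invariant for every requested class.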
ScopedObjectAccess soa(Thread::Current()); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); for (const std::string& image_class : image_classes) { mirror::Class* klass = class_linker->FindSystemClass(Thread::Current(), image_class.c_str()); EXPECT_TRUE(klass != nullptr); EXPECT_TRUE(klass->IsInitialized()); } } } inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) { CompilationHelper helper; Compile(storage_mode, /*out*/ helper); std::vector image_file_sizes; for (ScratchFile& image_file : helper.image_files) { std::unique_ptr file(OS::OpenFileForReading(image_file.GetFilename().c_str())); ASSERT_TRUE(file.get() != nullptr); ImageHeader image_header; ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true); ASSERT_TRUE(image_header.IsValid()); const auto& bitmap_section = image_header.GetImageSection(ImageHeader::kSectionImageBitmap); ASSERT_GE(bitmap_section.Offset(), sizeof(image_header)); ASSERT_NE(0U, bitmap_section.Size()); gc::Heap* heap = Runtime::Current()->GetHeap(); ASSERT_TRUE(heap->HaveContinuousSpaces()); gc::space::ContinuousSpace* space = heap->GetNonMovingSpace(); ASSERT_FALSE(space->IsImageSpace()); ASSERT_TRUE(space != nullptr); ASSERT_TRUE(space->IsMallocSpace()); image_file_sizes.push_back(file->GetLength()); } ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr); std::unordered_set image_classes(*compiler_driver_->GetImageClasses()); // Need to delete the compiler since it has worker threads which are attached to runtime. compiler_driver_.reset(); // Tear down old runtime before making a new one, clearing out misc state. // Remove the reservation of the memory for use to load the image. // Need to do this before we reset the runtime. UnreserveImageSpace(); helper.extra_dex_files.clear(); runtime_.reset(); java_lang_dex_file_ = nullptr; MemMap::Init(); RuntimeOptions options; std::string image("-Ximage:"); image.append(helper.image_locations[0].GetFilename()); options.push_back(std::make_pair(image.c_str(), static_cast(nullptr))); // By default the compiler this creates will not include patch information. options.push_back(std::make_pair("-Xnorelocate", nullptr)); if (!Runtime::Create(options, false)) { LOG(FATAL) << "Failed to create runtime"; return; } runtime_.reset(Runtime::Current()); // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, // give it away now and then switch to a more managable ScopedObjectAccess. Thread::Current()->TransitionFromRunnableToSuspended(kNative); ScopedObjectAccess soa(Thread::Current()); ASSERT_TRUE(runtime_.get() != nullptr); class_linker_ = runtime_->GetClassLinker(); gc::Heap* heap = Runtime::Current()->GetHeap(); ASSERT_TRUE(heap->HasBootImageSpace()); ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace()); // We loaded the runtime with an explicit image, so it must exist. ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size()); for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) { std::unique_ptr dex( LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str())); ASSERT_TRUE(dex != nullptr); uint64_t image_file_size = image_file_sizes[i]; gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i]; ASSERT_TRUE(image_space != nullptr); if (storage_mode == ImageHeader::kStorageModeUncompressed) { // Uncompressed, image should be smaller than file. 
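// (Illustrative note, not part of the original AOSP sources.) On disk an
// image file is laid out as
//   [ ImageHeader | image data (raw or LZ4) | page-aligned image bitmap ]
// while GetImageSize() covers only the header plus the uncompressed data.
// So an uncompressed file is at least as big as the in-memory image (bitmap
// overhead), and for images well above one page the compressed file should
// be smaller than the image, as the two branches below assert.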
ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size); } else if (image_file_size > 16 * KB) { // Compressed, file should be smaller than image. Not really valid for small images. ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize()); } image_space->VerifyImageAllocations(); uint8_t* image_begin = image_space->Begin(); uint8_t* image_end = image_space->End(); if (i == 0) { // This check is only valid for image 0. CHECK_EQ(kRequestedImageBase, reinterpret_cast(image_begin)); } for (size_t j = 0; j < dex->NumClassDefs(); ++j) { const DexFile::ClassDef& class_def = dex->GetClassDef(j); const char* descriptor = dex->GetClassDescriptor(class_def); mirror::Class* klass = class_linker_->FindSystemClass(soa.Self(), descriptor); EXPECT_TRUE(klass != nullptr) << descriptor; if (image_classes.find(descriptor) == image_classes.end()) { EXPECT_TRUE(reinterpret_cast(klass) >= image_end || reinterpret_cast(klass) < image_begin) << descriptor; } else { // Image classes should be located inside the image. EXPECT_LT(image_begin, reinterpret_cast(klass)) << descriptor; EXPECT_LT(reinterpret_cast(klass), image_end) << descriptor; } EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false))); } } } } // namespace art #endif // ART_COMPILER_IMAGE_TEST_H_ android-platform-art-8.1.0+r23/compiler/image_write_read_test.cc000066400000000000000000000016751336577252300246450ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "image_test.h" namespace art { TEST_F(ImageTest, WriteReadUncompressed) { TestWriteRead(ImageHeader::kStorageModeUncompressed); } TEST_F(ImageTest, WriteReadLZ4) { TestWriteRead(ImageHeader::kStorageModeLZ4); } TEST_F(ImageTest, WriteReadLZ4HC) { TestWriteRead(ImageHeader::kStorageModeLZ4HC); } } // namespace art android-platform-art-8.1.0+r23/compiler/image_writer.cc000066400000000000000000003607161336577252300230010ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "image_writer.h" #include #include #include #include #include #include #include #include "art_field-inl.h" #include "art_method-inl.h" #include "base/callee_save_type.h" #include "base/enums.h" #include "base/logging.h" #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" #include "compiled_method.h" #include "dex_file-inl.h" #include "dex_file_types.h" #include "driver/compiler_driver.h" #include "elf_file.h" #include "elf_utils.h" #include "elf_writer.h" #include "gc/accounting/card_table-inl.h" #include "gc/accounting/heap_bitmap.h" #include "gc/accounting/space_bitmap-inl.h" #include "gc/collector/concurrent_copying.h" #include "gc/heap.h" #include "gc/heap-visit-objects-inl.h" #include "gc/space/large_object_space.h" #include "gc/space/space-inl.h" #include "gc/verification.h" #include "globals.h" #include "handle_scope-inl.h" #include "image.h" #include "imt_conflict_table.h" #include "jni_internal.h" #include "linear_alloc.h" #include "lock_word.h" #include "mirror/array-inl.h" #include "mirror/class-inl.h" #include "mirror/class_ext.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "mirror/dex_cache-inl.h" #include "mirror/executable.h" #include "mirror/method.h" #include "mirror/object-inl.h" #include "mirror/object-refvisitor-inl.h" #include "mirror/object_array-inl.h" #include "mirror/string-inl.h" #include "oat.h" #include "oat_file.h" #include "oat_file_manager.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "utils/dex_cache_arrays_layout-inl.h" using ::art::mirror::Class; using ::art::mirror::DexCache; using ::art::mirror::Object; using ::art::mirror::ObjectArray; using ::art::mirror::String; namespace art { // Separate objects into multiple bins to optimize dirty memory use. static constexpr bool kBinObjects = true; // Return true if an object is already in an image space. bool ImageWriter::IsInBootImage(const void* obj) const { gc::Heap* const heap = Runtime::Current()->GetHeap(); if (!compile_app_image_) { DCHECK(heap->GetBootImageSpaces().empty()); return false; } for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { const uint8_t* image_begin = boot_image_space->Begin(); // Real image end including ArtMethods and ArtField sections. const uint8_t* image_end = image_begin + boot_image_space->GetImageHeader().GetImageSize(); if (image_begin <= obj && obj < image_end) { return true; } } return false; } bool ImageWriter::IsInBootOatFile(const void* ptr) const { gc::Heap* const heap = Runtime::Current()->GetHeap(); if (!compile_app_image_) { DCHECK(heap->GetBootImageSpaces().empty()); return false; } for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { const ImageHeader& image_header = boot_image_space->GetImageHeader(); if (image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd()) { return true; } } return false; } static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) { auto visitor = [](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(obj != nullptr); Class* klass = obj->GetClass(); if (klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile)) { ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); // Null out the cookie to enable determinism. 
      // b/34090128
      field->SetObject</*kTransactionActive*/ false>(obj, nullptr);
    }
  };
  Runtime::Current()->GetHeap()->VisitObjects(visitor);
}

bool ImageWriter::PrepareImageAddressSpace() {
  target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  {
    ScopedObjectAccess soa(Thread::Current());
    PruneNonImageClasses();  // Remove junk
    if (compile_app_image_) {
      // Clear dex file cookies for app images to enable app image determinism. This is required
      // since the cookie field contains long pointers to DexFiles which are not deterministic.
      // b/34090128
      ClearDexFileCookies();
    } else {
      // Avoid for app image since this may increase RAM and image size.
      ComputeLazyFieldsForImageClasses();  // Add useful information
    }
  }
  heap->CollectGarbage(false);  // Remove garbage.

  if (kIsDebugBuild) {
    ScopedObjectAccess soa(Thread::Current());
    CheckNonImageClassesRemoved();
  }

  {
    ScopedObjectAccess soa(Thread::Current());
    CalculateNewObjectOffsets();
  }

  // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_
  // and bin size sums being calculated.
  if (!AllocMemory()) {
    return false;
  }

  return true;
}

bool ImageWriter::Write(int image_fd,
                        const std::vector<const char*>& image_filenames,
                        const std::vector<const char*>& oat_filenames) {
  // If image_fd or oat_fd are not kInvalidFd then we may have empty strings in image_filenames or
  // oat_filenames.
  CHECK(!image_filenames.empty());
  if (image_fd != kInvalidFd) {
    CHECK_EQ(image_filenames.size(), 1u);
  }
  CHECK(!oat_filenames.empty());
  CHECK_EQ(image_filenames.size(), oat_filenames.size());

  {
    ScopedObjectAccess soa(Thread::Current());
    for (size_t i = 0; i < oat_filenames.size(); ++i) {
      CreateHeader(i);
      CopyAndFixupNativeData(i);
    }
  }

  {
    // TODO: heap validation can't handle these fix up passes.
    ScopedObjectAccess soa(Thread::Current());
    Runtime::Current()->GetHeap()->DisableObjectValidation();
    CopyAndFixupObjects();
  }

  for (size_t i = 0; i < image_filenames.size(); ++i) {
    const char* image_filename = image_filenames[i];
    ImageInfo& image_info = GetImageInfo(i);
    std::unique_ptr<File> image_file;
    if (image_fd != kInvalidFd) {
      if (strlen(image_filename) == 0u) {
        image_file.reset(new File(image_fd, unix_file::kCheckSafeUsage));
        // Empty the file in case it already exists.
        if (image_file != nullptr) {
          TEMP_FAILURE_RETRY(image_file->SetLength(0));
          TEMP_FAILURE_RETRY(image_file->Flush());
        }
      } else {
        LOG(ERROR) << "image fd " << image_fd << " name " << image_filename;
      }
    } else {
      image_file.reset(OS::CreateEmptyFile(image_filename));
    }

    if (image_file == nullptr) {
      LOG(ERROR) << "Failed to open image file " << image_filename;
      return false;
    }

    if (!compile_app_image_ && fchmod(image_file->Fd(), 0644) != 0) {
      PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
      image_file->Erase();
      return false;
    }

    std::unique_ptr<char[]> compressed_data;
    // Image data size excludes the bitmap and the header.
    ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
    const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader);
    char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
    size_t data_size;
    const char* image_data_to_write;
    const uint64_t compress_start_time = NanoTime();

    CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
    switch (image_storage_mode_) {
      case ImageHeader::kStorageModeLZ4HC:  // Fall-through.
      case ImageHeader::kStorageModeLZ4: {
        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
        compressed_data.reset(new char[compressed_max_size]);
        data_size = LZ4_compress_default(
            reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
            &compressed_data[0],
            image_data_size,
            compressed_max_size);
        break;
      }
      /*
       * Disabled due to image_test64 flakiness. Both use same decompression. b/27560444
      case ImageHeader::kStorageModeLZ4HC: {
        // Bound is same as non HC.
        const size_t compressed_max_size = LZ4_compressBound(image_data_size);
        compressed_data.reset(new char[compressed_max_size]);
        data_size = LZ4_compressHC(
            reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
            &compressed_data[0],
            image_data_size);
        break;
      }
      */
      case ImageHeader::kStorageModeUncompressed: {
        data_size = image_data_size;
        image_data_to_write = image_data;
        break;
      }
      default: {
        LOG(FATAL) << "Unsupported";
        UNREACHABLE();
      }
    }

    if (compressed_data != nullptr) {
      image_data_to_write = &compressed_data[0];
      VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
                     << PrettyDuration(NanoTime() - compress_start_time);
      if (kIsDebugBuild) {
        std::unique_ptr<uint8_t[]> temp(new uint8_t[image_data_size]);
        const size_t decompressed_size = LZ4_decompress_safe(
            reinterpret_cast<char*>(&compressed_data[0]),
            reinterpret_cast<char*>(&temp[0]),
            data_size,
            image_data_size);
        CHECK_EQ(decompressed_size, image_data_size);
        CHECK_EQ(memcmp(image_data, &temp[0], image_data_size), 0) << image_storage_mode_;
      }
    }

    // Write out the image + fields + methods.
    const bool is_compressed = compressed_data != nullptr;
    if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) {
      PLOG(ERROR) << "Failed to write image file data " << image_filename;
      image_file->Erase();
      return false;
    }

    // Write out the image bitmap at the page aligned start of the image end, also uncompressed
    // for convenience.
    const ImageSection& bitmap_section = image_header->GetImageSection(
        ImageHeader::kSectionImageBitmap);
    // Align up since data size may be unaligned if the image is compressed.
    size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize);
    if (!is_compressed) {
      CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
    }
    if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
                                 bitmap_section.Size(),
                                 bitmap_position_in_file)) {
      PLOG(ERROR) << "Failed to write image file " << image_filename;
      image_file->Erase();
      return false;
    }

    int err = image_file->Flush();
    if (err < 0) {
      PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err;
      image_file->Erase();
      return false;
    }

    // Write header last in case the compiler gets killed in the middle of image writing.
    // We do not want to have a corrupted image with a valid header.
    // The header is uncompressed since it contains whether the image is compressed or not.
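    // For reference, the resulting file layout is:
    //   [ImageHeader][image data (possibly LZ4-compressed)][padding to kPageSize][image bitmap]
    // A minimal standalone sketch of the same LZ4 block round-trip performed above, using the
    // same lz4 calls (sizes and names here are illustrative, not from this file):
    //
    //   std::unique_ptr<char[]> out(new char[LZ4_compressBound(in_size)]);
    //   const int packed = LZ4_compress_default(in, out.get(), in_size,
    //                                           LZ4_compressBound(in_size));
    //   std::unique_ptr<char[]> back(new char[in_size]);
    //   CHECK_EQ(LZ4_decompress_safe(out.get(), back.get(), packed, in_size), in_size);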
    image_header->data_size_ = data_size;
    if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
                                 sizeof(ImageHeader),
                                 0)) {
      PLOG(ERROR) << "Failed to write image file header " << image_filename;
      image_file->Erase();
      return false;
    }

    CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
             static_cast<size_t>(image_file->GetLength()));
    if (image_file->FlushCloseOrErase() != 0) {
      PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
      return false;
    }
  }
  return true;
}

void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
  DCHECK(object != nullptr);
  DCHECK_NE(offset, 0U);

  // The object is already deflated from when we set the bin slot. Just overwrite the lock word.
  object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
  DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
  DCHECK(IsImageOffsetAssigned(object));
}

void ImageWriter::UpdateImageOffset(mirror::Object* obj, uintptr_t offset) {
  DCHECK(IsImageOffsetAssigned(obj)) << obj << " " << offset;
  obj->SetLockWord(LockWord::FromForwardingAddress(offset), false);
  DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0u);
}

void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK_NE(image_objects_offset_begin_, 0u);

  size_t oat_index = GetOatIndex(object);
  ImageInfo& image_info = GetImageInfo(oat_index);
  size_t bin_slot_offset = image_info.bin_slot_offsets_[bin_slot.GetBin()];
  size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
  DCHECK_ALIGNED(new_offset, kObjectAlignment);

  SetImageOffset(object, new_offset);
  DCHECK_LT(new_offset, image_info.image_end_);
}

bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
  // Will also return true if the bin slot was assigned since we are reusing the lock word.
  DCHECK(object != nullptr);
  return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
}

size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageOffsetAssigned(object));
  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();
  size_t oat_index = GetOatIndex(object);
  const ImageInfo& image_info = GetImageInfo(oat_index);
  DCHECK_LT(offset, image_info.image_end_);
  return offset;
}

void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
  DCHECK(object != nullptr);
  DCHECK(!IsImageOffsetAssigned(object));
  DCHECK(!IsImageBinSlotAssigned(object));

  // Before we stomp over the lock word, save the hash code for later.
  LockWord lw(object->GetLockWord(false));
  switch (lw.GetState()) {
    case LockWord::kFatLocked:
      FALLTHROUGH_INTENDED;
    case LockWord::kThinLocked: {
      std::ostringstream oss;
      bool thin = (lw.GetState() == LockWord::kThinLocked);
      oss << (thin ? "Thin" : "Fat") << " locked object " << object << "("
          << object->PrettyTypeOf() << ") found during object copy";
      if (thin) {
        oss << ". Lock owner:" << lw.ThinLockOwner();
      }
      LOG(FATAL) << oss.str();
      break;
    }
    case LockWord::kUnlocked:
      // No hash, don't need to save it.
      break;
    case LockWord::kHashCode:
      DCHECK(saved_hashcode_map_.find(object) == saved_hashcode_map_.end());
      saved_hashcode_map_.emplace(object, lw.GetHashCode());
      break;
    default:
      LOG(FATAL) << "Unreachable.";
      UNREACHABLE();
  }
  object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()), false);
  DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
  DCHECK(IsImageBinSlotAssigned(object));
}

void ImageWriter::PrepareDexCacheArraySlots() {
  // Prepare dex cache array starts based on the ordering specified in the CompilerDriver.
  // Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
  // when AssignImageBinSlot() assigns their indexes out of order.
  for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
    auto it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
    ImageInfo& image_info = GetImageInfo(it->second);
    image_info.dex_cache_array_starts_.Put(dex_file,
                                           image_info.bin_slot_sizes_[kBinDexCacheArray]);
    DexCacheArraysLayout layout(target_ptr_size_, dex_file);
    image_info.bin_slot_sizes_[kBinDexCacheArray] += layout.Size();
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  Thread* const self = Thread::Current();
  ReaderMutexLock mu(self, *Locks::dex_lock_);
  for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
    ObjPtr<mirror::DexCache> dex_cache =
        ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
    if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
      continue;
    }
    const DexFile* dex_file = dex_cache->GetDexFile();
    CHECK(dex_file_oat_index_map_.find(dex_file) != dex_file_oat_index_map_.end())
        << "Dex cache should have been pruned " << dex_file->GetLocation()
        << "; possibly in class path";
    DexCacheArraysLayout layout(target_ptr_size_, dex_file);
    DCHECK(layout.Valid());
    size_t oat_index = GetOatIndexForDexCache(dex_cache);
    ImageInfo& image_info = GetImageInfo(oat_index);
    uint32_t start = image_info.dex_cache_array_starts_.Get(dex_file);
    DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
    AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(),
                               start + layout.TypesOffset(),
                               dex_cache);
    DCHECK_EQ(dex_file->NumMethodIds() != 0u, dex_cache->GetResolvedMethods() != nullptr);
    AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(),
                               start + layout.MethodsOffset(),
                               dex_cache);
    DCHECK_EQ(dex_file->NumFieldIds() != 0u, dex_cache->GetResolvedFields() != nullptr);
    AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(),
                               start + layout.FieldsOffset(),
                               dex_cache);
    DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
    AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), dex_cache);
    if (dex_cache->GetResolvedMethodTypes() != nullptr) {
      AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
                                 start + layout.MethodTypesOffset(),
                                 dex_cache);
    }
    if (dex_cache->GetResolvedCallSites() != nullptr) {
      AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
                                 start + layout.CallSitesOffset(),
                                 dex_cache);
    }
  }
}

void ImageWriter::AddDexCacheArrayRelocation(void* array,
                                             size_t offset,
                                             ObjPtr<mirror::DexCache> dex_cache) {
  if (array != nullptr) {
    DCHECK(!IsInBootImage(array));
    size_t oat_index = GetOatIndexForDexCache(dex_cache);
    native_object_relocations_.emplace(
        array,
        NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeDexCacheArray });
  }
}

void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
  DCHECK(arr != nullptr);
  if (kIsDebugBuild) {
    for (size_t i = 0, len = arr->GetLength(); i < len; i++) {
      ArtMethod* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
      if (method != nullptr && !method->IsRuntimeMethod()) {
        mirror::Class* klass = method->GetDeclaringClass();
        CHECK(klass == nullptr || KeepClass(klass))
            << Class::PrettyClass(klass) << " should be a kept class";
      }
    }
  }
  // kBinArtMethodClean picked arbitrarily, just required to differentiate between ArtFields and
  // ArtMethods.
  pointer_arrays_.emplace(arr, kBinArtMethodClean);
}

void ImageWriter::AssignImageBinSlot(mirror::Object* object, size_t oat_index) {
  DCHECK(object != nullptr);
  size_t object_size = object->SizeOf();

  // The magic happens here. We segregate objects into different bins based
  // on how likely they are to get dirty at runtime.
  //
  // Likely-to-dirty objects get packed together into the same bin so that
  // at runtime their page dirtiness ratio (how many dirty objects a page has) is
  // maximized.
  //
  // This means more pages will stay either clean or shared dirty (with zygote) and
  // the app will use less of its own (private) memory.
  Bin bin = kBinRegular;
  size_t current_offset = 0u;

  if (kBinObjects) {
    //
    // Changing the bin of an object is purely a memory-use tuning.
    // It has no effect on runtime correctness.
    //
    // Memory analysis has determined that the following types of objects get dirtied
    // the most:
    //
    // * Dex cache arrays are stored in a special bin. The arrays for each dex cache have
    //   a fixed layout which helps improve generated code (using PC-relative addressing),
    //   so we pre-calculate their offsets separately in PrepareDexCacheArraySlots().
    //   Since these arrays are huge, most pages do not overlap other objects and it's not
    //   really important where they are for the clean/dirty separation. Due to their
    //   special PC-relative addressing, we arbitrarily keep them at the end.
    // * Classes which are verified [their clinit runs only at runtime]
    //   - classes in general [because their static fields get overwritten]
    //   - initialized classes with all-final statics are unlikely to be ever dirty,
    //     so bin them separately
    // * Art Methods that are:
    //   - native [their native entry point is not looked up until runtime]
    //   - have declaring classes that aren't initialized
    //     [their interpreter/quick entry points are trampolines until the class
    //     becomes initialized]
    //
    // We also assume the following objects get dirtied either never or extremely rarely:
    //  * Strings (they are immutable)
    //  * Art methods that aren't native and have initialized declared classes
    //
    // We assume that "regular" bin objects are highly unlikely to become dirtied,
    // so packing them together will not result in a noticeably tighter dirty-to-clean ratio.
    //
    if (object->IsClass()) {
      bin = kBinClassVerified;
      mirror::Class* klass = object->AsClass();

      // Add non-embedded vtable to the pointer array table if there is one.
      auto* vtable = klass->GetVTable();
      if (vtable != nullptr) {
        AddMethodPointerArray(vtable);
      }
      auto* iftable = klass->GetIfTable();
      if (iftable != nullptr) {
        for (int32_t i = 0; i < klass->GetIfTableCount(); ++i) {
          if (iftable->GetMethodArrayCount(i) > 0) {
            AddMethodPointerArray(iftable->GetMethodArray(i));
          }
        }
      }

      // Move known dirty objects into their own sections. This includes:
      //   - classes with dirty static fields.
      if (dirty_image_objects_ != nullptr &&
          dirty_image_objects_->find(klass->PrettyDescriptor()) != dirty_image_objects_->end()) {
        bin = kBinKnownDirty;
      } else if (klass->GetStatus() == Class::kStatusInitialized) {
        bin = kBinClassInitialized;

        // If the class's static fields are all final, put it into a separate bin
        // since it's very likely it will stay clean.
        uint32_t num_static_fields = klass->NumStaticFields();
        if (num_static_fields == 0) {
          bin = kBinClassInitializedFinalStatics;
        } else {
          // Maybe all the statics are final?
          bool all_final = true;
          for (uint32_t i = 0; i < num_static_fields; ++i) {
            ArtField* field = klass->GetStaticField(i);
            if (!field->IsFinal()) {
              all_final = false;
              break;
            }
          }
          if (all_final) {
            bin = kBinClassInitializedFinalStatics;
          }
        }
      }
    } else if (object->GetClass()->IsStringClass()) {
      bin = kBinString;  // Strings are almost always immutable (except for object header).
    } else if (object->GetClass() ==
        Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangObject)) {
      // Instance of java.lang.Object, probably a lock object. This means it will be dirty when we
      // synchronize on it.
      bin = kBinMiscDirty;
    } else if (object->IsDexCache()) {
      // Dex file field becomes dirty when the image is loaded.
      bin = kBinMiscDirty;
    }
    // else bin = kBinRegular
  }

  // Assign the oat index too.
  DCHECK(oat_index_map_.find(object) == oat_index_map_.end());
  oat_index_map_.emplace(object, oat_index);

  ImageInfo& image_info = GetImageInfo(oat_index);

  size_t offset_delta = RoundUp(object_size, kObjectAlignment);  // 64-bit alignment
  // How many bytes the current bin is at (aligned).
  current_offset = image_info.bin_slot_sizes_[bin];
  // Move the current bin size up to accommodate the object we just assigned a bin slot.
  image_info.bin_slot_sizes_[bin] += offset_delta;

  BinSlot new_bin_slot(bin, current_offset);
  SetImageBinSlot(object, new_bin_slot);

  ++image_info.bin_slot_count_[bin];

  // Grow the image closer to the end by the object we just assigned.
  image_info.image_end_ += offset_delta;
}

bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
  if (m->IsNative()) {
    return true;
  }
  mirror::Class* declaring_class = m->GetDeclaringClass();
  // Initialized is highly unlikely to dirty since there are no entry points to mutate.
  return declaring_class == nullptr || declaring_class->GetStatus() != Class::kStatusInitialized;
}

bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
  DCHECK(object != nullptr);

  // We always stash the bin slot into a lockword, in the 'forwarding address' state.
  // If it's in some other state, then we haven't yet assigned an image bin slot.
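  // Illustrative encoding (see BinSlot in image_writer.h): the 32-bit forwarding address
  // conceptually packs the bin and the byte index within that bin, roughly
  //   uint32_t packed = (bin << kBinShift) | index_in_bin;
  // so a single lock word carries both pieces until the final image offset is computed.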
  if (object->GetLockWord(false).GetState() != LockWord::kForwardingAddress) {
    return false;
  } else if (kIsDebugBuild) {
    LockWord lock_word = object->GetLockWord(false);
    size_t offset = lock_word.ForwardingAddress();
    BinSlot bin_slot(offset);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()])
        << "bin slot offset should not exceed the size of that bin";
  }
  return true;
}

ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const {
  DCHECK(object != nullptr);
  DCHECK(IsImageBinSlotAssigned(object));

  LockWord lock_word = object->GetLockWord(false);
  size_t offset = lock_word.ForwardingAddress();  // TODO: ForwardingAddress should be uint32_t
  DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());

  BinSlot bin_slot(static_cast<uint32_t>(offset));
  size_t oat_index = GetOatIndex(object);
  const ImageInfo& image_info = GetImageInfo(oat_index);
  DCHECK_LT(bin_slot.GetIndex(), image_info.bin_slot_sizes_[bin_slot.GetBin()]);

  return bin_slot;
}

bool ImageWriter::AllocMemory() {
  for (ImageInfo& image_info : image_infos_) {
    ImageSection unused_sections[ImageHeader::kSectionCount];
    const size_t length = RoundUp(
        image_info.CreateImageSections(unused_sections), kPageSize);

    std::string error_msg;
    image_info.image_.reset(MemMap::MapAnonymous("image writer image",
                                                 nullptr,
                                                 length,
                                                 PROT_READ | PROT_WRITE,
                                                 false,
                                                 false,
                                                 &error_msg));
    if (UNLIKELY(image_info.image_.get() == nullptr)) {
      LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
      return false;
    }

    // Create the image bitmap, only needs to cover mirror object section which is up to
    // image_end_.
    CHECK_LE(image_info.image_end_, length);
    image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
        "image bitmap", image_info.image_->Begin(), RoundUp(image_info.image_end_, kPageSize)));
    if (image_info.image_bitmap_.get() == nullptr) {
      LOG(ERROR) << "Failed to allocate memory for image bitmap";
      return false;
    }
  }
  return true;
}

class ImageWriter::ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
 public:
  bool operator()(ObjPtr<Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    StackHandleScope<1> hs(Thread::Current());
    mirror::Class::ComputeName(hs.NewHandle(c));
    return true;
  }
};

void ImageWriter::ComputeLazyFieldsForImageClasses() {
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  ComputeLazyFieldsForClassesVisitor visitor;
  class_linker->VisitClassesWithoutClassesLock(&visitor);
}

static bool IsBootClassLoaderClass(ObjPtr<mirror::Class> klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return klass->GetClassLoader() == nullptr;
}

bool ImageWriter::IsBootClassLoaderNonImageClass(mirror::Class* klass) {
  return IsBootClassLoaderClass(klass) && !IsInBootImage(klass);
}

// This visitor follows the references of an instance recursively; the owning class is
// pruned if the type of any reachable field is itself pruned.
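// For example (hypothetical classes): if app class A has a static field holding an object
// whose own fields reference a boot-classpath-only type B that is not in the boot image,
// visiting A's references reaches B and A is pruned from the app image as well.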
class ImageWriter::PruneObjectReferenceVisitor {
 public:
  PruneObjectReferenceVisitor(ImageWriter* image_writer,
                              bool* early_exit,
                              std::unordered_set<mirror::Object*>* visited,
                              bool* result)
      : image_writer_(image_writer),
        early_exit_(early_exit),
        visited_(visited),
        result_(result) {}

  ALWAYS_INLINE void VisitRootIfNonNull(
      mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) { }

  ALWAYS_INLINE void VisitRoot(
      mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) { }

  ALWAYS_INLINE void operator() (ObjPtr<mirror::Object> obj,
                                 MemberOffset offset,
                                 bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    if (ref == nullptr || visited_->find(ref) != visited_->end()) {
      return;
    }
    ObjPtr<mirror::Class> klass = ref->IsClass() ? ref->AsClass() : ref->GetClass();
    if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) {
      // Prune all classes using reflection because the content they hold will not be fixed up.
      *result_ = true;
    }
    // Record the object visited in case of circular reference.
    visited_->emplace(ref);
    if (ref->IsClass()) {
      *result_ = *result_ ||
          image_writer_->PruneAppImageClassInternal(ref->AsClass(), early_exit_, visited_);
    } else {
      *result_ = *result_ ||
          image_writer_->PruneAppImageClassInternal(klass, early_exit_, visited_);
      ref->VisitReferences(*this, *this);
    }
    // Clean up before exit for next call of this function.
    visited_->erase(ref);
  }

  ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                                 ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
  }

  ALWAYS_INLINE bool GetResult() const {
    return *result_;
  }

 private:
  ImageWriter* image_writer_;
  bool* early_exit_;
  std::unordered_set<mirror::Object*>* visited_;
  bool* const result_;
};

bool ImageWriter::PruneAppImageClass(ObjPtr<mirror::Class> klass) {
  bool early_exit = false;
  std::unordered_set<mirror::Object*> visited;
  return PruneAppImageClassInternal(klass, &early_exit, &visited);
}

bool ImageWriter::PruneAppImageClassInternal(
    ObjPtr<mirror::Class> klass,
    bool* early_exit,
    std::unordered_set<mirror::Object*>* visited) {
  DCHECK(early_exit != nullptr);
  DCHECK(visited != nullptr);
  DCHECK(compile_app_image_);
  if (klass == nullptr || IsInBootImage(klass.Ptr())) {
    return false;
  }
  auto found = prune_class_memo_.find(klass.Ptr());
  if (found != prune_class_memo_.end()) {
    // Already computed, return the found value.
    return found->second;
  }
  // Circular dependencies, return false but do not store the result in the memoization table.
  if (visited->find(klass.Ptr()) != visited->end()) {
    *early_exit = true;
    return false;
  }
  visited->emplace(klass.Ptr());
  bool result = IsBootClassLoaderClass(klass);
  std::string temp;
  // Prune if not an image class; this handles any broken sets of image classes such as having a
  // class in the set but not its superclass.
  result = result || !compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
  bool my_early_exit = false;  // Only for ourselves, ignore caller.
  // Remove classes that failed to verify since we don't want to have java.lang.VerifyError in the
  // app image.
  if (klass->IsErroneous()) {
    result = true;
  } else {
    ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
    CHECK(ext.IsNull() || ext->GetVerifyError() == nullptr) << klass->PrettyClass();
  }
  if (!result) {
    // Check interfaces since these won't be visited through VisitReferences.
    mirror::IfTable* if_table = klass->GetIfTable();
    for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
      result = result || PruneAppImageClassInternal(if_table->GetInterface(i),
                                                    &my_early_exit,
                                                    visited);
    }
  }

  if (klass->IsObjectArrayClass()) {
    result = result || PruneAppImageClassInternal(klass->GetComponentType(),
                                                  &my_early_exit,
                                                  visited);
  }

  // Check static fields and their classes.
  if (klass->IsResolved() && klass->NumReferenceStaticFields() != 0) {
    size_t num_static_fields = klass->NumReferenceStaticFields();
    // Presumably GC can happen when we are cross compiling, it should not cause performance
    // problems to do pointer size logic.
    MemberOffset field_offset = klass->GetFirstReferenceStaticFieldOffset(
        Runtime::Current()->GetClassLinker()->GetImagePointerSize());
    for (size_t i = 0u; i < num_static_fields; ++i) {
      mirror::Object* ref = klass->GetFieldObject<mirror::Object>(field_offset);
      if (ref != nullptr) {
        if (ref->IsClass()) {
          result = result || PruneAppImageClassInternal(ref->AsClass(),
                                                        &my_early_exit,
                                                        visited);
        } else {
          mirror::Class* type = ref->GetClass();
          result = result || PruneAppImageClassInternal(type, &my_early_exit, visited);
          if (!result) {
            // For the non-class case, also walk all the types mentioned by its fields'
            // references recursively to decide whether to keep this class.
            bool tmp = false;
            PruneObjectReferenceVisitor visitor(this, &my_early_exit, visited, &tmp);
            ref->VisitReferences(visitor, visitor);
            result = result || tmp;
          }
        }
      }
      field_offset = MemberOffset(field_offset.Uint32Value() +
                                  sizeof(mirror::HeapReference<mirror::Object>));
    }
  }
  result = result || PruneAppImageClassInternal(klass->GetSuperClass(),
                                                &my_early_exit,
                                                visited);
  // Remove the class if the dex file is not in the set of dex files. This happens for classes
  // that are from uses-library if there is no profile. b/30688277
  mirror::DexCache* dex_cache = klass->GetDexCache();
  if (dex_cache != nullptr) {
    result = result ||
        dex_file_oat_index_map_.find(dex_cache->GetDexFile()) == dex_file_oat_index_map_.end();
  }
  // Erase the element we stored earlier since we are exiting the function.
  auto it = visited->find(klass.Ptr());
  DCHECK(it != visited->end());
  visited->erase(it);
  // Only store the result if it is true or none of the calls early exited due to circular
  // dependencies. If visited is empty then we are the root caller, in this case the cycle was in
  // a child call and we can remember the result.
  if (result == true || !my_early_exit || visited->empty()) {
    prune_class_memo_[klass.Ptr()] = result;
  }
  *early_exit |= my_early_exit;
  return result;
}

bool ImageWriter::KeepClass(ObjPtr<mirror::Class> klass) {
  if (klass == nullptr) {
    return false;
  }
  if (compile_app_image_ && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
    // Already in boot image, return true.
    return true;
  }
  std::string temp;
  if (!compiler_driver_.IsImageClass(klass->GetDescriptor(&temp))) {
    return false;
  }
  if (compile_app_image_) {
    // For app images, we need to prune boot loader classes that are not in the boot image since
    // these may have already been loaded when the app image is loaded.
    // Keep classes in the boot image space since we don't want to re-resolve these.
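    // Summary of the outcome (informal, mirrors the checks above):
    //   in boot image space        -> kept (handled earlier)
    //   not an image class         -> dropped (handled earlier)
    //   prunable app image class   -> dropped
    //   everything else            -> kept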
    return !PruneAppImageClass(klass);
  }
  return true;
}

class ImageWriter::PruneClassesVisitor : public ClassVisitor {
 public:
  PruneClassesVisitor(ImageWriter* image_writer, ObjPtr<mirror::ClassLoader> class_loader)
      : image_writer_(image_writer),
        class_loader_(class_loader),
        classes_to_prune_(),
        defined_class_count_(0u) { }

  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!image_writer_->KeepClass(klass.Ptr())) {
      classes_to_prune_.insert(klass.Ptr());
      if (klass->GetClassLoader() == class_loader_) {
        ++defined_class_count_;
      }
    }
    return true;
  }

  size_t Prune() REQUIRES_SHARED(Locks::mutator_lock_) {
    ClassTable* class_table =
        Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader_);
    for (mirror::Class* klass : classes_to_prune_) {
      std::string storage;
      const char* descriptor = klass->GetDescriptor(&storage);
      bool result = class_table->Remove(descriptor);
      DCHECK(result);
      DCHECK(!class_table->Remove(descriptor)) << descriptor;
    }
    return defined_class_count_;
  }

 private:
  ImageWriter* const image_writer_;
  const ObjPtr<mirror::ClassLoader> class_loader_;
  std::unordered_set<mirror::Class*> classes_to_prune_;
  size_t defined_class_count_;
};

class ImageWriter::PruneClassLoaderClassesVisitor : public ClassLoaderVisitor {
 public:
  explicit PruneClassLoaderClassesVisitor(ImageWriter* image_writer)
      : image_writer_(image_writer), removed_class_count_(0) {}

  virtual void Visit(ObjPtr<mirror::ClassLoader> class_loader) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    PruneClassesVisitor classes_visitor(image_writer_, class_loader);
    ClassTable* class_table =
        Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
    class_table->Visit(classes_visitor);
    removed_class_count_ += classes_visitor.Prune();

    // Record app image class loader. The fake boot class loader should not get registered
    // and we should end up with only one class loader for an app and none for boot image.
    if (class_loader != nullptr && class_table != nullptr) {
      DCHECK(class_loader_ == nullptr);
      class_loader_ = class_loader;
    }
  }

  size_t GetRemovedClassCount() const {
    return removed_class_count_;
  }

  ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return class_loader_;
  }

 private:
  ImageWriter* const image_writer_;
  size_t removed_class_count_;
  ObjPtr<mirror::ClassLoader> class_loader_;
};

void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
  visitor->Visit(nullptr);  // Visit boot class loader.
  Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
}

void ImageWriter::PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
                                          ObjPtr<mirror::ClassLoader> class_loader) {
  // To ensure deterministic contents of the hash-based arrays, each slot shall contain
  // the candidate with the lowest index. As we're processing entries in increasing index
  // order, this means trying to look up the entry for the current index if the slot is
  // empty or if it contains a higher index.

  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  const DexFile& dex_file = *dex_cache->GetDexFile();
  // Prune methods.
  mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
  dex::TypeIndex last_class_idx;  // Initialized to invalid index.
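  // Example of the "lowest index wins" rule with made-up indices: if method ids 5 and 13
  // hash to the same slot, the slot must end up describing id 5. Seeing pair.index == 5
  // while processing i == 13 therefore means the slot is already correct and can be skipped.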
  ObjPtr<mirror::Class> last_class = nullptr;
  for (size_t i = 0, num = dex_cache->GetDexFile()->NumMethodIds(); i != num; ++i) {
    uint32_t slot_idx = dex_cache->MethodSlotIndex(i);
    auto pair =
        mirror::DexCache::GetNativePairPtrSize(resolved_methods, slot_idx, target_ptr_size_);
    uint32_t stored_index = pair.index;
    ArtMethod* method = pair.object;
    if (method != nullptr && i > stored_index) {
      continue;  // Already checked.
    }
    // Check if the referenced class is in the image. Note that we want to check the referenced
    // class rather than the declaring class to preserve the semantics, i.e. using a MethodId
    // results in resolving the referenced class and that can for example throw OOME.
    const DexFile::MethodId& method_id = dex_file.GetMethodId(i);
    if (method_id.class_idx_ != last_class_idx) {
      last_class_idx = method_id.class_idx_;
      last_class = class_linker->LookupResolvedType(
          dex_file, last_class_idx, dex_cache, class_loader);
      if (last_class != nullptr && !KeepClass(last_class)) {
        last_class = nullptr;
      }
    }
    if (method == nullptr || i < stored_index) {
      if (last_class != nullptr) {
        const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
        Signature signature = dex_file.GetMethodSignature(method_id);
        if (last_class->IsInterface()) {
          method = last_class->FindInterfaceMethod(name, signature, target_ptr_size_);
        } else {
          method = last_class->FindClassMethod(name, signature, target_ptr_size_);
        }
        if (method != nullptr) {
          // If the referenced class is in the image, the defining class must also be there.
          DCHECK(KeepClass(method->GetDeclaringClass()));
          dex_cache->SetResolvedMethod(i, method, target_ptr_size_);
        }
      }
    } else {
      DCHECK_EQ(i, stored_index);
      if (last_class == nullptr) {
        dex_cache->ClearResolvedMethod(stored_index, target_ptr_size_);
      }
    }
  }
  // Prune fields and make the contents of the field array deterministic.
  mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
  last_class_idx = dex::TypeIndex();  // Initialized to invalid index.
  last_class = nullptr;
  for (size_t i = 0, end = dex_file.NumFieldIds(); i < end; ++i) {
    uint32_t slot_idx = dex_cache->FieldSlotIndex(i);
    auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
    uint32_t stored_index = pair.index;
    ArtField* field = pair.object;
    if (field != nullptr && i > stored_index) {
      continue;  // Already checked.
    }
    // Check if the referenced class is in the image. Note that we want to check the referenced
    // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
    // results in resolving the referenced class and that can for example throw OOME.
    const DexFile::FieldId& field_id = dex_file.GetFieldId(i);
    if (field_id.class_idx_ != last_class_idx) {
      last_class_idx = field_id.class_idx_;
      last_class = class_linker->LookupResolvedType(
          dex_file, last_class_idx, dex_cache, class_loader);
      if (last_class != nullptr && !KeepClass(last_class)) {
        last_class = nullptr;
      }
    }
    if (field == nullptr || i < stored_index) {
      if (last_class != nullptr) {
        const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
        const char* type = dex_file.StringByTypeIdx(field_id.type_idx_);
        field = mirror::Class::FindField(Thread::Current(), last_class, name, type);
        if (field != nullptr) {
          // If the referenced class is in the image, the defining class must also be there.
          DCHECK(KeepClass(field->GetDeclaringClass()));
          dex_cache->SetResolvedField(i, field, target_ptr_size_);
        }
      }
    } else {
      DCHECK_EQ(i, stored_index);
      if (last_class == nullptr) {
        dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
      }
    }
  }
  // Prune types and make the contents of the type array deterministic.
  // This is done after fields and methods as their lookup can touch the types array.
  for (size_t i = 0, end = dex_cache->GetDexFile()->NumTypeIds(); i < end; ++i) {
    dex::TypeIndex type_idx(i);
    uint32_t slot_idx = dex_cache->TypeSlotIndex(type_idx);
    mirror::TypeDexCachePair pair =
        dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
    uint32_t stored_index = pair.index;
    ObjPtr<mirror::Class> klass = pair.object.Read();
    if (klass == nullptr || i < stored_index) {
      klass = class_linker->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader);
      if (klass != nullptr) {
        DCHECK_EQ(dex_cache->GetResolvedType(type_idx), klass);
        stored_index = i;  // For correct clearing below if not keeping the `klass`.
      }
    } else if (i == stored_index && !KeepClass(klass)) {
      dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
    }
  }
  // Strings do not need pruning, but the contents of the string array must be deterministic.
  for (size_t i = 0, end = dex_cache->GetDexFile()->NumStringIds(); i < end; ++i) {
    dex::StringIndex string_idx(i);
    uint32_t slot_idx = dex_cache->StringSlotIndex(string_idx);
    mirror::StringDexCachePair pair =
        dex_cache->GetStrings()[slot_idx].load(std::memory_order_relaxed);
    uint32_t stored_index = pair.index;
    ObjPtr<mirror::String> string = pair.object.Read();
    if (string == nullptr || i < stored_index) {
      string = class_linker->LookupString(dex_file, string_idx, dex_cache);
      DCHECK(string == nullptr || dex_cache->GetResolvedString(string_idx) == string);
    }
  }
}

void ImageWriter::PruneNonImageClasses() {
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  Thread* self = Thread::Current();
  ScopedAssertNoThreadSuspension sa(__FUNCTION__);

  // Prune uses-library dex caches. Only prune the uses-library dex caches since we want to make
  // sure the other ones don't get unloaded before the OatWriter runs.
  class_linker->VisitClassTables(
      [&](ClassTable* table) REQUIRES_SHARED(Locks::mutator_lock_) {
    table->RemoveStrongRoots(
        [&](GcRoot<mirror::Object> root) REQUIRES_SHARED(Locks::mutator_lock_) {
      ObjPtr<mirror::Object> obj = root.Read();
      if (obj->IsDexCache()) {
        // Return true if the dex file is not one of the ones in the map.
        return dex_file_oat_index_map_.find(obj->AsDexCache()->GetDexFile()) ==
            dex_file_oat_index_map_.end();
      }
      // Return false to avoid removing.
      return false;
    });
  });

  // Remove the undesired classes from the class roots.
  ObjPtr<mirror::ClassLoader> class_loader;
  {
    PruneClassLoaderClassesVisitor class_loader_visitor(this);
    VisitClassLoaders(&class_loader_visitor);
    VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
    class_loader = class_loader_visitor.GetClassLoader();
    DCHECK_EQ(class_loader != nullptr, compile_app_image_);
  }

  // Clear references to removed classes from the DexCaches.
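  // Decode the weak roots into a local snapshot under dex_lock_ first; the actual pruning
  // below then runs with the lock released (PruneAndPreloadDexCache does ClassLinker lookups
  // of its own, so holding dex_lock_ across it would be risky).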
  std::vector<ObjPtr<mirror::DexCache>> dex_caches;
  {
    ReaderMutexLock mu2(self, *Locks::dex_lock_);
    dex_caches.reserve(class_linker->GetDexCachesData().size());
    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
      if (self->IsJWeakCleared(data.weak_root)) {
        continue;
      }
      dex_caches.push_back(self->DecodeJObject(data.weak_root)->AsDexCache());
    }
  }
  for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
    PruneAndPreloadDexCache(dex_cache, class_loader);
  }

  // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
  class_linker->DropFindArrayClassCache();

  // Clear to save RAM.
  prune_class_memo_.clear();
}

void ImageWriter::CheckNonImageClassesRemoved() {
  if (compiler_driver_.GetImageClasses() != nullptr) {
    auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
      if (obj->IsClass() && !IsInBootImage(obj)) {
        Class* klass = obj->AsClass();
        if (!KeepClass(klass)) {
          DumpImageClasses();
          std::string temp;
          CHECK(KeepClass(klass))
              << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
        }
      }
    };
    gc::Heap* heap = Runtime::Current()->GetHeap();
    heap->VisitObjects(visitor);
  }
}

void ImageWriter::DumpImageClasses() {
  auto image_classes = compiler_driver_.GetImageClasses();
  CHECK(image_classes != nullptr);
  for (const std::string& image_class : *image_classes) {
    LOG(INFO) << " " << image_class;
  }
}

mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
  Thread* const self = Thread::Current();
  for (const ImageInfo& image_info : image_infos_) {
    ObjPtr<mirror::String> const found = image_info.intern_table_->LookupStrong(self, string);
    DCHECK(image_info.intern_table_->LookupWeak(self, string) == nullptr)
        << string->ToModifiedUtf8();
    if (found != nullptr) {
      return found.Ptr();
    }
  }
  if (compile_app_image_) {
    Runtime* const runtime = Runtime::Current();
    ObjPtr<mirror::String> found = runtime->GetInternTable()->LookupStrong(self, string);
    // If we found it in the runtime intern table it could either be in the boot image or interned
    // during app image compilation. If it was in the boot image return that, otherwise return null
    // since it belongs to another image space.
    if (found != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(found.Ptr())) {
      return found.Ptr();
    }
    DCHECK(runtime->GetInternTable()->LookupWeak(self, string) == nullptr)
        << string->ToModifiedUtf8();
  }
  return nullptr;
}

ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
  Runtime* runtime = Runtime::Current();
  ClassLinker* class_linker = runtime->GetClassLinker();
  Thread* self = Thread::Current();
  StackHandleScope<3> hs(self);
  Handle<Class> object_array_class(hs.NewHandle(
      class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));

  std::unordered_set<const DexFile*> image_dex_files;
  for (auto& pair : dex_file_oat_index_map_) {
    const DexFile* image_dex_file = pair.first;
    size_t image_oat_index = pair.second;
    if (oat_index == image_oat_index) {
      image_dex_files.insert(image_dex_file);
    }
  }

  // Build an Object[] of all the DexCaches used in the source_space_.
  // Since we can't hold the dex lock when allocating the dex_caches
  // ObjectArray, we lock the dex lock twice, first to get the number
  // of dex caches and then lock it again to copy the dex caches.
  // We check that the number of dex caches does not change.
  size_t dex_cache_count = 0;
  {
    ReaderMutexLock mu(self, *Locks::dex_lock_);
    // Count number of dex caches not in the boot image.
    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
      ObjPtr<mirror::DexCache> dex_cache =
          ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
      if (dex_cache == nullptr) {
        continue;
      }
      const DexFile* dex_file = dex_cache->GetDexFile();
      if (!IsInBootImage(dex_cache.Ptr())) {
        dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
      }
    }
  }
  Handle<ObjectArray<Object>> dex_caches(
      hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(), dex_cache_count)));
  CHECK(dex_caches != nullptr) << "Failed to allocate a dex cache array.";
  {
    ReaderMutexLock mu(self, *Locks::dex_lock_);
    size_t non_image_dex_caches = 0;
    // Re-count number of non image dex caches.
    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
      ObjPtr<mirror::DexCache> dex_cache =
          ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
      if (dex_cache == nullptr) {
        continue;
      }
      const DexFile* dex_file = dex_cache->GetDexFile();
      if (!IsInBootImage(dex_cache.Ptr())) {
        non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
      }
    }
    CHECK_EQ(dex_cache_count, non_image_dex_caches)
        << "The number of non-image dex caches changed.";
    size_t i = 0;
    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
      ObjPtr<mirror::DexCache> dex_cache =
          ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
      if (dex_cache == nullptr) {
        continue;
      }
      const DexFile* dex_file = dex_cache->GetDexFile();
      if (!IsInBootImage(dex_cache.Ptr()) &&
          image_dex_files.find(dex_file) != image_dex_files.end()) {
        dex_caches->Set(i, dex_cache.Ptr());
        ++i;
      }
    }
  }

  // Build an Object[] of the roots needed to restore the runtime.
  int32_t image_roots_size = ImageHeader::NumberOfImageRoots(compile_app_image_);
  auto image_roots(hs.NewHandle(
      ObjectArray<Object>::Alloc(self, object_array_class.Get(), image_roots_size)));
  image_roots->Set(ImageHeader::kDexCaches, dex_caches.Get());
  image_roots->Set(ImageHeader::kClassRoots, class_linker->GetClassRoots());
  // image_roots[ImageHeader::kClassLoader] will be set later for app image.
  static_assert(ImageHeader::kClassLoader + 1u == ImageHeader::kImageRootsMax,
                "Class loader should be the last image root.");
  for (int32_t i = 0; i < ImageHeader::kImageRootsMax - 1; ++i) {
    CHECK(image_roots->Get(i) != nullptr);
  }
  return image_roots.Get();
}

mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
                                              mirror::Object* obj,
                                              size_t oat_index) {
  if (obj == nullptr || IsInBootImage(obj)) {
    // Object is null or already in the image, there is no work to do.
    return obj;
  }
  if (!IsImageBinSlotAssigned(obj)) {
    // We want to intern all strings but also assign offsets for the source string. Since the
    // pruning phase has already happened, if we intern a string to one in the image we still
    // end up copying an unreachable string.
    if (obj->IsString()) {
      // Need to check if the string is already interned in another image info so that the
      // intern tables of two different images don't both contain the same string.
      mirror::String* interned = FindInternedString(obj->AsString());
      if (interned == nullptr) {
        // Not in another image space, insert to our table.
        interned =
            GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString()).Ptr();
        DCHECK_EQ(interned, obj);
      }
    } else if (obj->IsDexCache()) {
      oat_index = GetOatIndexForDexCache(obj->AsDexCache());
    } else if (obj->IsClass()) {
      // Visit and assign offsets for fields and field arrays.
      mirror::Class* as_klass = obj->AsClass();
      mirror::DexCache* dex_cache = as_klass->GetDexCache();
      DCHECK(!as_klass->IsErroneous()) << as_klass->GetStatus();
      if (compile_app_image_) {
        // Extra sanity, no boot loader classes should be left!
        CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
      }
      LengthPrefixedArray<ArtField>* fields[] = {
          as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
      };
      // Overwrite the oat index value since the class' dex cache is more accurate of where it
      // belongs.
      oat_index = GetOatIndexForDexCache(dex_cache);
      ImageInfo& image_info = GetImageInfo(oat_index);
      if (!compile_app_image_) {
        // Note: Avoid locking to prevent lock order violations from root visiting;
        // image_info.class_table_ is only accessed from the image writer.
        image_info.class_table_->InsertWithoutLocks(as_klass);
      }
      for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
        // Total array length including header.
        if (cur_fields != nullptr) {
          const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0);
          // Forward the entire array at once.
          auto it = native_object_relocations_.find(cur_fields);
          CHECK(it == native_object_relocations_.end())
              << "Field array " << cur_fields << " already forwarded";
          size_t& offset = image_info.bin_slot_sizes_[kBinArtField];
          DCHECK(!IsInBootImage(cur_fields));
          native_object_relocations_.emplace(
              cur_fields,
              NativeObjectRelocation {
                  oat_index, offset, kNativeObjectRelocationTypeArtFieldArray
              });
          offset += header_size;
          // Forward individual fields so that we can quickly find where they belong.
          for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
            // Need to forward arrays separate of fields.
            ArtField* field = &cur_fields->At(i);
            auto it2 = native_object_relocations_.find(field);
            CHECK(it2 == native_object_relocations_.end())
                << "Field at index=" << i << " already assigned "
                << field->PrettyField() << " static=" << field->IsStatic();
            DCHECK(!IsInBootImage(field));
            native_object_relocations_.emplace(
                field,
                NativeObjectRelocation { oat_index, offset, kNativeObjectRelocationTypeArtField });
            offset += sizeof(ArtField);
          }
        }
      }
      // Visit and assign offsets for methods.
      size_t num_methods = as_klass->NumMethods();
      if (num_methods != 0) {
        bool any_dirty = false;
        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
          if (WillMethodBeDirty(&m)) {
            any_dirty = true;
            break;
          }
        }
        NativeObjectRelocationType type = any_dirty
            ? kNativeObjectRelocationTypeArtMethodDirty
            : kNativeObjectRelocationTypeArtMethodClean;
        Bin bin_type = BinTypeForNativeRelocationType(type);
        // Forward the entire array at once, but header first.
        const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
        const size_t method_size = ArtMethod::Size(target_ptr_size_);
        const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
                                                                               method_size,
                                                                               method_alignment);
        LengthPrefixedArray<ArtMethod>* array = as_klass->GetMethodsPtr();
        auto it = native_object_relocations_.find(array);
        CHECK(it == native_object_relocations_.end())
            << "Method array " << array << " already forwarded";
        size_t& offset = image_info.bin_slot_sizes_[bin_type];
        DCHECK(!IsInBootImage(array));
        native_object_relocations_.emplace(
            array,
            NativeObjectRelocation {
                oat_index,
                offset,
                any_dirty ? kNativeObjectRelocationTypeArtMethodArrayDirty
                          : kNativeObjectRelocationTypeArtMethodArrayClean
            });
        offset += header_size;
        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
          AssignMethodOffset(&m, type, oat_index);
        }
        (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
      }
      // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
      // live.
      if (as_klass->ShouldHaveImt()) {
        ImTable* imt = as_klass->GetImt(target_ptr_size_);
        if (TryAssignImTableOffset(imt, oat_index)) {
          // Since imt's can be shared only do this the first time to not double count imt method
          // fixups.
          for (size_t i = 0; i < ImTable::kSize; ++i) {
            ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
            DCHECK(imt_method != nullptr);
            if (imt_method->IsRuntimeMethod() &&
                !IsInBootImage(imt_method) &&
                !NativeRelocationAssigned(imt_method)) {
              AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index);
            }
          }
        }
      }
    } else if (obj->IsClassLoader()) {
      // Register the class loader if it has a class table.
      // The fake boot class loader should not get registered and we should end up with only one
      // class loader.
      mirror::ClassLoader* class_loader = obj->AsClassLoader();
      if (class_loader->GetClassTable() != nullptr) {
        DCHECK(compile_app_image_);
        DCHECK(class_loaders_.empty());
        class_loaders_.insert(class_loader);
        ImageInfo& image_info = GetImageInfo(oat_index);
        // Note: Avoid locking to prevent lock order violations from root visiting;
        // image_info.class_table_ table is only accessed from the image writer
        // and class_loader->GetClassTable() is iterated but not modified.
        image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
      }
    }
    AssignImageBinSlot(obj, oat_index);
    work_stack.emplace(obj, oat_index);
  }
  if (obj->IsString()) {
    // Always return the interned string if there exists one.
    mirror::String* interned = FindInternedString(obj->AsString());
    if (interned != nullptr) {
      return interned;
    }
  }
  return obj;
}

bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
  return native_object_relocations_.find(ptr) != native_object_relocations_.end();
}

bool ImageWriter::TryAssignImTableOffset(ImTable* imt, size_t oat_index) {
  // No offset, or already assigned.
  if (imt == nullptr || IsInBootImage(imt) || NativeRelocationAssigned(imt)) {
    return false;
  }
  // If the method is a conflict method we also want to assign the conflict table offset.
  ImageInfo& image_info = GetImageInfo(oat_index);
  const size_t size = ImTable::SizeInBytes(target_ptr_size_);
  native_object_relocations_.emplace(
      imt,
      NativeObjectRelocation {
          oat_index,
          image_info.bin_slot_sizes_[kBinImTable],
          kNativeObjectRelocationTypeIMTable
      });
  image_info.bin_slot_sizes_[kBinImTable] += size;
  return true;
}

void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) {
  // No offset, or already assigned.
  if (table == nullptr || NativeRelocationAssigned(table)) {
    return;
  }
  CHECK(!IsInBootImage(table));
  // If the method is a conflict method we also want to assign the conflict table offset.
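  // Native objects are laid out with a per-bin bump allocator; the pattern below (and in
  // TryAssignImTableOffset/AssignMethodOffset) is effectively:
  //   size_t offset = bin_slot_sizes_[bin];  // current end of the bin
  //   bin_slot_sizes_[bin] += size;          // reserve [offset, offset + size)
  // The final image offset becomes bin_slot_offsets_[bin] + offset once all the bins have
  // been sized (see CalculateNewObjectOffsets).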
  ImageInfo& image_info = GetImageInfo(oat_index);
  const size_t size = table->ComputeSize(target_ptr_size_);
  native_object_relocations_.emplace(
      table,
      NativeObjectRelocation {
          oat_index,
          image_info.bin_slot_sizes_[kBinIMTConflictTable],
          kNativeObjectRelocationTypeIMTConflictTable
      });
  image_info.bin_slot_sizes_[kBinIMTConflictTable] += size;
}

void ImageWriter::AssignMethodOffset(ArtMethod* method,
                                     NativeObjectRelocationType type,
                                     size_t oat_index) {
  DCHECK(!IsInBootImage(method));
  CHECK(!NativeRelocationAssigned(method))
      << "Method " << method << " already assigned " << ArtMethod::PrettyMethod(method);
  if (method->IsRuntimeMethod()) {
    TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index);
  }
  ImageInfo& image_info = GetImageInfo(oat_index);
  size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)];
  native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type });
  offset += ArtMethod::Size(target_ptr_size_);
}

void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
  DCHECK(!IsInBootImage(obj));
  CHECK(obj != nullptr);

  // We know the bin slot, and the total bin sizes for all objects by now,
  // so calculate the object's final image offset.

  DCHECK(IsImageBinSlotAssigned(obj));
  BinSlot bin_slot = GetImageBinSlot(obj);
  // Change the lockword from a bin slot into an offset
  AssignImageOffset(obj, bin_slot);
}

class ImageWriter::VisitReferencesVisitor {
 public:
  VisitReferencesVisitor(ImageWriter* image_writer, WorkStack* work_stack, size_t oat_index)
      : image_writer_(image_writer), work_stack_(work_stack), oat_index_(oat_index) {}

  // Fix up separately since we also need to fix up method entrypoints.
  ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    root->Assign(VisitReference(root->AsMirrorPtr()));
  }

  ALWAYS_INLINE void operator() (ObjPtr<mirror::Object> obj,
                                 MemberOffset offset,
                                 bool is_static ATTRIBUTE_UNUSED) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
    obj->SetFieldObject</*kTransactionActive*/ false>(offset, VisitReference(ref));
  }

  ALWAYS_INLINE void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                                 ObjPtr<mirror::Reference> ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
  }

 private:
  mirror::Object* VisitReference(mirror::Object* ref) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return image_writer_->TryAssignBinSlot(*work_stack_, ref, oat_index_);
  }

  ImageWriter* const image_writer_;
  WorkStack* const work_stack_;
  const size_t oat_index_;
};

class ImageWriter::GetRootsVisitor : public RootVisitor {
 public:
  explicit GetRootsVisitor(std::vector<mirror::Object*>* roots) : roots_(roots) {}

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      roots_->push_back(*roots[i]);
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    for (size_t i = 0; i < count; ++i) {
      roots_->push_back(roots[i]->AsMirrorPtr());
    }
  }

 private:
  std::vector<mirror::Object*>* const roots_;
};

void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
  while (!work_stack->empty()) {
    std::pair<mirror::Object*, size_t> pair(work_stack->top());
    work_stack->pop();
    VisitReferencesVisitor visitor(this, work_stack, /*oat_index*/ pair.second);
    // Walk references and assign bin slots for them.
    pair.first->VisitReferences(visitor, visitor);
  }
}

void ImageWriter::CalculateNewObjectOffsets() {
  Thread* const self = Thread::Current();
  VariableSizedHandleScope handles(self);
  std::vector<Handle<ObjectArray<Object>>> image_roots;
  for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
    image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
  }

  Runtime* const runtime = Runtime::Current();
  gc::Heap* const heap = runtime->GetHeap();

  // Leave space for the header, but do not write it yet, we need to
  // know where image_roots is going to end up
  image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit-alignment

  const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
  // Write the image runtime methods.
  image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
  image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
  image_methods_[ImageHeader::kImtUnimplementedMethod] = runtime->GetImtUnimplementedMethod();
  image_methods_[ImageHeader::kSaveAllCalleeSavesMethod] =
      runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves);
  image_methods_[ImageHeader::kSaveRefsOnlyMethod] =
      runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly);
  image_methods_[ImageHeader::kSaveRefsAndArgsMethod] =
      runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
  image_methods_[ImageHeader::kSaveEverythingMethod] =
      runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything);
  // Visit image methods first to have the main runtime methods in the first image.
  for (auto* m : image_methods_) {
    CHECK(m != nullptr);
    CHECK(m->IsRuntimeMethod());
    DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image";
    if (!IsInBootImage(m)) {
      AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex());
    }
  }

  // Deflate monitors before we visit roots since deflating acquires the monitor lock. Acquiring
  // this lock while holding other locks may cause lock order violations.
  {
    auto deflate_monitor = [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
      Monitor::Deflate(Thread::Current(), obj);
    };
    heap->VisitObjects(deflate_monitor);
  }

  // Work list of <object, oat_index> pairs. Everything on the stack must already be
  // assigned a bin slot.
  WorkStack work_stack;

  // Special case interned strings to put them in the image they are likely to be resolved from.
  for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
    auto it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
    const size_t oat_index = it->second;
    InternTable* const intern_table = runtime->GetInternTable();
    for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
      uint32_t utf16_length;
      const char* utf8_data =
          dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &utf16_length);
      mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data).Ptr();
      TryAssignBinSlot(work_stack, string, oat_index);
    }
  }

  // Get the GC roots and then visit them separately to avoid lock violations since the root
  // visitor visits roots while holding various locks.
  {
    std::vector<mirror::Object*> roots;
    GetRootsVisitor root_visitor(&roots);
    runtime->VisitRoots(&root_visitor);
    for (mirror::Object* obj : roots) {
      TryAssignBinSlot(work_stack, obj, GetDefaultOatIndex());
    }
  }
  ProcessWorkStack(&work_stack);

  // For app images, there may be objects that are only held live by the boot image. One
  // example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
  // does not fail any checks. TODO: We should probably avoid copying these objects.
  if (compile_app_image_) {
    for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
      DCHECK(space->IsImageSpace());
      gc::accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->Limit()),
                                    [this, &work_stack](mirror::Object* obj)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        VisitReferencesVisitor visitor(this, &work_stack, GetDefaultOatIndex());
        // Visit all references and try to assign bin slots for them (calls TryAssignBinSlot).
        obj->VisitReferences(visitor, visitor);
      });
    }
    // Process the work stack in case anything was added by TryAssignBinSlot.
    ProcessWorkStack(&work_stack);

    // Store the class loader in the class roots.
    CHECK_EQ(class_loaders_.size(), 1u);
    CHECK_EQ(image_roots.size(), 1u);
    CHECK(*class_loaders_.begin() != nullptr);
    image_roots[0]->Set(ImageHeader::kClassLoader, *class_loaders_.begin());
  }

  // Verify that all objects have assigned image bin slots.
  {
    auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
        CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
      }
    };
    heap->VisitObjects(ensure_bin_slots_assigned);
  }

  // Calculate size of the dex cache arrays slot and prepare offsets.
  PrepareDexCacheArraySlots();

  // Calculate the sizes of the intern tables, class tables, and fixup tables.
  for (ImageInfo& image_info : image_infos_) {
    // Calculate how big the intern table will be after being serialized.
    InternTable* const intern_table = image_info.intern_table_.get();
    CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings";
    if (intern_table->StrongSize() != 0u) {
      image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr);
    }

    // Calculate the size of the class table.
    ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
    DCHECK_EQ(image_info.class_table_->NumReferencedZygoteClasses(), 0u);
    if (image_info.class_table_->NumReferencedNonZygoteClasses() != 0u) {
      image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr);
    }
  }

  // Calculate bin slot offsets.
  for (ImageInfo& image_info : image_infos_) {
    size_t bin_offset = image_objects_offset_begin_;
    for (size_t i = 0; i != kBinSize; ++i) {
      switch (i) {
        case kBinArtMethodClean:
        case kBinArtMethodDirty: {
          bin_offset = RoundUp(bin_offset, method_alignment);
          break;
        }
        case kBinDexCacheArray:
          bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment(target_ptr_size_));
          break;
        case kBinImTable:
        case kBinIMTConflictTable: {
          bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
          break;
        }
        default: {
          // Normal alignment.
        }
      }
      image_info.bin_slot_offsets_[i] = bin_offset;
      bin_offset += image_info.bin_slot_sizes_[i];
    }
    // NOTE: There may be additional padding between the bin slots and the intern table.
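    //
    // A minimal sketch of the layout rule applied by the loop above, using
    // hypothetical stand-alone names (sizes[], alignments[]) instead of the
    // writer's state:
    //
    //   size_t offset = start;
    //   for (size_t i = 0; i != kBinSize; ++i) {
    //     offset = RoundUp(offset, alignments[i]);  // bin-specific alignment
    //     offsets[i] = offset;                      // bin i starts here
    //     offset += sizes[i];                       // bins are contiguous
    //   }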
DCHECK_EQ(image_info.image_end_, GetBinSizeSum(image_info, kBinMirrorCount) + image_objects_offset_begin_); } // Calculate image offsets. size_t image_offset = 0; for (ImageInfo& image_info : image_infos_) { image_info.image_begin_ = global_image_begin_ + image_offset; image_info.image_offset_ = image_offset; ImageSection unused_sections[ImageHeader::kSectionCount]; image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize); // There should be no gaps until the next image. image_offset += image_info.image_size_; } // Transform each object's bin slot into an offset which will be used to do the final copy. { auto unbin_objects_into_offset = [&](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) { if (!IsInBootImage(obj)) { UnbinObjectsIntoOffset(obj); } }; heap->VisitObjects(unbin_objects_into_offset); } size_t i = 0; for (ImageInfo& image_info : image_infos_) { image_info.image_roots_address_ = PointerToLowMemUInt32(GetImageAddress(image_roots[i].Get())); i++; } // Update the native relocations by adding their bin sums. for (auto& pair : native_object_relocations_) { NativeObjectRelocation& relocation = pair.second; Bin bin_type = BinTypeForNativeRelocationType(relocation.type); ImageInfo& image_info = GetImageInfo(relocation.oat_index); relocation.offset += image_info.bin_slot_offsets_[bin_type]; } } size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const { DCHECK(out_sections != nullptr); // Do not round up any sections here that are represented by the bins since it will break // offsets. // Objects section ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects]; *objects_section = ImageSection(0u, image_end_); // Add field section. ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields]; *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]); CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); // Add method section. ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; *methods_section = ImageSection( bin_slot_offsets_[kBinArtMethodClean], bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]); // IMT section. ImageSection* imt_section = &out_sections[ImageHeader::kSectionImTables]; *imt_section = ImageSection(bin_slot_offsets_[kBinImTable], bin_slot_sizes_[kBinImTable]); // Conflict tables section. ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables]; *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable], bin_slot_sizes_[kBinIMTConflictTable]); // Runtime methods section. ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods]; *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod], bin_slot_sizes_[kBinRuntimeMethod]); // Add dex cache arrays section. ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray], bin_slot_sizes_[kBinDexCacheArray]); // Round up to the alignment the string table expects. See HashSet::WriteToMemory. size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t)); // Calculate the size of the interned strings. 
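  // Each of the remaining sections follows the same cursor-chaining pattern,
  // sketched here with a hypothetical section index k:
  //
  //   cur_pos = RoundUp(cur_pos, required_alignment);      // align the cursor
  //   out_sections[k] = ImageSection(cur_pos, byte_size);  // offset + size
  //   cur_pos = out_sections[k].End();                     // next section starts here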
  ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings];
  *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_);
  cur_pos = interned_strings_section->End();
  // Round up to the alignment the class table expects. See HashSet::WriteToMemory.
  cur_pos = RoundUp(cur_pos, sizeof(uint64_t));
  // Calculate the size of the class table section.
  ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable];
  *class_table_section = ImageSection(cur_pos, class_table_bytes_);
  cur_pos = class_table_section->End();
  // Image end goes right before the start of the image bitmap.
  return cur_pos;
}

void ImageWriter::CreateHeader(size_t oat_index) {
  ImageInfo& image_info = GetImageInfo(oat_index);
  const uint8_t* oat_file_begin = image_info.oat_file_begin_;
  const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
  const uint8_t* oat_data_end = image_info.oat_data_begin_ + image_info.oat_size_;

  // Create the image sections.
  ImageSection sections[ImageHeader::kSectionCount];
  const size_t image_end = image_info.CreateImageSections(sections);

  // Finally bitmap section.
  const size_t bitmap_bytes = image_info.image_bitmap_->Size();
  auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
  *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
  if (VLOG_IS_ON(compiler)) {
    LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
    size_t idx = 0;
    for (const ImageSection& section : sections) {
      LOG(INFO) << static_cast<ImageHeader::ImageSections>(idx) << " " << section;
      ++idx;
    }
    LOG(INFO) << "Methods: clean=" << clean_methods_ << " dirty=" << dirty_methods_;
    LOG(INFO) << "Image roots address=" << std::hex << image_info.image_roots_address_ << std::dec;
    LOG(INFO) << "Image begin=" << std::hex << reinterpret_cast<uintptr_t>(global_image_begin_)
              << " Image offset=" << image_info.image_offset_ << std::dec;
    LOG(INFO) << "Oat file begin=" << std::hex << reinterpret_cast<uintptr_t>(oat_file_begin)
              << " Oat data begin=" << reinterpret_cast<uintptr_t>(image_info.oat_data_begin_)
              << " Oat data end=" << reinterpret_cast<uintptr_t>(oat_data_end)
              << " Oat file end=" << reinterpret_cast<uintptr_t>(oat_file_end);
  }
  // Store boot image info for app image so that we can relocate.
  uint32_t boot_image_begin = 0;
  uint32_t boot_image_end = 0;
  uint32_t boot_oat_begin = 0;
  uint32_t boot_oat_end = 0;
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);

  // Create the header, leave 0 for data size since we will fill this in as we are writing the
  // image.
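  // The header is constructed directly at the start of the mapped image via
  // placement new. A minimal sketch of the idiom (hypothetical Header type):
  //
  //   uint8_t* buffer = ...;               // start of the mapped image
  //   new (buffer) Header(/* fields */);   // constructs in place, no allocation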
new (image_info.image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_info.image_begin_), image_end, sections, image_info.image_roots_address_, image_info.oat_checksum_, PointerToLowMemUInt32(oat_file_begin), PointerToLowMemUInt32(image_info.oat_data_begin_), PointerToLowMemUInt32(oat_data_end), PointerToLowMemUInt32(oat_file_end), boot_image_begin, boot_image_end - boot_image_begin, boot_oat_begin, boot_oat_end - boot_oat_begin, static_cast(target_ptr_size_), compile_pic_, /*is_pic*/compile_app_image_, image_storage_mode_, /*data_size*/0u); } ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) { auto it = native_object_relocations_.find(method); CHECK(it != native_object_relocations_.end()) << ArtMethod::PrettyMethod(method) << " @ " << method; size_t oat_index = GetOatIndex(method->GetDexCache()); ImageInfo& image_info = GetImageInfo(oat_index); CHECK_GE(it->second.offset, image_info.image_end_) << "ArtMethods should be after Objects"; return reinterpret_cast(image_info.image_begin_ + it->second.offset); } class ImageWriter::FixupRootVisitor : public RootVisitor { public: explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) { } void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED, size_t count ATTRIBUTE_UNUSED, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { LOG(FATAL) << "Unsupported"; } void VisitRoots(mirror::CompressedReference** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { image_writer_->CopyReference(roots[i], roots[i]->AsMirrorPtr()); } } private: ImageWriter* const image_writer_; }; void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) { for (size_t i = 0; i < ImTable::kSize; ++i) { ArtMethod* method = orig->Get(i, target_ptr_size_); void** address = reinterpret_cast(copy->AddressOfElement(i, target_ptr_size_)); CopyAndFixupPointer(address, method); DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method)); } } void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) { const size_t count = orig->NumEntries(target_ptr_size_); for (size_t i = 0; i < count; ++i) { ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_); ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_); CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method); CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_), implementation_method); DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_), NativeLocationInImage(interface_method)); DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_), NativeLocationInImage(implementation_method)); } } void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { const ImageInfo& image_info = GetImageInfo(oat_index); // Copy ArtFields and methods to their locations and update the array for convenience. for (auto& pair : native_object_relocations_) { NativeObjectRelocation& relocation = pair.second; // Only work with fields and methods that are in the current oat file. 
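    // Relocations for other oat files are skipped here; they are written when
    // CopyAndFixupNativeData() runs for their own oat_index.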
if (relocation.oat_index != oat_index) { continue; } auto* dest = image_info.image_->Begin() + relocation.offset; DCHECK_GE(dest, image_info.image_->Begin() + image_info.image_end_); DCHECK(!IsInBootImage(pair.first)); switch (relocation.type) { case kNativeObjectRelocationTypeArtField: { memcpy(dest, pair.first, sizeof(ArtField)); CopyReference( reinterpret_cast(dest)->GetDeclaringClassAddressWithoutBarrier(), reinterpret_cast(pair.first)->GetDeclaringClass().Ptr()); break; } case kNativeObjectRelocationTypeRuntimeMethod: case kNativeObjectRelocationTypeArtMethodClean: case kNativeObjectRelocationTypeArtMethodDirty: { CopyAndFixupMethod(reinterpret_cast(pair.first), reinterpret_cast(dest), image_info); break; } // For arrays, copy just the header since the elements will get copied by their corresponding // relocations. case kNativeObjectRelocationTypeArtFieldArray: { memcpy(dest, pair.first, LengthPrefixedArray::ComputeSize(0)); break; } case kNativeObjectRelocationTypeArtMethodArrayClean: case kNativeObjectRelocationTypeArtMethodArrayDirty: { size_t size = ArtMethod::Size(target_ptr_size_); size_t alignment = ArtMethod::Alignment(target_ptr_size_); memcpy(dest, pair.first, LengthPrefixedArray::ComputeSize(0, size, alignment)); // Clear padding to avoid non-deterministic data in the image (and placate valgrind). reinterpret_cast*>(dest)->ClearPadding(size, alignment); break; } case kNativeObjectRelocationTypeDexCacheArray: // Nothing to copy here, everything is done in FixupDexCache(). break; case kNativeObjectRelocationTypeIMTable: { ImTable* orig_imt = reinterpret_cast(pair.first); ImTable* dest_imt = reinterpret_cast(dest); CopyAndFixupImTable(orig_imt, dest_imt); break; } case kNativeObjectRelocationTypeIMTConflictTable: { auto* orig_table = reinterpret_cast(pair.first); CopyAndFixupImtConflictTable( orig_table, new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_)); break; } } } // Fixup the image method roots. auto* image_header = reinterpret_cast(image_info.image_->Begin()); for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) { ArtMethod* method = image_methods_[i]; CHECK(method != nullptr); if (!IsInBootImage(method)) { method = NativeLocationInImage(method); } image_header->SetImageMethod(static_cast(i), method); } FixupRootVisitor root_visitor(this); // Write the intern table into the image. if (image_info.intern_table_bytes_ > 0) { const ImageSection& intern_table_section = image_header->GetImageSection( ImageHeader::kSectionInternedStrings); InternTable* const intern_table = image_info.intern_table_.get(); uint8_t* const intern_table_memory_ptr = image_info.image_->Begin() + intern_table_section.Offset(); const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr); CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_); // Fixup the pointers in the newly written intern table to contain image addresses. InternTable temp_intern_table; // Note that we require that ReadFromMemory does not make an internal copy of the elements so that // the VisitRoots() will update the memory directly rather than the copies. // This also relies on visit roots not doing any verification which could fail after we update // the roots to be the image addresses. temp_intern_table.AddTableFromMemory(intern_table_memory_ptr); CHECK_EQ(temp_intern_table.Size(), intern_table->Size()); temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots); } // Write the class table(s) into the image. 
  // class_table_bytes_ may be 0 if there are multiple class loaders. Writing multiple class
  // tables into the image is currently unsupported.
  if (image_info.class_table_bytes_ > 0u) {
    const ImageSection& class_table_section = image_header->GetImageSection(
        ImageHeader::kSectionClassTable);
    uint8_t* const class_table_memory_ptr =
        image_info.image_->Begin() + class_table_section.Offset();
    ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);

    ClassTable* table = image_info.class_table_.get();
    CHECK(table != nullptr);
    const size_t class_table_bytes = table->WriteToMemory(class_table_memory_ptr);
    CHECK_EQ(class_table_bytes, image_info.class_table_bytes_);
    // Fixup the pointers in the newly written class table to contain image addresses. See
    // above comment for intern tables.
    ClassTable temp_class_table;
    temp_class_table.ReadFromMemory(class_table_memory_ptr);
    CHECK_EQ(temp_class_table.NumReferencedZygoteClasses(),
             table->NumReferencedNonZygoteClasses() + table->NumReferencedZygoteClasses());
    UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown));
    temp_class_table.VisitRoots(visitor);
  }
}

void ImageWriter::CopyAndFixupObjects() {
  auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    CopyAndFixupObject(obj);
  };
  Runtime::Current()->GetHeap()->VisitObjects(visitor);
  // Fix up the objects that previously had hash codes.
  for (const auto& hash_pair : saved_hashcode_map_) {
    Object* obj = hash_pair.first;
    DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0U);
    obj->SetLockWord(LockWord::FromHashCode(hash_pair.second, 0U), false);
  }
  saved_hashcode_map_.clear();
}

void ImageWriter::FixupPointerArray(mirror::Object* dst,
                                    mirror::PointerArray* arr,
                                    mirror::Class* klass,
                                    Bin array_type) {
  CHECK(klass->IsArrayClass());
  CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
  // Fixup int and long pointers for the ArtMethod or ArtField arrays.
  const size_t num_elements = arr->GetLength();
  dst->SetClass(GetImageAddress(arr->GetClass()));
  auto* dest_array = down_cast<mirror::PointerArray*>(dst);
  for (size_t i = 0, count = num_elements; i < count; ++i) {
    void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
    if (kIsDebugBuild && elem != nullptr && !IsInBootImage(elem)) {
      auto it = native_object_relocations_.find(elem);
      if (UNLIKELY(it == native_object_relocations_.end())) {
        // `it` is the end iterator here and must not be dereferenced; decide which kind of
        // entry is missing from the array's bin type instead.
        if (array_type == kBinArtMethodClean || array_type == kBinArtMethodDirty) {
          auto* method = reinterpret_cast<ArtMethod*>(elem);
          LOG(FATAL) << "No relocation entry for ArtMethod " << method->PrettyMethod() << " @ "
                     << method << " idx=" << i << "/" << num_elements << " with declaring class "
                     << Class::PrettyClass(method->GetDeclaringClass());
        } else {
          CHECK_EQ(array_type, kBinArtField);
          auto* field = reinterpret_cast<ArtField*>(elem);
          LOG(FATAL) << "No relocation entry for ArtField " << field->PrettyField() << " @ "
                     << field << " idx=" << i << "/" << num_elements << " with declaring class "
                     << Class::PrettyClass(field->GetDeclaringClass());
        }
        UNREACHABLE();
      }
    }
    CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
  }
}

void ImageWriter::CopyAndFixupObject(Object* obj) {
  if (IsInBootImage(obj)) {
    return;
  }
  size_t offset = GetImageOffset(obj);
  size_t oat_index = GetOatIndex(obj);
  ImageInfo& image_info = GetImageInfo(oat_index);
  auto* dst = reinterpret_cast<Object*>(image_info.image_->Begin() + offset);
  DCHECK_LT(offset, image_info.image_end_);
  const auto* src = reinterpret_cast<const uint8_t*>(obj);

  image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
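
  // The raw object bits are copied below; the lock word is then rewritten
  // explicitly, since the source lock word may still hold a forwarding
  // bin slot/offset rather than the object's real hash code.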
const size_t n = obj->SizeOf(); DCHECK_LE(offset + n, image_info.image_->Size()); memcpy(dst, src, n); // Write in a hash code of objects which have inflated monitors or a hash code in their monitor // word. const auto it = saved_hashcode_map_.find(obj); dst->SetLockWord(it != saved_hashcode_map_.end() ? LockWord::FromHashCode(it->second, 0u) : LockWord::Default(), false); if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) { // Treat all of the objects in the image as marked to avoid unnecessary dirty pages. This is // safe since we mark all of the objects that may reference non immune objects as gray. CHECK(dst->AtomicSetMarkBit(0, 1)); } FixupObject(obj, dst); } // Rewrite all the references in the copied object to point to their image address equivalent class ImageWriter::FixupVisitor { public: FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) { } // Ignore class roots since we don't have a way to map them to the destination. These are handled // with other logic. void VisitRootIfNonNull(mirror::CompressedReference* root ATTRIBUTE_UNUSED) const {} void VisitRoot(mirror::CompressedReference* root ATTRIBUTE_UNUSED) const {} void operator()(ObjPtr obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { ObjPtr ref = obj->GetFieldObject(offset); // Copy the reference and record the fixup if necessary. image_writer_->CopyReference( copy_->GetFieldObjectReferenceAddr(offset), ref.Ptr()); } // java.lang.ref.Reference visitor. void operator()(ObjPtr klass ATTRIBUTE_UNUSED, ObjPtr ref) const REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false); } protected: ImageWriter* const image_writer_; mirror::Object* const copy_; }; class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor { public: FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) { } void operator()(ObjPtr obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { DCHECK(obj->IsClass()); FixupVisitor::operator()(obj, offset, /*is_static*/false); } void operator()(ObjPtr klass ATTRIBUTE_UNUSED, ObjPtr ref ATTRIBUTE_UNUSED) const REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) { LOG(FATAL) << "Reference not expected here."; } }; uintptr_t ImageWriter::NativeOffsetInImage(void* obj) { DCHECK(obj != nullptr); DCHECK(!IsInBootImage(obj)); auto it = native_object_relocations_.find(obj); CHECK(it != native_object_relocations_.end()) << obj << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces(); const NativeObjectRelocation& relocation = it->second; return relocation.offset; } template std::string PrettyPrint(T* ptr) REQUIRES_SHARED(Locks::mutator_lock_) { std::ostringstream oss; oss << ptr; return oss.str(); } template <> std::string PrettyPrint(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { return ArtMethod::PrettyMethod(method); } template T* ImageWriter::NativeLocationInImage(T* obj) { if (obj == nullptr || IsInBootImage(obj)) { return obj; } else { auto it = native_object_relocations_.find(obj); CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj) << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces(); const NativeObjectRelocation& relocation = it->second; ImageInfo& image_info = 
GetImageInfo(relocation.oat_index); return reinterpret_cast(image_info.image_begin_ + relocation.offset); } } template T* ImageWriter::NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) { if (obj == nullptr || IsInBootImage(obj)) { return obj; } else { size_t oat_index = GetOatIndexForDexCache(dex_cache); ImageInfo& image_info = GetImageInfo(oat_index); return reinterpret_cast(image_info.image_->Begin() + NativeOffsetInImage(obj)); } } class ImageWriter::NativeLocationVisitor { public: explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {} template T* operator()(T* ptr, void** dest_addr = nullptr) const REQUIRES_SHARED(Locks::mutator_lock_) { if (dest_addr != nullptr) { image_writer_->CopyAndFixupPointer(dest_addr, ptr); } return image_writer_->NativeLocationInImage(ptr); } private: ImageWriter* const image_writer_; }; void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) { orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this)); FixupClassVisitor visitor(this, copy); ObjPtr(orig)->VisitReferences(visitor, visitor); // Remove the clinitThreadId. This is required for image determinism. copy->SetClinitThreadId(static_cast(0)); } void ImageWriter::FixupObject(Object* orig, Object* copy) { DCHECK(orig != nullptr); DCHECK(copy != nullptr); if (kUseBakerReadBarrier) { orig->AssertReadBarrierState(); } auto* klass = orig->GetClass(); if (klass->IsIntArrayClass() || klass->IsLongArrayClass()) { // Is this a native pointer array? auto it = pointer_arrays_.find(down_cast(orig)); if (it != pointer_arrays_.end()) { // Should only need to fixup every pointer array exactly once. FixupPointerArray(copy, down_cast(orig), klass, it->second); pointer_arrays_.erase(it); return; } } if (orig->IsClass()) { FixupClass(orig->AsClass(), down_cast(copy)); } else { if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) { // Need to go update the ArtMethod. auto* dest = down_cast(copy); auto* src = down_cast(orig); ArtMethod* src_method = src->GetArtMethod(); dest->SetArtMethod(GetImageMethodAddress(src_method)); } else if (!klass->IsArrayClass()) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) { FixupDexCache(down_cast(orig), down_cast(copy)); } else if (klass->IsClassLoaderClass()) { mirror::ClassLoader* copy_loader = down_cast(copy); // If src is a ClassLoader, set the class table to null so that it gets recreated by the // ClassLoader. copy_loader->SetClassTable(nullptr); // Also set allocator to null to be safe. The allocator is created when we create the class // table. We also never expect to unload things in the image since they are held live as // roots. 
copy_loader->SetAllocator(nullptr); } } FixupVisitor visitor(this, copy); orig->VisitReferences(visitor, visitor); } } class ImageWriter::ImageAddressVisitorForDexCacheArray { public: explicit ImageAddressVisitorForDexCacheArray(ImageWriter* image_writer) : image_writer_(image_writer) {} template T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) { return image_writer_->GetImageAddress(ptr); } private: ImageWriter* const image_writer_; }; void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache) { ImageAddressVisitorForDexCacheArray fixup_visitor(this); // Though the DexCache array fields are usually treated as native pointers, we set the full // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e. // static_cast(reinterpret_cast(image_begin_ + offset))). mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings(); if (orig_strings != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::StringsOffset(), NativeLocationInImage(orig_strings), PointerSize::k64); orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache), fixup_visitor); } mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes(); if (orig_types != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::ResolvedTypesOffset(), NativeLocationInImage(orig_types), PointerSize::k64); orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache), fixup_visitor); } mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods(); if (orig_methods != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::ResolvedMethodsOffset(), NativeLocationInImage(orig_methods), PointerSize::k64); mirror::MethodDexCacheType* copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache); for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) { mirror::MethodDexCachePair orig_pair = mirror::DexCache::GetNativePairPtrSize(orig_methods, i, target_ptr_size_); // NativeLocationInImage also handles runtime methods since these have relocation info. 
mirror::MethodDexCachePair copy_pair(NativeLocationInImage(orig_pair.object), orig_pair.index); mirror::DexCache::SetNativePairPtrSize(copy_methods, i, copy_pair, target_ptr_size_); } } mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields(); if (orig_fields != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::ResolvedFieldsOffset(), NativeLocationInImage(orig_fields), PointerSize::k64); mirror::FieldDexCacheType* copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache); for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) { mirror::FieldDexCachePair orig = mirror::DexCache::GetNativePairPtrSize(orig_fields, i, target_ptr_size_); mirror::FieldDexCachePair copy = orig; copy.object = NativeLocationInImage(orig.object); mirror::DexCache::SetNativePairPtrSize(copy_fields, i, copy, target_ptr_size_); } } mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes(); if (orig_method_types != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::ResolvedMethodTypesOffset(), NativeLocationInImage(orig_method_types), PointerSize::k64); orig_dex_cache->FixupResolvedMethodTypes(NativeCopyLocation(orig_method_types, orig_dex_cache), fixup_visitor); } GcRoot* orig_call_sites = orig_dex_cache->GetResolvedCallSites(); if (orig_call_sites != nullptr) { copy_dex_cache->SetFieldPtrWithSize(mirror::DexCache::ResolvedCallSitesOffset(), NativeLocationInImage(orig_call_sites), PointerSize::k64); orig_dex_cache->FixupResolvedCallSites(NativeCopyLocation(orig_call_sites, orig_dex_cache), fixup_visitor); } // Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving // compiler pointers in here will make the output non-deterministic. copy_dex_cache->SetDexFile(nullptr); } const uint8_t* ImageWriter::GetOatAddress(OatAddress type) const { DCHECK_LT(type, kOatAddressCount); // If we are compiling an app image, we need to use the stubs of the boot image. if (compile_app_image_) { // Use the current image pointers. const std::vector& image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces(); DCHECK(!image_spaces.empty()); const OatFile* oat_file = image_spaces[0]->GetOatFile(); CHECK(oat_file != nullptr); const OatHeader& header = oat_file->GetOatHeader(); switch (type) { // TODO: We could maybe clean this up if we stored them in an array in the oat header. 
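      // Each kOatAddress* value below maps onto the corresponding trampoline
      // accessor of the primary boot oat header; only app images take this
      // path, other images resolve through oat_address_offsets_ further down.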
      case kOatAddressQuickGenericJNITrampoline:
        return static_cast<const uint8_t*>(header.GetQuickGenericJniTrampoline());
      case kOatAddressInterpreterToInterpreterBridge:
        return static_cast<const uint8_t*>(header.GetInterpreterToInterpreterBridge());
      case kOatAddressInterpreterToCompiledCodeBridge:
        return static_cast<const uint8_t*>(header.GetInterpreterToCompiledCodeBridge());
      case kOatAddressJNIDlsymLookup:
        return static_cast<const uint8_t*>(header.GetJniDlsymLookup());
      case kOatAddressQuickIMTConflictTrampoline:
        return static_cast<const uint8_t*>(header.GetQuickImtConflictTrampoline());
      case kOatAddressQuickResolutionTrampoline:
        return static_cast<const uint8_t*>(header.GetQuickResolutionTrampoline());
      case kOatAddressQuickToInterpreterBridge:
        return static_cast<const uint8_t*>(header.GetQuickToInterpreterBridge());
      default:
        UNREACHABLE();
    }
  }
  const ImageInfo& primary_image_info = GetImageInfo(0);
  return GetOatAddressForOffset(primary_image_info.oat_address_offsets_[type], primary_image_info);
}

const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
                                         const ImageInfo& image_info,
                                         bool* quick_is_interpreted) {
  DCHECK(!method->IsResolutionMethod()) << method->PrettyMethod();
  DCHECK_NE(method, Runtime::Current()->GetImtConflictMethod()) << method->PrettyMethod();
  DCHECK(!method->IsImtUnimplementedMethod()) << method->PrettyMethod();
  DCHECK(method->IsInvokable()) << method->PrettyMethod();
  DCHECK(!IsInBootImage(method)) << method->PrettyMethod();

  // Use original code if it exists. Otherwise, set the code pointer to the resolution
  // trampoline.

  // Quick entrypoint:
  const void* quick_oat_entry_point =
      method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_);
  const uint8_t* quick_code;

  if (UNLIKELY(IsInBootImage(method->GetDeclaringClass()))) {
    DCHECK(method->IsCopied());
    // If the code is not in the oat file corresponding to this image (e.g. default methods)
    quick_code = reinterpret_cast<const uint8_t*>(quick_oat_entry_point);
  } else {
    uint32_t quick_oat_code_offset = PointerToLowMemUInt32(quick_oat_entry_point);
    quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
  }

  *quick_is_interpreted = false;
  if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
      method->GetDeclaringClass()->IsInitialized())) {
    // We have code for a non-static or initialized method, just use the code.
  } else if (quick_code == nullptr && method->IsNative() &&
      (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
    // Non-static or initialized native method missing compiled code, use generic JNI version.
    quick_code = GetOatAddress(kOatAddressQuickGenericJNITrampoline);
  } else if (quick_code == nullptr && !method->IsNative()) {
    // We don't have code at all for a non-native method, use the interpreter.
    quick_code = GetOatAddress(kOatAddressQuickToInterpreterBridge);
    *quick_is_interpreted = true;
  } else {
    CHECK(!method->GetDeclaringClass()->IsInitialized());
    // We have code for a static method, but need to go through the resolution stub for class
    // initialization.
    quick_code = GetOatAddress(kOatAddressQuickResolutionTrampoline);
  }
  if (!IsInBootOatFile(quick_code)) {
    // DCHECK_GE(quick_code, oat_data_begin_);
  }
  return quick_code;
}

void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
                                     ArtMethod* copy,
                                     const ImageInfo& image_info) {
  if (orig->IsAbstract()) {
    // Ignore the single-implementation info for abstract method.
    // Do this on orig instead of copy, otherwise there is a crash because methods
    // are copied before classes.
    // TODO: handle fixup of single-implementation method for abstract method.
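    // Both the flag and the cached target method are reset below so that no
    // stale single-implementation pointer is copied into the image.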
    orig->SetHasSingleImplementation(false);
    orig->SetSingleImplementation(
        nullptr, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
  }

  memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));

  CopyReference(copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());

  mirror::MethodDexCacheType* orig_resolved_methods =
      orig->GetDexCacheResolvedMethods(target_ptr_size_);
  copy->SetDexCacheResolvedMethods(NativeLocationInImage(orig_resolved_methods), target_ptr_size_);

  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
  // oat_begin_

  // The resolution method has a special trampoline to call.
  Runtime* runtime = Runtime::Current();
  if (orig->IsRuntimeMethod()) {
    ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
    if (orig_table != nullptr) {
      // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
      copy->SetEntryPointFromQuickCompiledCodePtrSize(
          GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_);
      copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
    } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
      copy->SetEntryPointFromQuickCompiledCodePtrSize(
          GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_);
    } else {
      bool found_one = false;
      for (size_t i = 0; i < static_cast<size_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
        auto idx = static_cast<CalleeSaveType>(i);
        if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) {
          found_one = true;
          break;
        }
      }
      CHECK(found_one) << "Expected to find callee save method but got " << orig->PrettyMethod();
      CHECK(copy->IsRuntimeMethod());
    }
  } else {
    // We assume all methods have code. If they don't currently then we set them to use the
    // resolution trampoline. Abstract methods never have code and so we need to make sure their
    // use results in an AbstractMethodError. We use the interpreter to achieve this.
    if (UNLIKELY(!orig->IsInvokable())) {
      copy->SetEntryPointFromQuickCompiledCodePtrSize(
          GetOatAddress(kOatAddressQuickToInterpreterBridge), target_ptr_size_);
    } else {
      bool quick_is_interpreted;
      const uint8_t* quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
      copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);

      // JNI entrypoint:
      if (orig->IsNative()) {
        // The native method's pointer is set to a stub to lookup via dlsym.
        // Note this is not the code_ pointer, that is handled above.
copy->SetEntryPointFromJniPtrSize( GetOatAddress(kOatAddressJNIDlsymLookup), target_ptr_size_); } } } } size_t ImageWriter::GetBinSizeSum(ImageWriter::ImageInfo& image_info, ImageWriter::Bin up_to) const { DCHECK_LE(up_to, kBinSize); return std::accumulate(&image_info.bin_slot_sizes_[0], &image_info.bin_slot_sizes_[up_to], /*init*/0); } ImageWriter::BinSlot::BinSlot(uint32_t lockword) : lockword_(lockword) { // These values may need to get updated if more bins are added to the enum Bin static_assert(kBinBits == 3, "wrong number of bin bits"); static_assert(kBinShift == 27, "wrong number of shift"); static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes"); DCHECK_LT(GetBin(), kBinSize); DCHECK_ALIGNED(GetIndex(), kObjectAlignment); } ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index) : BinSlot(index | (static_cast(bin) << kBinShift)) { DCHECK_EQ(index, GetIndex()); } ImageWriter::Bin ImageWriter::BinSlot::GetBin() const { return static_cast((lockword_ & kBinMask) >> kBinShift); } uint32_t ImageWriter::BinSlot::GetIndex() const { return lockword_ & ~kBinMask; } ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocationType type) { switch (type) { case kNativeObjectRelocationTypeArtField: case kNativeObjectRelocationTypeArtFieldArray: return kBinArtField; case kNativeObjectRelocationTypeArtMethodClean: case kNativeObjectRelocationTypeArtMethodArrayClean: return kBinArtMethodClean; case kNativeObjectRelocationTypeArtMethodDirty: case kNativeObjectRelocationTypeArtMethodArrayDirty: return kBinArtMethodDirty; case kNativeObjectRelocationTypeDexCacheArray: return kBinDexCacheArray; case kNativeObjectRelocationTypeRuntimeMethod: return kBinRuntimeMethod; case kNativeObjectRelocationTypeIMTable: return kBinImTable; case kNativeObjectRelocationTypeIMTConflictTable: return kBinIMTConflictTable; } UNREACHABLE(); } size_t ImageWriter::GetOatIndex(mirror::Object* obj) const { if (!IsMultiImage()) { return GetDefaultOatIndex(); } auto it = oat_index_map_.find(obj); DCHECK(it != oat_index_map_.end()) << obj; return it->second; } size_t ImageWriter::GetOatIndexForDexFile(const DexFile* dex_file) const { if (!IsMultiImage()) { return GetDefaultOatIndex(); } auto it = dex_file_oat_index_map_.find(dex_file); DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation(); return it->second; } size_t ImageWriter::GetOatIndexForDexCache(ObjPtr dex_cache) const { return (dex_cache == nullptr) ? GetDefaultOatIndex() : GetOatIndexForDexFile(dex_cache->GetDexFile()); } void ImageWriter::UpdateOatFileLayout(size_t oat_index, size_t oat_loaded_size, size_t oat_data_offset, size_t oat_data_size) { const uint8_t* images_end = image_infos_.back().image_begin_ + image_infos_.back().image_size_; for (const ImageInfo& info : image_infos_) { DCHECK_LE(info.image_begin_ + info.image_size_, images_end); } DCHECK(images_end != nullptr); // Image space must be ready. ImageInfo& cur_image_info = GetImageInfo(oat_index); cur_image_info.oat_file_begin_ = images_end + cur_image_info.oat_offset_; cur_image_info.oat_loaded_size_ = oat_loaded_size; cur_image_info.oat_data_begin_ = cur_image_info.oat_file_begin_ + oat_data_offset; cur_image_info.oat_size_ = oat_data_size; if (compile_app_image_) { CHECK_EQ(oat_filenames_.size(), 1u) << "App image should have no next image."; return; } // Update the oat_offset of the next image info. if (oat_index + 1u != oat_filenames_.size()) { // There is a following one. 
ImageInfo& next_image_info = GetImageInfo(oat_index + 1u); next_image_info.oat_offset_ = cur_image_info.oat_offset_ + oat_loaded_size; } } void ImageWriter::UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header) { ImageInfo& cur_image_info = GetImageInfo(oat_index); cur_image_info.oat_checksum_ = oat_header.GetChecksum(); if (oat_index == GetDefaultOatIndex()) { // Primary oat file, read the trampolines. cur_image_info.oat_address_offsets_[kOatAddressInterpreterToInterpreterBridge] = oat_header.GetInterpreterToInterpreterBridgeOffset(); cur_image_info.oat_address_offsets_[kOatAddressInterpreterToCompiledCodeBridge] = oat_header.GetInterpreterToCompiledCodeBridgeOffset(); cur_image_info.oat_address_offsets_[kOatAddressJNIDlsymLookup] = oat_header.GetJniDlsymLookupOffset(); cur_image_info.oat_address_offsets_[kOatAddressQuickGenericJNITrampoline] = oat_header.GetQuickGenericJniTrampolineOffset(); cur_image_info.oat_address_offsets_[kOatAddressQuickIMTConflictTrampoline] = oat_header.GetQuickImtConflictTrampolineOffset(); cur_image_info.oat_address_offsets_[kOatAddressQuickResolutionTrampoline] = oat_header.GetQuickResolutionTrampolineOffset(); cur_image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] = oat_header.GetQuickToInterpreterBridgeOffset(); } } ImageWriter::ImageWriter( const CompilerDriver& compiler_driver, uintptr_t image_begin, bool compile_pic, bool compile_app_image, ImageHeader::StorageMode image_storage_mode, const std::vector& oat_filenames, const std::unordered_map& dex_file_oat_index_map, const std::unordered_set* dirty_image_objects) : compiler_driver_(compiler_driver), global_image_begin_(reinterpret_cast(image_begin)), image_objects_offset_begin_(0), compile_pic_(compile_pic), compile_app_image_(compile_app_image), target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), image_infos_(oat_filenames.size()), dirty_methods_(0u), clean_methods_(0u), image_storage_mode_(image_storage_mode), oat_filenames_(oat_filenames), dex_file_oat_index_map_(dex_file_oat_index_map), dirty_image_objects_(dirty_image_objects) { CHECK_NE(image_begin, 0U); std::fill_n(image_methods_, arraysize(image_methods_), nullptr); CHECK_EQ(compile_app_image, !Runtime::Current()->GetHeap()->GetBootImageSpaces().empty()) << "Compiling a boot image should occur iff there are no boot image spaces loaded"; } ImageWriter::ImageInfo::ImageInfo() : intern_table_(new InternTable), class_table_(new ClassTable) {} void ImageWriter::CopyReference(mirror::HeapReference* dest, ObjPtr src) { dest->Assign(GetImageAddress(src.Ptr())); } void ImageWriter::CopyReference(mirror::CompressedReference* dest, ObjPtr src) { dest->Assign(GetImageAddress(src.Ptr())); } void ImageWriter::CopyAndFixupPointer(void** target, void* value) { void* new_value = value; if (value != nullptr && !IsInBootImage(value)) { auto it = native_object_relocations_.find(value); CHECK(it != native_object_relocations_.end()) << value; const NativeObjectRelocation& relocation = it->second; ImageInfo& image_info = GetImageInfo(relocation.oat_index); new_value = reinterpret_cast(image_info.image_begin_ + relocation.offset); } if (target_ptr_size_ == PointerSize::k32) { *reinterpret_cast(target) = PointerToLowMemUInt32(new_value); } else { *reinterpret_cast(target) = reinterpret_cast(new_value); } } } // namespace art android-platform-art-8.1.0+r23/compiler/image_writer.h000066400000000000000000000634201336577252300226330ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project 
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_IMAGE_WRITER_H_
#define ART_COMPILER_IMAGE_WRITER_H_

#include <stdint.h>
#include "base/memory_tool.h"

#include <cstddef>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <ostream>

#include "art_method.h"
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "class_table.h"
#include "driver/compiler_driver.h"
#include "image.h"
#include "intern_table.h"
#include "lock_word.h"
#include "mem_map.h"
#include "mirror/dex_cache.h"
#include "obj_ptr.h"
#include "oat_file.h"
#include "os.h"
#include "safe_map.h"
#include "utils.h"

namespace art {
namespace gc {
namespace accounting {
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
}  // namespace accounting
namespace space {
class ImageSpace;
}  // namespace space
}  // namespace gc

namespace mirror {
class ClassLoader;
}  // namespace mirror

class ClassLoaderVisitor;
class ImtConflictTable;

static constexpr int kInvalidFd = -1;

// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver,
              uintptr_t image_begin,
              bool compile_pic,
              bool compile_app_image,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<const char*>& oat_filenames,
              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
              const std::unordered_set<std::string>* dirty_image_objects);

  bool PrepareImageAddressSpace();

  bool IsImageAddressSpaceReady() const {
    DCHECK(!image_infos_.empty());
    for (const ImageInfo& image_info : image_infos_) {
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return true;
  }

  ObjPtr<mirror::ClassLoader> GetClassLoader() {
    CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
    return compile_app_image_ ? *class_loaders_.begin() : nullptr;
  }

  template <typename T>
  T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      size_t oat_index = GetOatIndex(object);
      const ImageInfo& image_info = GetImageInfo(oat_index);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }

  ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }

  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }

  // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
  // the names in image_filenames.
  // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
  // the names in oat_filenames.
  bool Write(int image_fd,
             const std::vector<const char*>& image_filenames,
             const std::vector<const char*>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);

  uintptr_t GetOatDataBegin(size_t oat_index) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
  }

  // Get the index of the oat file containing the dex file.
  //
  // This "oat_index" is used to retrieve information about the memory layout
  // of the oat file and its associated image file, needed for link-time patching
  // of references to the image or across oat files.
  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;

  // Get the index of the oat file containing the dex file served by the dex cache.
  size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the oat layout for the given oat file.
  // This will make the oat_offset for the next oat file valid.
  void UpdateOatFileLayout(size_t oat_index,
                           size_t oat_loaded_size,
                           size_t oat_data_offset,
                           size_t oat_data_size);
  // Update information about the oat header, i.e. checksum and trampoline offsets.
  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);

 private:
  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;

  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods).
  enum Bin {
    kBinKnownDirty,                   // Known dirty objects from --dirty-image-objects list
    kBinMiscDirty,                    // Dex caches, object locks, etc...
    kBinClassVerified,                // Class verified, but initializers haven't been run
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,             // Class initializers have been run
    // All classes get their own bins since their fields often dirty
    kBinClassInitializedFinalStatics, // Class initializers have been run, no non-final statics
    // Likely-clean:
    kBinString,                       // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non mirror fields must be below.
    // ArtFields should be always clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // IMT (clean)
    kBinImTable,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,                // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
kBinMirrorCount = kBinArtField, }; friend std::ostream& operator<<(std::ostream& stream, const Bin& bin); enum NativeObjectRelocationType { kNativeObjectRelocationTypeArtField, kNativeObjectRelocationTypeArtFieldArray, kNativeObjectRelocationTypeArtMethodClean, kNativeObjectRelocationTypeArtMethodArrayClean, kNativeObjectRelocationTypeArtMethodDirty, kNativeObjectRelocationTypeArtMethodArrayDirty, kNativeObjectRelocationTypeRuntimeMethod, kNativeObjectRelocationTypeIMTable, kNativeObjectRelocationTypeIMTConflictTable, kNativeObjectRelocationTypeDexCacheArray, }; friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type); enum OatAddress { kOatAddressInterpreterToInterpreterBridge, kOatAddressInterpreterToCompiledCodeBridge, kOatAddressJNIDlsymLookup, kOatAddressQuickGenericJNITrampoline, kOatAddressQuickIMTConflictTrampoline, kOatAddressQuickResolutionTrampoline, kOatAddressQuickToInterpreterBridge, // Number of elements in the enum. kOatAddressCount, }; friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address); static constexpr size_t kBinBits = MinimumBitsToStore(kBinMirrorCount - 1); // uint32 = typeof(lockword_) // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK // failures due to invalid read barrier bits during object field reads. static const size_t kBinShift = BitSizeOf() - kBinBits - LockWord::kGCStateSize; // 111000.....0 static const size_t kBinMask = ((static_cast(1) << kBinBits) - 1) << kBinShift; // We use the lock word to store the bin # and bin index of the object in the image. // // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up // stored in the lock word bit-for-bit when object forwarding addresses are being calculated. struct BinSlot { explicit BinSlot(uint32_t lockword); BinSlot(Bin bin, uint32_t index); // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc. Bin GetBin() const; // The offset in bytes from the beginning of the bin. Aligned to object size. uint32_t GetIndex() const; // Pack into a single uint32_t, for storing into a lock word. uint32_t Uint32Value() const { return lockword_; } // Comparison operator for map support bool operator<(const BinSlot& other) const { return lockword_ < other.lockword_; } private: // Must be the same size as LockWord, any larger and we would truncate the data. const uint32_t lockword_; }; struct ImageInfo { ImageInfo(); ImageInfo(ImageInfo&&) = default; // Create the image sections into the out sections variable, returns the size of the image // excluding the bitmap. size_t CreateImageSections(ImageSection* out_sections) const; std::unique_ptr image_; // Memory mapped for generating the image. // Target begin of this image. Notes: It is not valid to write here, this is the address // of the target image, not necessarily where image_ is mapped. The address is only valid // after layouting (otherwise null). uint8_t* image_begin_ = nullptr; // Offset to the free space in image_, initially size of image header. size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); uint32_t image_roots_address_ = 0; // The image roots address in the image. size_t image_offset_ = 0; // Offset of this image from the start of the first image. // Image size is the *address space* covered by this image. As the live bitmap is aligned // to the page size, the live bitmap will cover more address space than necessary. 
But live // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size. // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be // page-aligned). size_t image_size_ = 0; // Oat data. // Offset of the oat file for this image from start of oat files. This is // valid when the previous oat file has been written. size_t oat_offset_ = 0; // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout(). const uint8_t* oat_file_begin_ = nullptr; size_t oat_loaded_size_ = 0; const uint8_t* oat_data_begin_ = nullptr; size_t oat_size_ = 0; // Size of the corresponding oat data. // The oat header checksum, valid after UpdateOatFileHeader(). uint32_t oat_checksum_ = 0u; // Image bitmap which lets us know where the objects inside of the image reside. std::unique_ptr image_bitmap_; // The start offsets of the dex cache arrays. SafeMap dex_cache_array_starts_; // Offset from oat_data_begin_ to the stubs. uint32_t oat_address_offsets_[kOatAddressCount] = {}; // Bin slot tracking for dirty object packing. size_t bin_slot_sizes_[kBinSize] = {}; // Number of bytes in a bin. size_t bin_slot_offsets_[kBinSize] = {}; // Number of bytes in previous bins. size_t bin_slot_count_[kBinSize] = {}; // Number of objects in a bin. // Cached size of the intern table for when we allocate memory. size_t intern_table_bytes_ = 0; // Number of image class table bytes. size_t class_table_bytes_ = 0; // Number of object fixup bytes. size_t object_fixup_bytes_ = 0; // Number of pointer fixup bytes. size_t pointer_fixup_bytes_ = 0; // Intern table associated with this image for serialization. std::unique_ptr intern_table_; // Class table associated with this image for serialization. std::unique_ptr class_table_; }; // We use the lock word to store the offset of the object in the image. 
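  //
  // A minimal sketch of the packing BinSlot performs with the constants
  // declared above (kBinShift, kBinMask), using free-standing names:
  //
  //   uint32_t Pack(uint32_t bin, uint32_t index) {
  //     return index | (bin << kBinShift);              // index in the low bits
  //   }
  //   uint32_t BinOf(uint32_t word)   { return (word & kBinMask) >> kBinShift; }
  //   uint32_t IndexOf(uint32_t word) { return word & ~kBinMask; }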
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, ObjPtr<mirror::DexCache> dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);

  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  mirror::Object* GetLocalAddress(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(OatAddress type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as they are all in one
    // .o ELF object. But interpret it as signed.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all class loaders.
  void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
  void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
                               ObjPtr<mirror::ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::classlinker_classes_lock_);

  // Verify unwanted classes removed.
  void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
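  // Roughly: assign bin slots to runtime methods and interned strings, walk
  // the GC roots and transitively bin every reachable object, turn per-bin
  // sizes into per-bin offsets, then rewrite each object's lock word from a
  // BinSlot into its final image offset.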
void CalculateNewObjectOffsets() REQUIRES_SHARED(Locks::mutator_lock_); void ProcessWorkStack(WorkStack* work_stack) REQUIRES_SHARED(Locks::mutator_lock_); void CreateHeader(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_); mirror::ObjectArray* CreateImageRoots(size_t oat_index) const REQUIRES_SHARED(Locks::mutator_lock_); void CalculateObjectBinSlots(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); void UnbinObjectsIntoOffset(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); // Creates the contiguous image in memory and adjusts pointers. void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_); void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_); void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_); void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info) REQUIRES_SHARED(Locks::mutator_lock_); void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_); void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) REQUIRES_SHARED(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) REQUIRES_SHARED(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) REQUIRES_SHARED(Locks::mutator_lock_); void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_); void FixupPointerArray(mirror::Object* dst, mirror::PointerArray* arr, mirror::Class* klass, Bin array_type) REQUIRES_SHARED(Locks::mutator_lock_); // Get quick code for non-resolution/imt_conflict/abstract method. const uint8_t* GetQuickCode(ArtMethod* method, const ImageInfo& image_info, bool* quick_is_interpreted) REQUIRES_SHARED(Locks::mutator_lock_); // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins. size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const; // Return true if a method is likely to be dirtied at runtime. bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_); // Assign the offset for an ArtMethod. void AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_); // Return true if imt was newly inserted. bool TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_); // Assign the offset for an IMT conflict table. Does nothing if the table already has a native // relocation. void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_); // Return true if klass is loaded by the boot class loader but not in the boot image. bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_); // Return true if klass depends on a boot class loader non image class. We want to prune these // classes since we do not want any boot class loader classes in the image. This means that // we also cannot have any classes which refer to these boot class loader non image classes. // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler // driver. bool PruneAppImageClass(ObjPtr klass) REQUIRES_SHARED(Locks::mutator_lock_); // early_exit is true if we had a cyclic dependency anywhere down the chain. 
  bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
                                  bool* early_exit,
                                  std::unordered_set<mirror::Class*>* visited)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsMultiImage() const {
    return image_infos_.size() > 1;
  }

  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);

  uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the object will be when the image is loaded at runtime.
  template <typename T>
  T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the temporary copy of the object currently is.
  template <typename T>
  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if obj is inside the boot image space. This may only return true if we are
  // compiling an app image.
  bool IsInBootImage(const void* obj) const;

  // Return true if ptr is within the boot oat file.
  bool IsInBootOatFile(const void* ptr) const;

  // Get the index of the oat file associated with the object.
  size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  // The oat index for shared data in multi-image and all data in single-image compilation.
  size_t GetDefaultOatIndex() const {
    return 0u;
  }

  ImageInfo& GetImageInfo(size_t oat_index) {
    return image_infos_[oat_index];
  }

  const ImageInfo& GetImageInfo(size_t oat_index) const {
    return image_infos_[oat_index];
  }

  // Find an already strongly-interned string in the other images or in the boot image. Used to
  // remove duplicates in the multi-image and app image cases.
  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if there already exists a native allocation for an object.
  bool NativeRelocationAssigned(void* ptr) const;

  void CopyReference(mirror::HeapReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyReference(mirror::CompressedReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyAndFixupPointer(void** target, void* value);

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
  // to keep track. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lock words which were temporarily used to hold
  // forwarding addresses, as well as to copy over hash codes.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Oat index map for objects.
  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;

  // Boolean flags.
  const bool compile_pic_;
  const bool compile_app_image_;

  // Size of pointers on the target architecture.
  PointerSize target_ptr_size_;

  // Image data indexed by the oat file index.
  dchecked_vector<ImageInfo> image_infos_;

  // ArtField, ArtMethod relocating map. These are allocated as an array of structs, but we want to
  // have one entry per ArtField for convenience. ArtFields are placed right after the end of the
  // image objects (aka the sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
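// ----------------------------------------------------------------------------
// Editor's sketch (illustrative, not ART code): the bookkeeping above packs
// objects into bins, where bin_slot_offsets_ is the exclusive prefix sum of
// bin_slot_sizes_, and native data (ArtFields, then ArtMethods) is appended
// after the packed bins. kNumBins below is an invented stand-in for kBinSize.
#include <array>
#include <cstddef>

namespace bin_layout_sketch {

constexpr std::size_t kNumBins = 8;  // Stand-in for kBinSize.

// offsets[i] = number of bytes in all bins before bin i, so an object's image
// offset is image_objects_offset_begin_ + offsets[bin] + its slot within the bin.
static std::array<std::size_t, kNumBins> ComputeBinOffsets(
    const std::array<std::size_t, kNumBins>& bin_sizes) {
  std::array<std::size_t, kNumBins> offsets{};
  std::size_t running = 0;
  for (std::size_t i = 0; i < kNumBins; ++i) {
    offsets[i] = running;     // Bytes in previous bins.
    running += bin_sizes[i];  // Add bytes in this bin.
  }
  return offsets;
}

}  // namespace bin_layout_sketch
// ----------------------------------------------------------------------------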
struct NativeObjectRelocation { size_t oat_index; uintptr_t offset; NativeObjectRelocationType type; bool IsArtMethodRelocation() const { return type == kNativeObjectRelocationTypeArtMethodClean || type == kNativeObjectRelocationTypeArtMethodDirty || type == kNativeObjectRelocationTypeRuntimeMethod; } }; std::unordered_map native_object_relocations_; // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image. ArtMethod* image_methods_[ImageHeader::kImageMethodsCount]; // Counters for measurements, used for logging only. uint64_t dirty_methods_; uint64_t clean_methods_; // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass. std::unordered_map prune_class_memo_; // Class loaders with a class table to write out. There should only be one class loader because // dex2oat loads the dex files to be compiled into a single class loader. For the boot image, // null is a valid entry. std::unordered_set class_loaders_; // Which mode the image is stored as, see image.h const ImageHeader::StorageMode image_storage_mode_; // The file names of oat files. const std::vector& oat_filenames_; // Map of dex files to the indexes of oat files that they were compiled into. const std::unordered_map& dex_file_oat_index_map_; // Set of objects known to be dirty in the image. Can be nullptr if there are none. const std::unordered_set* dirty_image_objects_; class ComputeLazyFieldsForClassesVisitor; class FixupClassVisitor; class FixupRootVisitor; class FixupVisitor; class GetRootsVisitor; class ImageAddressVisitorForDexCacheArray; class NativeLocationVisitor; class PruneClassesVisitor; class PruneClassLoaderClassesVisitor; class RegisterBootClassPathClassesVisitor; class VisitReferencesVisitor; class PruneObjectReferenceVisitor; DISALLOW_COPY_AND_ASSIGN(ImageWriter); }; } // namespace art #endif // ART_COMPILER_IMAGE_WRITER_H_ android-platform-art-8.1.0+r23/compiler/intrinsics_enum.h000066400000000000000000000020571336577252300233650ustar00rootroot00000000000000/* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_INTRINSICS_ENUM_H_ #define ART_COMPILER_INTRINSICS_ENUM_H_ namespace art { enum class Intrinsics { #define OPTIMIZING_INTRINSICS(Name, ...) \ k ## Name, #include "intrinsics_list.h" kNone, INTRINSICS_LIST(OPTIMIZING_INTRINSICS) #undef INTRINSICS_LIST #undef OPTIMIZING_INTRINSICS }; std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic); } // namespace art #endif // ART_COMPILER_INTRINSICS_ENUM_H_ android-platform-art-8.1.0+r23/compiler/intrinsics_list.h000066400000000000000000000447561336577252300234100ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_INTRINSICS_LIST_H_ #define ART_COMPILER_INTRINSICS_LIST_H_ // All intrinsics supported by ART. Format is name, then whether it is expected // to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an // environment, may have side effects, or may throw exceptions. // Note: adding a new intrinsic requires an art image version change, // as the modifiers flag for some ArtMethods will need to be changed. // Note: j.l.Integer.valueOf says kNoThrow even though it could throw an OOME. // The kNoThrow should be renamed to kNoVisibleThrow, as it is ok to GVN Integer.valueOf // (kNoSideEffects), and it is also OK to remove it if it's unused. // Note: Thread.interrupted is marked with kAllSideEffects due to the lack of finer grain // side effects representation. #define INTRINSICS_LIST(V) \ V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J") \ V(DoubleDoubleToLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "doubleToLongBits", "(D)J") \ V(DoubleIsInfinite, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "isInfinite", "(D)Z") \ V(DoubleIsNaN, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "isNaN", "(D)Z") \ V(DoubleLongBitsToDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Double;", "longBitsToDouble", "(J)D") \ V(FloatFloatToRawIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Float;", "floatToRawIntBits", "(F)I") \ V(FloatFloatToIntBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Float;", "floatToIntBits", "(F)I") \ V(FloatIsInfinite, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Float;", "isInfinite", "(F)Z") \ V(FloatIsNaN, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Float;", "isNaN", "(F)Z") \ V(FloatIntBitsToFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Float;", "intBitsToFloat", "(I)F") \ V(IntegerReverse, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "reverse", "(I)I") \ V(IntegerReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "reverseBytes", "(I)I") \ V(IntegerBitCount, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "bitCount", "(I)I") \ V(IntegerCompare, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "compare", "(II)I") \ V(IntegerHighestOneBit, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "highestOneBit", "(I)I") \ V(IntegerLowestOneBit, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "lowestOneBit", "(I)I") \ V(IntegerNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "numberOfLeadingZeros", "(I)I") \ V(IntegerNumberOfTrailingZeros, kStatic, 
kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "numberOfTrailingZeros", "(I)I") \ V(IntegerRotateRight, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "rotateRight", "(II)I") \ V(IntegerRotateLeft, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "rotateLeft", "(II)I") \ V(IntegerSignum, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "signum", "(I)I") \ V(LongReverse, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "reverse", "(J)J") \ V(LongReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "reverseBytes", "(J)J") \ V(LongBitCount, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "bitCount", "(J)I") \ V(LongCompare, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "compare", "(JJ)I") \ V(LongHighestOneBit, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "highestOneBit", "(J)J") \ V(LongLowestOneBit, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "lowestOneBit", "(J)J") \ V(LongNumberOfLeadingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "numberOfLeadingZeros", "(J)I") \ V(LongNumberOfTrailingZeros, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "numberOfTrailingZeros", "(J)I") \ V(LongRotateRight, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "rotateRight", "(JI)J") \ V(LongRotateLeft, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "rotateLeft", "(JI)J") \ V(LongSignum, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Long;", "signum", "(J)I") \ V(ShortReverseBytes, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Short;", "reverseBytes", "(S)S") \ V(MathAbsDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "abs", "(D)D") \ V(MathAbsFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "abs", "(F)F") \ V(MathAbsLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "abs", "(J)J") \ V(MathAbsInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "abs", "(I)I") \ V(MathMinDoubleDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "min", "(DD)D") \ V(MathMinFloatFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "min", "(FF)F") \ V(MathMinLongLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "min", "(JJ)J") \ V(MathMinIntInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "min", "(II)I") \ V(MathMaxDoubleDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "max", "(DD)D") \ V(MathMaxFloatFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "max", "(FF)F") \ V(MathMaxLongLong, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "max", "(JJ)J") \ V(MathMaxIntInt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "max", "(II)I") \ V(MathCos, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cos", "(D)D") \ V(MathSin, kStatic, 
kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "sin", "(D)D") \ V(MathAcos, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "acos", "(D)D") \ V(MathAsin, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "asin", "(D)D") \ V(MathAtan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan", "(D)D") \ V(MathAtan2, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan2", "(DD)D") \ V(MathCbrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cbrt", "(D)D") \ V(MathCosh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cosh", "(D)D") \ V(MathExp, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "exp", "(D)D") \ V(MathExpm1, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "expm1", "(D)D") \ V(MathHypot, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "hypot", "(DD)D") \ V(MathLog, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "log", "(D)D") \ V(MathLog10, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "log10", "(D)D") \ V(MathNextAfter, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "nextAfter", "(DD)D") \ V(MathSinh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "sinh", "(D)D") \ V(MathTan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "tan", "(D)D") \ V(MathTanh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "tanh", "(D)D") \ V(MathSqrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "sqrt", "(D)D") \ V(MathCeil, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "ceil", "(D)D") \ V(MathFloor, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "floor", "(D)D") \ V(MathRint, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "rint", "(D)D") \ V(MathRoundDouble, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "round", "(D)J") \ V(MathRoundFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "round", "(F)I") \ V(SystemArrayCopyChar, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/System;", "arraycopy", "([CI[CII)V") \ V(SystemArrayCopy, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/System;", "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V") \ V(ThreadCurrentThread, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Thread;", "currentThread", "()Ljava/lang/Thread;") \ V(MemoryPeekByte, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Llibcore/io/Memory;", "peekByte", "(J)B") \ V(MemoryPeekIntNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Llibcore/io/Memory;", "peekIntNative", "(J)I") \ V(MemoryPeekLongNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Llibcore/io/Memory;", "peekLongNative", "(J)J") \ V(MemoryPeekShortNative, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Llibcore/io/Memory;", "peekShortNative", "(J)S") \ V(MemoryPokeByte, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", 
"pokeByte", "(JB)V") \ V(MemoryPokeIntNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeIntNative", "(JI)V") \ V(MemoryPokeLongNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeLongNative", "(JJ)V") \ V(MemoryPokeShortNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeShortNative", "(JS)V") \ V(StringCharAt, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "charAt", "(I)C") \ V(StringCompareTo, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \ V(StringEquals, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z") \ V(StringGetCharsNoCheck, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "getCharsNoCheck", "(II[CI)V") \ V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(I)I") \ V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(II)I") \ V(StringStringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;)I") \ V(StringStringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;I)I") \ V(StringIsEmpty, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "isEmpty", "()Z") \ V(StringLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "length", "()I") \ V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \ V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromChars", "(II[C)Ljava/lang/String;") \ V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromString", "(Ljava/lang/String;)Ljava/lang/String;") \ V(StringBufferAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuffer;") \ V(StringBufferLength, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/StringBuffer;", "length", "()I") \ V(StringBufferToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "toString", "()Ljava/lang/String;") \ V(StringBuilderAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \ V(StringBuilderLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/StringBuilder;", "length", "()I") \ V(StringBuilderToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "toString", "()Ljava/lang/String;") \ V(UnsafeCASInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapInt", "(Ljava/lang/Object;JII)Z") \ V(UnsafeCASLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapLong", 
"(Ljava/lang/Object;JJJ)Z") \ V(UnsafeCASObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapObject", "(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z") \ V(UnsafeGet, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getInt", "(Ljava/lang/Object;J)I") \ V(UnsafeGetVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getIntVolatile", "(Ljava/lang/Object;J)I") \ V(UnsafeGetObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getObject", "(Ljava/lang/Object;J)Ljava/lang/Object;") \ V(UnsafeGetObjectVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getObjectVolatile", "(Ljava/lang/Object;J)Ljava/lang/Object;") \ V(UnsafeGetLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getLong", "(Ljava/lang/Object;J)J") \ V(UnsafeGetLongVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getLongVolatile", "(Ljava/lang/Object;J)J") \ V(UnsafePut, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putInt", "(Ljava/lang/Object;JI)V") \ V(UnsafePutOrdered, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putOrderedInt", "(Ljava/lang/Object;JI)V") \ V(UnsafePutVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putIntVolatile", "(Ljava/lang/Object;JI)V") \ V(UnsafePutObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putObject", "(Ljava/lang/Object;JLjava/lang/Object;)V") \ V(UnsafePutObjectOrdered, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putOrderedObject", "(Ljava/lang/Object;JLjava/lang/Object;)V") \ V(UnsafePutObjectVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putObjectVolatile", "(Ljava/lang/Object;JLjava/lang/Object;)V") \ V(UnsafePutLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putLong", "(Ljava/lang/Object;JJ)V") \ V(UnsafePutLongOrdered, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putOrderedLong", "(Ljava/lang/Object;JJ)V") \ V(UnsafePutLongVolatile, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "putLongVolatile", "(Ljava/lang/Object;JJ)V") \ V(UnsafeGetAndAddInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getAndAddInt", "(Ljava/lang/Object;JI)I") \ V(UnsafeGetAndAddLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getAndAddLong", "(Ljava/lang/Object;JJ)J") \ V(UnsafeGetAndSetInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getAndSetInt", "(Ljava/lang/Object;JI)I") \ V(UnsafeGetAndSetLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getAndSetLong", "(Ljava/lang/Object;JJ)J") \ V(UnsafeGetAndSetObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "getAndSetObject", "(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;") \ V(UnsafeLoadFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "loadFence", "()V") \ V(UnsafeStoreFence, kVirtual, kNeedsEnvironmentOrCache, 
kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "storeFence", "()V") \
  V(UnsafeFullFence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "fullFence", "()V") \
  V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/ref/Reference;", "getReferent", "()Ljava/lang/Object;") \
  V(IntegerValueOf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Integer;", "valueOf", "(I)Ljava/lang/Integer;") \
  V(ThreadInterrupted, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/Thread;", "interrupted", "()Z")

#endif  // ART_COMPILER_INTRINSICS_LIST_H_
#undef ART_COMPILER_INTRINSICS_LIST_H_   // #define is only for lint.
android-platform-art-8.1.0+r23/compiler/jit/000077500000000000000000000000001336577252300205655ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jit/jit_compiler.cc000066400000000000000000000162351336577252300235630ustar00rootroot00000000000000/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "jit_compiler.h"

#include "android-base/stringprintf.h"
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "art_method-inl.h"
#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "base/unix_file/fd_file.h"
#include "debug/elf_debug_writer.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "optimizing/register_allocator.h"
#include "thread_list.h"

namespace art {
namespace jit {

JitCompiler* JitCompiler::Create() {
  return new JitCompiler();
}

extern "C" void* jit_load(bool* generate_debug_info) {
  VLOG(jit) << "loading jit compiler";
  auto* const jit_compiler = JitCompiler::Create();
  CHECK(jit_compiler != nullptr);
  *generate_debug_info = jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo();
  VLOG(jit) << "Done loading jit compiler";
  return jit_compiler;
}

extern "C" void jit_unload(void* handle) {
  DCHECK(handle != nullptr);
  delete reinterpret_cast<JitCompiler*>(handle);
}

extern "C" bool jit_compile_method(
    void* handle, ArtMethod* method, Thread* self, bool osr)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
  DCHECK(jit_compiler != nullptr);
  return jit_compiler->CompileMethod(self, method, osr);
}

extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
  DCHECK(jit_compiler != nullptr);
  if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
    const ArrayRef<mirror::Class*> types_array(types, count);
    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
        kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
    CreateJITCodeEntry(std::move(elf_file));
  }
}
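// ----------------------------------------------------------------------------
// Editor's sketch: the extern "C" functions above (jit_load, jit_unload,
// jit_compile_method, jit_types_loaded) are the C ABI through which the runtime
// drives this compiler as a plugin. A rough stand-alone illustration of binding
// them with dlopen/dlsym; treat it as a sketch under the assumption that the
// library is named "libart-compiler.so" (link with -ldl on a host build):
#include <dlfcn.h>
#include <cstdio>

int main() {
  void* lib = dlopen("libart-compiler.so", RTLD_NOW);
  if (lib == nullptr) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  using JitLoadFn = void* (*)(bool*);
  using JitUnloadFn = void (*)(void*);
  auto load = reinterpret_cast<JitLoadFn>(dlsym(lib, "jit_load"));
  auto unload = reinterpret_cast<JitUnloadFn>(dlsym(lib, "jit_unload"));
  if (load != nullptr && unload != nullptr) {
    bool generate_debug_info = false;
    void* handle = load(&generate_debug_info);  // Opaque JitCompiler*.
    std::printf("generate_debug_info=%d\n", generate_debug_info ? 1 : 0);
    unload(handle);
  }
  dlclose(lib);
  return 0;
}
// ----------------------------------------------------------------------------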
// Callers of this method assume it has NO_RETURN.
NO_RETURN static void Usage(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  std::string error;
  android::base::StringAppendV(&error, fmt, ap);
  LOG(FATAL) << error;
  va_end(ap);
  exit(EXIT_FAILURE);
}

JitCompiler::JitCompiler() {
  compiler_options_.reset(new CompilerOptions());
  for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
    compiler_options_->ParseCompilerOption(argument, Usage);
  }
  // JIT is never PIC, no matter what the runtime compiler options specify.
  compiler_options_->SetNonPic();
  // Set debuggability based on the runtime value.
  compiler_options_->SetDebuggable(Runtime::Current()->IsJavaDebuggable());
  // Special case max code units for inlining, whose default is "unset" (implicitly
  // meaning no limit).
  compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
  const InstructionSet instruction_set = kRuntimeISA;
  for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
    VLOG(compiler) << "JIT compiler option " << option;
    std::string error_msg;
    if (option.starts_with("--instruction-set-variant=")) {
      StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
      VLOG(compiler) << "JIT instruction set variant " << str;
      instruction_set_features_ = InstructionSetFeatures::FromVariant(
          instruction_set, str.as_string(), &error_msg);
      if (instruction_set_features_ == nullptr) {
        LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
      }
    } else if (option.starts_with("--instruction-set-features=")) {
      StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
      VLOG(compiler) << "JIT instruction set features " << str;
      if (instruction_set_features_ == nullptr) {
        instruction_set_features_ = InstructionSetFeatures::FromVariant(
            instruction_set, "default", &error_msg);
        if (instruction_set_features_ == nullptr) {
          LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
        }
      }
      instruction_set_features_ =
          instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg);
      if (instruction_set_features_ == nullptr) {
        LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
      }
    }
  }
  if (instruction_set_features_ == nullptr) {
    instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
  }
  cumulative_logger_.reset(new CumulativeLogger("jit times"));
  compiler_driver_.reset(new CompilerDriver(
      compiler_options_.get(),
      /* verification_results */ nullptr,
      Compiler::kOptimizing,
      instruction_set,
      instruction_set_features_.get(),
      /* image_classes */ nullptr,
      /* compiled_classes */ nullptr,
      /* compiled_methods */ nullptr,
      /* thread_count */ 1,
      /* dump_stats */ false,
      /* dump_passes */ false,
      cumulative_logger_.get(),
      /* swap_fd */ -1,
      /* profile_compilation_info */ nullptr));
  // Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false); compiler_driver_->SetSupportBootImageFixup(false); size_t thread_count = compiler_driver_->GetThreadCount(); if (compiler_options_->GetGenerateDebugInfo()) { DCHECK_EQ(thread_count, 1u) << "Generating debug info only works with one compiler thread"; jit_logger_.reset(new JitLogger()); jit_logger_->OpenLog(); } } JitCompiler::~JitCompiler() { if (compiler_options_->GetGenerateDebugInfo()) { jit_logger_->CloseLog(); } } bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { DCHECK(!method->IsProxyMethod()); DCHECK(method->GetDeclaringClass()->IsResolved()); TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit)); self->AssertNoPendingException(); Runtime* runtime = Runtime::Current(); // Do the compilation. bool success = false; { TimingLogger::ScopedTiming t2("Compiling", &logger); JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); success = compiler_driver_->GetCompiler()->JitCompile( self, code_cache, method, osr, jit_logger_.get()); } // Trim maps to reduce memory usage. // TODO: move this to an idle phase. { TimingLogger::ScopedTiming t2("TrimMaps", &logger); runtime->GetJitArenaPool()->TrimMaps(); } runtime->GetJit()->AddTimingLogger(logger); return success; } } // namespace jit } // namespace art android-platform-art-8.1.0+r23/compiler/jit/jit_compiler.h000066400000000000000000000040001336577252300234100ustar00rootroot00000000000000/* * Copyright 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_JIT_JIT_COMPILER_H_ #define ART_COMPILER_JIT_JIT_COMPILER_H_ #include "base/mutex.h" #include "compiled_method.h" #include "jit_logger.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" namespace art { class ArtMethod; class InstructionSetFeatures; namespace jit { class JitCompiler { public: static JitCompiler* Create(); virtual ~JitCompiler(); // Compilation entrypoint. Returns whether the compilation succeeded. bool CompileMethod(Thread* self, ArtMethod* method, bool osr) REQUIRES_SHARED(Locks::mutator_lock_); CompilerOptions* GetCompilerOptions() const { return compiler_options_.get(); } CompilerDriver* GetCompilerDriver() const { return compiler_driver_.get(); } private: std::unique_ptr compiler_options_; std::unique_ptr cumulative_logger_; std::unique_ptr compiler_driver_; std::unique_ptr instruction_set_features_; std::unique_ptr jit_logger_; JitCompiler(); // This is in the compiler since the runtime doesn't have access to the compiled method // structures. 
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method) REQUIRES_SHARED(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(JitCompiler); }; } // namespace jit } // namespace art #endif // ART_COMPILER_JIT_JIT_COMPILER_H_ android-platform-art-8.1.0+r23/compiler/jit/jit_logger.cc000066400000000000000000000273461336577252300232350ustar00rootroot00000000000000/* * Copyright 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "jit_logger.h" #include "arch/instruction_set.h" #include "art_method-inl.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" #include "driver/compiler_driver.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" #include "oat_file-inl.h" namespace art { namespace jit { #ifdef ART_TARGET_ANDROID static const char* kLogPrefix = "/data/misc/trace"; #else static const char* kLogPrefix = "/tmp"; #endif // File format of perf-PID.map: // +---------------------+ // |ADDR SIZE symbolname1| // |ADDR SIZE symbolname2| // |... | // +---------------------+ void JitLogger::OpenPerfMapLog() { std::string pid_str = std::to_string(getpid()); std::string perf_filename = std::string(kLogPrefix) + "/perf-" + pid_str + ".map"; perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str())); if (perf_file_ == nullptr) { LOG(ERROR) << "Could not create perf file at " << perf_filename << " Are you on a user build? Perf only works on userdebug/eng builds"; } } void JitLogger::WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method) { if (perf_file_ != nullptr) { std::string method_name = method->PrettyMethod(); std::ostringstream stream; stream << std::hex << reinterpret_cast(ptr) << " " << code_size << " " << method_name << std::endl; std::string str = stream.str(); bool res = perf_file_->WriteFully(str.c_str(), str.size()); if (!res) { LOG(WARNING) << "Failed to write jitted method info in log: write failure."; } } else { LOG(WARNING) << "Failed to write jitted method info in log: log file doesn't exist."; } } void JitLogger::ClosePerfMapLog() { if (perf_file_ != nullptr) { UNUSED(perf_file_->Flush()); UNUSED(perf_file_->Close()); } } // File format of jit-PID.jump: // // +--------------------------------+ // | PerfJitHeader | // +--------------------------------+ // | PerfJitCodeLoad { | . // | struct PerfJitBase; | . // | uint32_t process_id_; | . // | uint32_t thread_id_; | . // | uint64_t vma_; | . // | uint64_t code_address_; | . // | uint64_t code_size_; | . // | uint64_t code_id_; | . // | } | . // +- -+ . // | method_name'\0' | +--> one jitted method // +- -+ . // | jitted code binary | . // | ... | . // +--------------------------------+ . // | PerfJitCodeDebugInfo { | . // | struct PerfJitBase; | . // | uint64_t address_; | . // | uint64_t entry_count_; | . // | struct PerfJitDebugEntry; | . // | } | . // +--------------------------------+ // | PerfJitCodeLoad | // ... 
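// ----------------------------------------------------------------------------
// Editor's sketch: the jit-PID.dump layout drawn above is self-describing.
// Every record begins with the common PerfJitBase fields (event, size,
// timestamp), so a consumer can walk the file record by record by skipping
// size_ bytes each time. A stand-alone illustration (RecordHead mirrors the
// PerfJitBase layout defined just below; WalkRecords is invented for this
// sketch):
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct RecordHead {      // Mirrors PerfJitBase: event, total size, timestamp.
  uint32_t event;
  uint32_t size;
  uint64_t time_stamp;
};

static void WalkRecords(const std::vector<uint8_t>& file, size_t header_size) {
  // header_size is PerfJitHeader::size_, read from the file header itself.
  size_t pos = header_size;
  while (pos + sizeof(RecordHead) <= file.size()) {
    RecordHead head;
    std::memcpy(&head, file.data() + pos, sizeof(head));
    std::printf("event=%u size=%u ts=%llu\n",
                static_cast<unsigned>(head.event),
                static_cast<unsigned>(head.size),
                static_cast<unsigned long long>(head.time_stamp));
    if (head.size < sizeof(RecordHead)) {
      break;  // Corrupt record; size_ must cover at least the common fields.
    }
    pos += head.size;  // size_ covers the whole record, including payload.
  }
}
// ----------------------------------------------------------------------------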
struct PerfJitHeader {
  uint32_t magic_;            // Characters "JiTD"
  uint32_t version_;          // Header version
  uint32_t size_;             // Total size of header
  uint32_t elf_mach_target_;  // Elf mach target
  uint32_t reserved_;         // Reserved, currently not used
  uint32_t process_id_;       // Process ID of the JIT compiler
  uint64_t time_stamp_;       // Timestamp when the header is generated
  uint64_t flags_;            // Currently the flags are only used for choosing the clock for the
                              // timestamp; we set it to 0 to tell perf that we use the
                              // CLOCK_MONOTONIC clock.
  static const uint32_t kMagic = 0x4A695444;  // "JiTD"
  static const uint32_t kVersion = 1;
};

// Each record starts with the same basic information: event type, total size, and timestamp.
struct PerfJitBase {
  enum PerfJitEvent {
    // A jitted code load event.
    // In ART JIT, it is used to log that a new method has been JIT-compiled and committed to the
    // jit-code-cache.
    // Note that such kLoad events support code cache GC in ART JIT.
    // Every kLoad event recorded in jit-PID.dump and every perf sample recorded in perf.data
    // carries a time stamp. In case code cache GC happens in ART JIT and a new jitted method is
    // committed to the same address as a previously deleted method, the time stamp information
    // can help the profiler tell whether a sample belongs to the era of the first jitted method
    // or to that of the second one. JitCodeCache doesn't have to record any event on
    // 'code delete'.
    kLoad = 0,
    // A jitted code move event, i.e. jitted code moved from one address to another.
    // It helps the profiler map samples to the right symbol even when the code is moved.
    // In ART JIT, this event can log the following behavior: a jitted method was recorded in a
    // previous kLoad event but, for some reason, is moved to another address in the
    // jit-code-cache.
    kMove = 1,
    // Logs debug line/column information.
    kDebugInfo = 2,
    // Logs the JIT VM end-of-life event.
    kClose = 3
  };
  uint32_t event_;       // Must be one of the events defined in PerfJitEvent.
  uint32_t size_;        // Total size of this event record.
                         // For example, for a kLoad event, the size of the event record is:
                         // sizeof(PerfJitCodeLoad) + method_name.size() + compiled code size.
  uint64_t time_stamp_;  // Timestamp for the event.
};

// Logs a jitted code load event (kLoad).
// In ART JIT, it is used to log that a new method has been JIT-compiled and committed to the
// jit-code-cache.
struct PerfJitCodeLoad : PerfJitBase {
  uint32_t process_id_;    // ID of the process that performs the jit code load.
                           // In ART JIT, it is the pid of the JIT compiler.
  uint32_t thread_id_;     // ID of the thread that performs the jit code load.
                           // In ART JIT, it is the tid of the JIT compiler.
  uint64_t vma_;           // Address of the code section. In ART JIT, because code_address_
                           // uses an absolute address, this field is 0.
  uint64_t code_address_;  // Address where the jitted code is loaded.
  uint64_t code_size_;     // Size of the jitted code.
  uint64_t code_id_;       // Unique ID for each piece of jitted code.
};

// This structure is for source line/column mapping.
// Currently this feature is not implemented in ART JIT yet.
struct PerfJitDebugEntry {
  uint64_t address_;      // Code address which maps to the line/column in source.
  uint32_t line_number_;  // Source line number starting at 1.
  uint32_t column_;       // Column discriminator, default 0.
  const char name_[0];    // Followed by null-terminated name or \0xff\0 if same as previous.
};

// Logs debug line information (kDebugInfo).
// This structure is for source line/column mapping.
// Currently this feature is not implemented in ART JIT yet.
struct PerfJitCodeDebugInfo : PerfJitBase {
  uint64_t address_;              // Starting code address which the debug info describes.
  uint64_t entry_count_;          // How many instances of PerfJitDebugEntry.
  PerfJitDebugEntry entries_[0];  // Followed by entry_count_ instances of PerfJitDebugEntry.
};

static uint32_t GetElfMach() {
#if defined(__arm__)
  static const uint32_t kElfMachARM = 0x28;
  return kElfMachARM;
#elif defined(__aarch64__)
  static const uint32_t kElfMachARM64 = 0xB7;
  return kElfMachARM64;
#elif defined(__i386__)
  static const uint32_t kElfMachIA32 = 0x3;
  return kElfMachIA32;
#elif defined(__x86_64__)
  static const uint32_t kElfMachX64 = 0x3E;
  return kElfMachX64;
#else
  UNIMPLEMENTED(WARNING) << "Unsupported architecture in JitLogger";
  return 0;
#endif
}

void JitLogger::OpenMarkerFile() {
  int fd = jit_dump_file_->Fd();
  // The 'perf inject' tool requires that the jit-PID.dump file
  // have a mmap(PROT_READ|PROT_EXEC) record in perf.data.
  marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  if (marker_address_ == MAP_FAILED) {
    LOG(WARNING) << "Failed to create record in perf.data. JITed code profiling will not work.";
    return;
  }
}

void JitLogger::CloseMarkerFile() {
  if (marker_address_ != nullptr) {
    munmap(marker_address_, kPageSize);
  }
}

void JitLogger::WriteJitDumpDebugInfo() {
  // In the future, we can add java source file line/column mapping here.
}

void JitLogger::WriteJitDumpHeader() {
  PerfJitHeader header;
  std::memset(&header, 0, sizeof(header));
  header.magic_ = PerfJitHeader::kMagic;
  header.version_ = PerfJitHeader::kVersion;
  header.size_ = sizeof(header);
  header.elf_mach_target_ = GetElfMach();
  header.process_id_ = static_cast<uint32_t>(getpid());
  header.time_stamp_ = art::NanoTime();  // CLOCK_MONOTONIC clock is required.
  header.flags_ = 0;
  bool res = jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&header), sizeof(header));
  if (!res) {
    LOG(WARNING) << "Failed to write profiling log. The 'perf inject' tool will not work.";
  }
}

void JitLogger::OpenJitDumpLog() {
  std::string pid_str = std::to_string(getpid());
  std::string jitdump_filename = std::string(kLogPrefix) + "/jit-" + pid_str + ".dump";
  jit_dump_file_.reset(OS::CreateEmptyFile(jitdump_filename.c_str()));
  if (jit_dump_file_ == nullptr) {
    LOG(ERROR) << "Could not create jit dump file at " << jitdump_filename
               << " Are you on a user build? Perf only works on userdebug/eng builds";
    return;
  }
  OpenMarkerFile();
  // Continue to write the jit-PID.dump file even if OpenMarkerFile() above fails.
  // Even if that means the 'perf inject' tool cannot work, developers can still use other tools
  // to map the samples in perf.data to the information (symbol, address, code) recorded
  // in the jit-PID.dump file, and still proceed with the jitted code analysis.
  WriteJitDumpHeader();
}

void JitLogger::WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method) {
  if (jit_dump_file_ != nullptr) {
    std::string method_name = method->PrettyMethod();

    PerfJitCodeLoad jit_code;
    std::memset(&jit_code, 0, sizeof(jit_code));
    jit_code.event_ = PerfJitCodeLoad::kLoad;
    jit_code.size_ = sizeof(jit_code) + method_name.size() + 1 + code_size;
    jit_code.time_stamp_ = art::NanoTime();  // CLOCK_MONOTONIC clock is required.
    jit_code.process_id_ = static_cast<uint32_t>(getpid());
    jit_code.thread_id_ = static_cast<uint32_t>(art::GetTid());
    jit_code.vma_ = 0x0;
    jit_code.code_address_ = reinterpret_cast<uint64_t>(ptr);
    jit_code.code_size_ = code_size;
    jit_code.code_id_ = code_index_++;

    // Write the complete info for one jitted method, including:
    // - the PerfJitCodeLoad structure
    // - the method name
    // - the complete generated code of this method
    //
    // Use UNUSED() here to avoid compiler warnings.
    UNUSED(jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&jit_code), sizeof(jit_code)));
    UNUSED(jit_dump_file_->WriteFully(method_name.c_str(), method_name.size() + 1));
    UNUSED(jit_dump_file_->WriteFully(ptr, code_size));

    WriteJitDumpDebugInfo();
  }
}

void JitLogger::CloseJitDumpLog() {
  if (jit_dump_file_ != nullptr) {
    CloseMarkerFile();
    UNUSED(jit_dump_file_->Flush());
    UNUSED(jit_dump_file_->Close());
  }
}

}  // namespace jit
}  // namespace art
android-platform-art-8.1.0+r23/compiler/jit/jit_logger.h000066400000000000000000000123761336577252300230720ustar00rootroot00000000000000/*
 * Copyright 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_JIT_JIT_LOGGER_H_
#define ART_COMPILER_JIT_JIT_LOGGER_H_

#include "base/mutex.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"

namespace art {

class ArtMethod;

namespace jit {

//
// JitLogger supports two approaches to perf profiling.
//
// (1) perf-map:
//     The perf-map mechanism generates the perf-PID.map file,
//     which provides simple "address, size, method_name" information to perf,
//     and allows perf to map samples in the jit-code-cache to jitted method symbols.
//
//     Command line example:
//       $ perf record dalvikvm -Xcompiler-option --generate-debug-info -cp Test
//       $ perf report
//     NOTE:
//       - Make sure that the perf-PID.map file is available for the 'perf report' tool to
//         access, so that jitted methods can be displayed.
//
//
// (2) perf-inject:
//     The perf-inject mechanism generates the jit-PID.dump file,
//     which provides rich information about each jitted method.
//     It allows perf or other profiling tools to do advanced analysis on jitted code,
//     for example, instruction-level profiling.
//
//     Command line example:
//       $ perf record -k mono dalvikvm -Xcompiler-option --generate-debug-info -cp Test
//       $ perf inject -i perf.data -o perf.data.jitted
//       $ perf report -i perf.data.jitted
//       $ perf annotate -i perf.data.jitted
//     NOTE:
//       REQUIREMENTS
//       - The 'perf record -k mono' option requires a 4.1 (or higher) Linux kernel.
//       - The 'perf inject' (generating jit ELF files) feature requires perf 4.6 (or higher).
//       PERF RECORD
//       - The '-k mono' option tells 'perf record' to use the CLOCK_MONOTONIC clock during
//         sampling, which is required by 'perf inject' to make sure that both perf.data and
//         jit-PID.dump have a unified clock source for timestamps.
//       PERF INJECT
//       - The 'perf inject' tool injects information from jit-PID.dump into the perf.data file,
//         and generates small ELF files (jitted-TID-CODEID.so) for each jitted method.
// - On Android devices, the jit-PID.dump file is generated in /data/misc/trace/ folder, and // such location is recorded in perf.data file. // The 'perf inject' tool is going to look for jit-PID.dump and generates small ELF files in // this /data/misc/trace/ folder. // Make sure that you have the read/write access to /data/misc/trace/ folder. // - On non-Android devices, the jit-PID.dump file is generated in /tmp/ folder, and // 'perf inject' tool operates on this folder. // Make sure that you have the read/write access to /tmp/ folder. // - If you are executing 'perf inject' on non-Android devices (host), but perf.data and // jit-PID.dump files are adb-pulled from Android devices, make sure that there is a // /data/misc/trace/ folder on host, and jit-PID.dump file is copied to this folder. // - Currently 'perf inject' doesn't provide option to change the path for jit-PID.dump and // generated ELF files. // PERF ANNOTATE // - The 'perf annotate' tool displays assembly level profiling report. // Source code can also be displayed if the ELF file has debug symbols. // - Make sure above small ELF files are available for 'perf annotate' tool to access, // so that jitted code can be displayed in assembly view. // class JitLogger { public: JitLogger() : code_index_(0), marker_address_(nullptr) {} void OpenLog() { OpenPerfMapLog(); OpenJitDumpLog(); } void WriteLog(const void* ptr, size_t code_size, ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { WritePerfMapLog(ptr, code_size, method); WriteJitDumpLog(ptr, code_size, method); } void CloseLog() { ClosePerfMapLog(); CloseJitDumpLog(); } private: // For perf-map profiling void OpenPerfMapLog(); void WritePerfMapLog(const void* ptr, size_t code_size, ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); void ClosePerfMapLog(); // For perf-inject profiling void OpenJitDumpLog(); void WriteJitDumpLog(const void* ptr, size_t code_size, ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_); void CloseJitDumpLog(); void OpenMarkerFile(); void CloseMarkerFile(); void WriteJitDumpHeader(); void WriteJitDumpDebugInfo(); std::unique_ptr perf_file_; std::unique_ptr jit_dump_file_; uint64_t code_index_; void* marker_address_; DISALLOW_COPY_AND_ASSIGN(JitLogger); }; } // namespace jit } // namespace art #endif // ART_COMPILER_JIT_JIT_LOGGER_H_ android-platform-art-8.1.0+r23/compiler/jni/000077500000000000000000000000001336577252300205575ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jni/jni_cfi_test.cc000066400000000000000000000114041336577252300235260ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include "arch/instruction_set.h" #include "base/arena_allocator.h" #include "base/enums.h" #include "cfi_test.h" #include "gtest/gtest.h" #include "jni/quick/calling_convention.h" #include "utils/assembler.h" #include "utils/jni_macro_assembler.h" #include "jni/jni_cfi_test_expected.inc" namespace art { // Run the tests only on host. 
#ifndef ART_TARGET_ANDROID class JNICFITest : public CFITest { public: // Enable this flag to generate the expected outputs. static constexpr bool kGenerateExpected = false; void TestImpl(InstructionSet isa, const char* isa_str, const std::vector& expected_asm, const std::vector& expected_cfi) { if (Is64BitInstructionSet(isa)) { TestImplSized(isa, isa_str, expected_asm, expected_cfi); } else { TestImplSized(isa, isa_str, expected_asm, expected_cfi); } } private: template void TestImplSized(InstructionSet isa, const char* isa_str, const std::vector& expected_asm, const std::vector& expected_cfi) { // Description of simple method. const bool is_static = true; const bool is_synchronized = false; const char* shorty = "IIFII"; ArenaPool pool; ArenaAllocator arena(&pool); std::unique_ptr jni_conv( JniCallingConvention::Create(&arena, is_static, is_synchronized, /*is_critical_native*/false, shorty, isa)); std::unique_ptr mr_conv( ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, isa)); const int frame_size(jni_conv->FrameSize()); ArrayRef callee_save_regs = jni_conv->CalleeSaveRegisters(); // Assemble the method. std::unique_ptr> jni_asm( JNIMacroAssembler::Create(&arena, isa)); jni_asm->cfi().SetEnabled(true); jni_asm->BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills()); jni_asm->IncreaseFrameSize(32); jni_asm->DecreaseFrameSize(32); jni_asm->RemoveFrame(frame_size, callee_save_regs); jni_asm->FinalizeCode(); std::vector actual_asm(jni_asm->CodeSize()); MemoryRegion code(&actual_asm[0], actual_asm.size()); jni_asm->FinalizeInstructions(code); ASSERT_EQ(jni_asm->cfi().GetCurrentCFAOffset(), frame_size); const std::vector& actual_cfi = *(jni_asm->cfi().data()); if (kGenerateExpected) { GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi); } else { EXPECT_EQ(expected_asm, actual_asm); EXPECT_EQ(expected_cfi, actual_cfi); } } }; #define TEST_ISA(isa) \ TEST_F(JNICFITest, isa) { \ std::vector expected_asm(expected_asm_##isa, \ expected_asm_##isa + arraysize(expected_asm_##isa)); \ std::vector expected_cfi(expected_cfi_##isa, \ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \ TestImpl(isa, #isa, expected_asm, expected_cfi); \ } #ifdef ART_ENABLE_CODEGEN_arm // Run the tests for ARM only with Baker read barriers, as the // expected generated code contains a Marking Register refresh // instruction. #if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) TEST_ISA(kThumb2) #endif #endif #ifdef ART_ENABLE_CODEGEN_arm64 // Run the tests for ARM64 only with Baker read barriers, as the // expected generated code contains a Marking Register refresh // instruction. 
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER) TEST_ISA(kArm64) #endif #endif #ifdef ART_ENABLE_CODEGEN_x86 TEST_ISA(kX86) #endif #ifdef ART_ENABLE_CODEGEN_x86_64 TEST_ISA(kX86_64) #endif #ifdef ART_ENABLE_CODEGEN_mips TEST_ISA(kMips) #endif #ifdef ART_ENABLE_CODEGEN_mips64 TEST_ISA(kMips64) #endif #endif // ART_TARGET_ANDROID } // namespace art android-platform-art-8.1.0+r23/compiler/jni/jni_cfi_test_expected.inc000066400000000000000000000473651336577252300256120ustar00rootroot00000000000000static constexpr uint8_t expected_asm_kThumb2[] = { 0x2D, 0xE9, 0xE0, 0x4D, 0x2D, 0xED, 0x10, 0x8A, 0x89, 0xB0, 0x00, 0x90, 0x21, 0x91, 0x8D, 0xED, 0x22, 0x0A, 0x23, 0x92, 0x24, 0x93, 0x88, 0xB0, 0x08, 0xB0, 0x09, 0xB0, 0xBD, 0xEC, 0x10, 0x8A, 0xBD, 0xE8, 0xE0, 0x4D, 0xD9, 0xF8, 0x34, 0x80, 0x70, 0x47, }; static constexpr uint8_t expected_cfi_kThumb2[] = { 0x44, 0x0E, 0x1C, 0x85, 0x07, 0x86, 0x06, 0x87, 0x05, 0x88, 0x04, 0x8A, 0x03, 0x8B, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x5C, 0x05, 0x50, 0x17, 0x05, 0x51, 0x16, 0x05, 0x52, 0x15, 0x05, 0x53, 0x14, 0x05, 0x54, 0x13, 0x05, 0x55, 0x12, 0x05, 0x56, 0x11, 0x05, 0x57, 0x10, 0x05, 0x58, 0x0F, 0x05, 0x59, 0x0E, 0x05, 0x5A, 0x0D, 0x05, 0x5B, 0x0C, 0x05, 0x5C, 0x0B, 0x05, 0x5D, 0x0A, 0x05, 0x5E, 0x09, 0x05, 0x5F, 0x08, 0x42, 0x0E, 0x80, 0x01, 0x4E, 0x0E, 0xA0, 0x01, 0x42, 0x0E, 0x80, 0x01, 0x0A, 0x42, 0x0E, 0x5C, 0x44, 0x0E, 0x1C, 0x06, 0x50, 0x06, 0x51, 0x06, 0x52, 0x06, 0x53, 0x06, 0x54, 0x06, 0x55, 0x06, 0x56, 0x06, 0x57, 0x06, 0x58, 0x06, 0x59, 0x06, 0x5A, 0x06, 0x5B, 0x06, 0x5C, 0x06, 0x5D, 0x06, 0x5E, 0x06, 0x5F, 0x4A, 0x0B, 0x0E, 0x80, 0x01, }; // 0x00000000: push {r5,r6,r7,r8,r10,r11,lr} // 0x00000004: .cfi_def_cfa_offset: 28 // 0x00000004: .cfi_offset: r5 at cfa-28 // 0x00000004: .cfi_offset: r6 at cfa-24 // 0x00000004: .cfi_offset: r7 at cfa-20 // 0x00000004: .cfi_offset: r8 at cfa-16 // 0x00000004: .cfi_offset: r10 at cfa-12 // 0x00000004: .cfi_offset: r11 at cfa-8 // 0x00000004: .cfi_offset: r14 at cfa-4 // 0x00000004: vpush {s16-s31} // 0x00000008: .cfi_def_cfa_offset: 92 // 0x00000008: .cfi_offset_extended: r80 at cfa-92 // 0x00000008: .cfi_offset_extended: r81 at cfa-88 // 0x00000008: .cfi_offset_extended: r82 at cfa-84 // 0x00000008: .cfi_offset_extended: r83 at cfa-80 // 0x00000008: .cfi_offset_extended: r84 at cfa-76 // 0x00000008: .cfi_offset_extended: r85 at cfa-72 // 0x00000008: .cfi_offset_extended: r86 at cfa-68 // 0x00000008: .cfi_offset_extended: r87 at cfa-64 // 0x00000008: .cfi_offset_extended: r88 at cfa-60 // 0x00000008: .cfi_offset_extended: r89 at cfa-56 // 0x00000008: .cfi_offset_extended: r90 at cfa-52 // 0x00000008: .cfi_offset_extended: r91 at cfa-48 // 0x00000008: .cfi_offset_extended: r92 at cfa-44 // 0x00000008: .cfi_offset_extended: r93 at cfa-40 // 0x00000008: .cfi_offset_extended: r94 at cfa-36 // 0x00000008: .cfi_offset_extended: r95 at cfa-32 // 0x00000008: sub sp, #36 // 0x0000000a: .cfi_def_cfa_offset: 128 // 0x0000000a: str r0, [sp] // 0x0000000c: str r1, [sp, #132] // 0x0000000e: vstr s0, [sp, #136] // 0x00000012: str r2, [sp, #140] // 0x00000014: str r3, [sp, #144] // 0x00000016: sub sp, #32 // 0x00000018: .cfi_def_cfa_offset: 160 // 0x00000018: add sp, #32 // 0x0000001a: .cfi_def_cfa_offset: 128 // 0x0000001a: .cfi_remember_state // 0x0000001a: add sp, #36 // 0x0000001c: .cfi_def_cfa_offset: 92 // 0x0000001c: vpop {s16-s31} // 0x00000020: .cfi_def_cfa_offset: 28 // 0x00000020: .cfi_restore_extended: r80 // 0x00000020: .cfi_restore_extended: r81 // 0x00000020: .cfi_restore_extended: r82 // 
0x00000020: .cfi_restore_extended: r83 // 0x00000020: .cfi_restore_extended: r84 // 0x00000020: .cfi_restore_extended: r85 // 0x00000020: .cfi_restore_extended: r86 // 0x00000020: .cfi_restore_extended: r87 // 0x00000020: .cfi_restore_extended: r88 // 0x00000020: .cfi_restore_extended: r89 // 0x00000020: .cfi_restore_extended: r90 // 0x00000020: .cfi_restore_extended: r91 // 0x00000020: .cfi_restore_extended: r92 // 0x00000020: .cfi_restore_extended: r93 // 0x00000020: .cfi_restore_extended: r94 // 0x00000020: .cfi_restore_extended: r95 // 0x00000020: pop {r5,r6,r7,r8,r10,r11,lr} // 0x00000024: ldr r8, [tr, #52] ; is_gc_marking // 0x00000028: bx lr // 0x0000002a: .cfi_restore_state // 0x0000002a: .cfi_def_cfa_offset: 128 static constexpr uint8_t expected_asm_kArm64[] = { 0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9, 0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9, 0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D, 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xF9, 0xE1, 0xCB, 0x00, 0xB9, 0xE0, 0xCF, 0x00, 0xBD, 0xE2, 0xD3, 0x00, 0xB9, 0xE3, 0xD7, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91, 0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, 0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, 0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D, 0xEE, 0x3F, 0x45, 0x6D, 0x74, 0x36, 0x40, 0xB9, 0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6, }; static constexpr uint8_t expected_cfi_kArm64[] = { 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14, 0x96, 0x12, 0x44, 0x97, 0x10, 0x98, 0x0E, 0x44, 0x99, 0x0C, 0x9A, 0x0A, 0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05, 0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22, 0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05, 0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44, 0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44, 0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06, 0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06, 0x4F, 0x48, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01, }; // 0x00000000: sub sp, sp, #0xc0 (192) // 0x00000004: .cfi_def_cfa_offset: 192 // 0x00000004: stp tr, x20, [sp, #96] // 0x00000008: .cfi_offset: r19 at cfa-96 // 0x00000008: .cfi_offset: r20 at cfa-88 // 0x00000008: stp x21, x22, [sp, #112] // 0x0000000c: .cfi_offset: r21 at cfa-80 // 0x0000000c: .cfi_offset: r22 at cfa-72 // 0x0000000c: stp x23, x24, [sp, #128] // 0x00000010: .cfi_offset: r23 at cfa-64 // 0x00000010: .cfi_offset: r24 at cfa-56 // 0x00000010: stp x25, x26, [sp, #144] // 0x00000014: .cfi_offset: r25 at cfa-48 // 0x00000014: .cfi_offset: r26 at cfa-40 // 0x00000014: stp x27, x28, [sp, #160] // 0x00000018: .cfi_offset: r27 at cfa-32 // 0x00000018: .cfi_offset: r28 at cfa-24 // 0x00000018: stp x29, lr, [sp, #176] // 0x0000001c: .cfi_offset: r29 at cfa-16 // 0x0000001c: .cfi_offset: r30 at cfa-8 // 0x0000001c: stp d8, d9, [sp, #32] // 0x00000020: .cfi_offset_extended: r72 at cfa-160 // 0x00000020: .cfi_offset_extended: r73 at cfa-152 // 0x00000020: stp d10, d11, [sp, #48] // 0x00000024: .cfi_offset_extended: r74 at cfa-144 // 0x00000024: .cfi_offset_extended: r75 at cfa-136 // 0x00000024: stp d12, d13, [sp, #64] // 0x00000028: .cfi_offset_extended: r76 at cfa-128 // 0x00000028: .cfi_offset_extended: r77 at cfa-120 // 0x00000028: stp d14, 
d15, [sp, #80] // 0x0000002c: .cfi_offset_extended: r78 at cfa-112 // 0x0000002c: .cfi_offset_extended: r79 at cfa-104 // 0x0000002c: str x0, [sp] // 0x00000030: str w1, [sp, #200] // 0x00000034: str s0, [sp, #204] // 0x00000038: str w2, [sp, #208] // 0x0000003c: str w3, [sp, #212] // 0x00000040: sub sp, sp, #0x20 (32) // 0x00000044: .cfi_def_cfa_offset: 224 // 0x00000044: add sp, sp, #0x20 (32) // 0x00000048: .cfi_def_cfa_offset: 192 // 0x00000048: .cfi_remember_state // 0x00000048: ldp tr, x20, [sp, #96] // 0x0000004c: .cfi_restore: r19 // 0x0000004c: .cfi_restore: r20 // 0x0000004c: ldp x21, x22, [sp, #112] // 0x00000050: .cfi_restore: r21 // 0x00000050: .cfi_restore: r22 // 0x00000050: ldp x23, x24, [sp, #128] // 0x00000054: .cfi_restore: r23 // 0x00000054: .cfi_restore: r24 // 0x00000054: ldp x25, x26, [sp, #144] // 0x00000058: .cfi_restore: r25 // 0x00000058: .cfi_restore: r26 // 0x00000058: ldp x27, x28, [sp, #160] // 0x0000005c: .cfi_restore: r27 // 0x0000005c: .cfi_restore: r28 // 0x0000005c: ldp x29, lr, [sp, #176] // 0x00000060: .cfi_restore: r29 // 0x00000060: .cfi_restore: r30 // 0x00000060: ldp d8, d9, [sp, #32] // 0x00000064: .cfi_restore_extended: r72 // 0x00000064: .cfi_restore_extended: r73 // 0x00000064: ldp d10, d11, [sp, #48] // 0x00000068: .cfi_restore_extended: r74 // 0x00000068: .cfi_restore_extended: r75 // 0x00000068: ldp d12, d13, [sp, #64] // 0x0000006c: .cfi_restore_extended: r76 // 0x0000006c: .cfi_restore_extended: r77 // 0x0000006c: ldp d14, d15, [sp, #80] // 0x00000070: .cfi_restore_extended: r78 // 0x00000070: .cfi_restore_extended: r79 // 0x00000070: ldr w20, [tr, #52] ; is_gc_marking // 0x00000074: add sp, sp, #0xc0 (192) // 0x00000078: .cfi_def_cfa_offset: 0 // 0x00000078: ret // 0x0000007c: .cfi_restore_state // 0x0000007c: .cfi_def_cfa_offset: 192 static constexpr uint8_t expected_asm_kX86[] = { 0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3, 0x0F, 0x11, 0x44, 0x24, 0x38, 0x89, 0x54, 0x24, 0x3C, 0x89, 0x5C, 0x24, 0x40, 0x83, 0xC4, 0xE0, 0x83, 0xC4, 0x20, 0x83, 0xC4, 0x20, 0x5D, 0x5E, 0x5F, 0xC3, }; static constexpr uint8_t expected_cfi_kX86[] = { 0x41, 0x0E, 0x08, 0x87, 0x02, 0x41, 0x0E, 0x0C, 0x86, 0x03, 0x41, 0x0E, 0x10, 0x85, 0x04, 0x43, 0x0E, 0x2C, 0x41, 0x0E, 0x30, 0x55, 0x0E, 0x50, 0x43, 0x0E, 0x30, 0x0A, 0x43, 0x0E, 0x10, 0x41, 0x0E, 0x0C, 0xC5, 0x41, 0x0E, 0x08, 0xC6, 0x41, 0x0E, 0x04, 0xC7, 0x41, 0x0B, 0x0E, 0x30, }; // 0x00000000: push edi // 0x00000001: .cfi_def_cfa_offset: 8 // 0x00000001: .cfi_offset: r7 at cfa-8 // 0x00000001: push esi // 0x00000002: .cfi_def_cfa_offset: 12 // 0x00000002: .cfi_offset: r6 at cfa-12 // 0x00000002: push ebp // 0x00000003: .cfi_def_cfa_offset: 16 // 0x00000003: .cfi_offset: r5 at cfa-16 // 0x00000003: add esp, -28 // 0x00000006: .cfi_def_cfa_offset: 44 // 0x00000006: push eax // 0x00000007: .cfi_def_cfa_offset: 48 // 0x00000007: mov [esp + 52], ecx // 0x0000000b: movss [esp + 56], xmm0 // 0x00000011: mov [esp + 60], edx // 0x00000015: mov [esp + 64], ebx // 0x00000019: add esp, -32 // 0x0000001c: .cfi_def_cfa_offset: 80 // 0x0000001c: add esp, 32 // 0x0000001f: .cfi_def_cfa_offset: 48 // 0x0000001f: .cfi_remember_state // 0x0000001f: add esp, 32 // 0x00000022: .cfi_def_cfa_offset: 16 // 0x00000022: pop ebp // 0x00000023: .cfi_def_cfa_offset: 12 // 0x00000023: .cfi_restore: r5 // 0x00000023: pop esi // 0x00000024: .cfi_def_cfa_offset: 8 // 0x00000024: .cfi_restore: r6 // 0x00000024: pop edi // 0x00000025: .cfi_def_cfa_offset: 4 // 0x00000025: .cfi_restore: r7 // 0x00000025: ret // 
0x00000026: .cfi_restore_state // 0x00000026: .cfi_def_cfa_offset: 48 static constexpr uint8_t expected_asm_kX86_64[] = { 0x41, 0x57, 0x41, 0x56, 0x41, 0x55, 0x41, 0x54, 0x55, 0x53, 0x48, 0x83, 0xEC, 0x48, 0xF2, 0x44, 0x0F, 0x11, 0x7C, 0x24, 0x40, 0xF2, 0x44, 0x0F, 0x11, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x28, 0x48, 0x89, 0x3C, 0x24, 0x89, 0xB4, 0x24, 0x88, 0x00, 0x00, 0x00, 0xF3, 0x0F, 0x11, 0x84, 0x24, 0x8C, 0x00, 0x00, 0x00, 0x89, 0x94, 0x24, 0x90, 0x00, 0x00, 0x00, 0x89, 0x8C, 0x24, 0x94, 0x00, 0x00, 0x00, 0x48, 0x83, 0xC4, 0xE0, 0x48, 0x83, 0xC4, 0x20, 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x28, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24, 0x30, 0xF2, 0x44, 0x0F, 0x10, 0x74, 0x24, 0x38, 0xF2, 0x44, 0x0F, 0x10, 0x7C, 0x24, 0x40, 0x48, 0x83, 0xC4, 0x48, 0x5B, 0x5D, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, 0x5F, 0xC3, }; static constexpr uint8_t expected_cfi_kX86_64[] = { 0x42, 0x0E, 0x10, 0x8F, 0x04, 0x42, 0x0E, 0x18, 0x8E, 0x06, 0x42, 0x0E, 0x20, 0x8D, 0x08, 0x42, 0x0E, 0x28, 0x8C, 0x0A, 0x41, 0x0E, 0x30, 0x86, 0x0C, 0x41, 0x0E, 0x38, 0x83, 0x0E, 0x44, 0x0E, 0x80, 0x01, 0x47, 0xA0, 0x10, 0x47, 0x9F, 0x12, 0x47, 0x9E, 0x14, 0x47, 0x9D, 0x16, 0x66, 0x0E, 0xA0, 0x01, 0x44, 0x0E, 0x80, 0x01, 0x0A, 0x47, 0xDD, 0x47, 0xDE, 0x47, 0xDF, 0x47, 0xE0, 0x44, 0x0E, 0x38, 0x41, 0x0E, 0x30, 0xC3, 0x41, 0x0E, 0x28, 0xC6, 0x42, 0x0E, 0x20, 0xCC, 0x42, 0x0E, 0x18, 0xCD, 0x42, 0x0E, 0x10, 0xCE, 0x42, 0x0E, 0x08, 0xCF, 0x41, 0x0B, 0x0E, 0x80, 0x01, }; // 0x00000000: push r15 // 0x00000002: .cfi_def_cfa_offset: 16 // 0x00000002: .cfi_offset: r15 at cfa-16 // 0x00000002: push r14 // 0x00000004: .cfi_def_cfa_offset: 24 // 0x00000004: .cfi_offset: r14 at cfa-24 // 0x00000004: push r13 // 0x00000006: .cfi_def_cfa_offset: 32 // 0x00000006: .cfi_offset: r13 at cfa-32 // 0x00000006: push r12 // 0x00000008: .cfi_def_cfa_offset: 40 // 0x00000008: .cfi_offset: r12 at cfa-40 // 0x00000008: push rbp // 0x00000009: .cfi_def_cfa_offset: 48 // 0x00000009: .cfi_offset: r6 at cfa-48 // 0x00000009: push rbx // 0x0000000a: .cfi_def_cfa_offset: 56 // 0x0000000a: .cfi_offset: r3 at cfa-56 // 0x0000000a: subq rsp, 72 // 0x0000000e: .cfi_def_cfa_offset: 128 // 0x0000000e: movsd [rsp + 64], xmm15 // 0x00000015: .cfi_offset: r32 at cfa-64 // 0x00000015: movsd [rsp + 56], xmm14 // 0x0000001c: .cfi_offset: r31 at cfa-72 // 0x0000001c: movsd [rsp + 48], xmm13 // 0x00000023: .cfi_offset: r30 at cfa-80 // 0x00000023: movsd [rsp + 40], xmm12 // 0x0000002a: .cfi_offset: r29 at cfa-88 // 0x0000002a: movq [rsp], rdi // 0x0000002e: mov [rsp + 136], esi // 0x00000035: movss [rsp + 140], xmm0 // 0x0000003e: mov [rsp + 144], edx // 0x00000045: mov [rsp + 148], ecx // 0x0000004c: addq rsp, -32 // 0x00000050: .cfi_def_cfa_offset: 160 // 0x00000050: addq rsp, 32 // 0x00000054: .cfi_def_cfa_offset: 128 // 0x00000054: .cfi_remember_state // 0x00000054: movsd xmm12, [rsp + 40] // 0x0000005b: .cfi_restore: r29 // 0x0000005b: movsd xmm13, [rsp + 48] // 0x00000062: .cfi_restore: r30 // 0x00000062: movsd xmm14, [rsp + 56] // 0x00000069: .cfi_restore: r31 // 0x00000069: movsd xmm15, [rsp + 64] // 0x00000070: .cfi_restore: r32 // 0x00000070: addq rsp, 72 // 0x00000074: .cfi_def_cfa_offset: 56 // 0x00000074: pop rbx // 0x00000075: .cfi_def_cfa_offset: 48 // 0x00000075: .cfi_restore: r3 // 0x00000075: pop rbp // 0x00000076: .cfi_def_cfa_offset: 40 // 0x00000076: .cfi_restore: r6 // 0x00000076: pop r12 // 0x00000078: .cfi_def_cfa_offset: 32 // 0x00000078: .cfi_restore: r12 // 0x00000078: pop r13 // 
0x0000007a: .cfi_def_cfa_offset: 24 // 0x0000007a: .cfi_restore: r13 // 0x0000007a: pop r14 // 0x0000007c: .cfi_def_cfa_offset: 16 // 0x0000007c: .cfi_restore: r14 // 0x0000007c: pop r15 // 0x0000007e: .cfi_def_cfa_offset: 8 // 0x0000007e: .cfi_restore: r15 // 0x0000007e: ret // 0x0000007f: .cfi_restore_state // 0x0000007f: .cfi_def_cfa_offset: 128 static constexpr uint8_t expected_asm_kMips[] = { 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xBE, 0xAF, 0x34, 0x00, 0xB7, 0xAF, 0x30, 0x00, 0xB6, 0xAF, 0x2C, 0x00, 0xB5, 0xAF, 0x28, 0x00, 0xB4, 0xAF, 0x24, 0x00, 0xB3, 0xAF, 0x20, 0x00, 0xB2, 0xAF, 0x00, 0x00, 0xA4, 0xAF, 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xA8, 0xE7, 0x4C, 0x00, 0xA6, 0xAF, 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F, 0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F, 0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F, 0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27, }; static constexpr uint8_t expected_cfi_kMips[] = { 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03, 0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07, 0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE, 0x44, 0xDF, 0x48, 0x0E, 0x00, 0x0B, 0x0E, 0x40, }; // 0x00000000: addiu r29, r29, -64 // 0x00000004: .cfi_def_cfa_offset: 64 // 0x00000004: sw r31, +60(r29) // 0x00000008: .cfi_offset: r31 at cfa-4 // 0x00000008: sw r30, +56(r29) // 0x0000000c: .cfi_offset: r30 at cfa-8 // 0x0000000c: sw r23, +52(r29) // 0x00000010: .cfi_offset: r23 at cfa-12 // 0x00000010: sw r22, +48(r29) // 0x00000014: .cfi_offset: r22 at cfa-16 // 0x00000014: sw r21, +44(r29) // 0x00000018: .cfi_offset: r21 at cfa-20 // 0x00000018: sw r20, +40(r29) // 0x0000001c: .cfi_offset: r20 at cfa-24 // 0x0000001c: sw r19, +36(r29) // 0x00000020: .cfi_offset: r19 at cfa-28 // 0x00000020: sw r18, +32(r29) // 0x00000024: .cfi_offset: r18 at cfa-32 // 0x00000024: sw r4, +0(r29) // 0x00000028: sw r5, +68(r29) // 0x0000002c: swc1 f8, +72(r29) // 0x00000030: sw r6, +76(r29) // 0x00000034: sw r7, +80(r29) // 0x00000038: addiu r29, r29, -32 // 0x0000003c: .cfi_def_cfa_offset: 96 // 0x0000003c: addiu r29, r29, 32 // 0x00000040: .cfi_def_cfa_offset: 64 // 0x00000040: .cfi_remember_state // 0x00000040: lw r18, +32(r29) // 0x00000044: .cfi_restore: r18 // 0x00000044: lw r19, +36(r29) // 0x00000048: .cfi_restore: r19 // 0x00000048: lw r20, +40(r29) // 0x0000004c: .cfi_restore: r20 // 0x0000004c: lw r21, +44(r29) // 0x00000050: .cfi_restore: r21 // 0x00000050: lw r22, +48(r29) // 0x00000054: .cfi_restore: r22 // 0x00000054: lw r23, +52(r29) // 0x00000058: .cfi_restore: r23 // 0x00000058: lw r30, +56(r29) // 0x0000005c: .cfi_restore: r30 // 0x0000005c: lw r31, +60(r29) // 0x00000060: .cfi_restore: r31 // 0x00000060: jr r31 // 0x00000064: addiu r29, r29, 64 // 0x00000068: .cfi_def_cfa_offset: 0 // 0x00000068: .cfi_restore_state // 0x00000068: .cfi_def_cfa_offset: 64 static constexpr uint8_t expected_asm_kMips64[] = { 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF, 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF, 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF, 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF, 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF, 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 
0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF, 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF, 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF, 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00, }; static constexpr uint8_t expected_cfi_kMips64[] = { 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06, 0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E, 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E, 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x70, }; // 0x00000000: daddiu r29, r29, -112 // 0x00000004: .cfi_def_cfa_offset: 112 // 0x00000004: sd r31, +104(r29) // 0x00000008: .cfi_offset: r31 at cfa-8 // 0x00000008: sd r30, +96(r29) // 0x0000000c: .cfi_offset: r30 at cfa-16 // 0x0000000c: sd r28, +88(r29) // 0x00000010: .cfi_offset: r28 at cfa-24 // 0x00000010: sd r23, +80(r29) // 0x00000014: .cfi_offset: r23 at cfa-32 // 0x00000014: sd r22, +72(r29) // 0x00000018: .cfi_offset: r22 at cfa-40 // 0x00000018: sd r21, +64(r29) // 0x0000001c: .cfi_offset: r21 at cfa-48 // 0x0000001c: sd r20, +56(r29) // 0x00000020: .cfi_offset: r20 at cfa-56 // 0x00000020: sd r19, +48(r29) // 0x00000024: .cfi_offset: r19 at cfa-64 // 0x00000024: sd r18, +40(r29) // 0x00000028: .cfi_offset: r18 at cfa-72 // 0x00000028: sd r4, +0(r29) // 0x0000002c: sw r5, +120(r29) // 0x00000030: swc1 f14, +124(r29) // 0x00000034: sw r7, +128(r29) // 0x00000038: sw r8, +132(r29) // 0x0000003c: daddiu r29, r29, -32 // 0x00000040: .cfi_def_cfa_offset: 144 // 0x00000040: daddiu r29, r29, 32 // 0x00000044: .cfi_def_cfa_offset: 112 // 0x00000044: .cfi_remember_state // 0x00000044: ld r18, +40(r29) // 0x00000048: .cfi_restore: r18 // 0x00000048: ld r19, +48(r29) // 0x0000004c: .cfi_restore: r19 // 0x0000004c: ld r20, +56(r29) // 0x00000050: .cfi_restore: r20 // 0x00000050: ld r21, +64(r29) // 0x00000054: .cfi_restore: r21 // 0x00000054: ld r22, +72(r29) // 0x00000058: .cfi_restore: r22 // 0x00000058: ld r23, +80(r29) // 0x0000005c: .cfi_restore: r23 // 0x0000005c: ld r28, +88(r29) // 0x00000060: .cfi_restore: r28 // 0x00000060: ld r30, +96(r29) // 0x00000064: .cfi_restore: r30 // 0x00000064: ld r31, +104(r29) // 0x00000068: .cfi_restore: r31 // 0x00000068: daddiu r29, r29, 112 // 0x0000006c: .cfi_def_cfa_offset: 0 // 0x0000006c: jr r31 // 0x00000070: nop // 0x00000074: .cfi_restore_state // 0x00000074: .cfi_def_cfa_offset: 112 android-platform-art-8.1.0+r23/compiler/jni/jni_compiler_test.cc000066400000000000000000003162241336577252300246070ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

#include <math.h>
#include <memory>
#include <type_traits>

#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiler.h"
#include "dex_file.h"
#include "gtest/gtest.h"
#include "indirect_reference_table.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "nativehelper/ScopedLocalRef.h"
#include "nativeloader/native_loader.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"

extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar(JNIEnv*, jobject, jint count) {
  return count + 1;
}

extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint count) {
  return count + 1;
}

// TODO: In the Baker read barrier configuration, add checks to ensure
// the Marking Register's value is correct.

namespace art {

enum class JniKind {
  kNormal   = Compiler::kNone,               // Regular kind of un-annotated natives.
  kFast     = Compiler::kFastNative,         // Native method annotated with @FastNative.
  kCritical = Compiler::kCriticalNative,     // Native method annotated with @CriticalNative.
  kCount    = Compiler::kCriticalNative + 1  // How many different types of JNIs we can have.
};

// Used to initialize array sizes that want to have different state per current jni.
static constexpr size_t kJniKindCount = static_cast<size_t>(JniKind::kCount);

// Do not use directly, use the helpers instead.
uint32_t gCurrentJni = static_cast<uint32_t>(JniKind::kNormal);

// Is the current native method under test @CriticalNative?
static bool IsCurrentJniCritical() {
  return gCurrentJni == static_cast<uint32_t>(JniKind::kCritical);
}

// Is the current native method a plain-old non-annotated native?
static bool IsCurrentJniNormal() {
  return gCurrentJni == static_cast<uint32_t>(JniKind::kNormal);
}

// Signify that a different kind of JNI is about to be tested.
static void UpdateCurrentJni(JniKind kind) {
  gCurrentJni = static_cast<uint32_t>(kind);
}

// (Match the name suffixes of native methods in MyClassNatives.java.)
static std::string CurrentJniStringSuffix() {
  switch (gCurrentJni) {
    case static_cast<uint32_t>(JniKind::kNormal): { return ""; }
    case static_cast<uint32_t>(JniKind::kFast): { return "_Fast"; }
    case static_cast<uint32_t>(JniKind::kCritical): { return "_Critical"; }
    default:
      LOG(FATAL) << "Invalid current JNI value: " << gCurrentJni;
      UNREACHABLE();
  }
}

// Dummy values passed to our JNI handlers when we enter @CriticalNative.
// Normally the @CriticalNative calling convention strips out the "JNIEnv*, jclass" parameters.
// However, to avoid duplicating every single test method we have a templated handler
// that inserts dummy parameters (0,1) to make it compatible with a regular JNI handler.
static JNIEnv* const kCriticalDummyJniEnv = reinterpret_cast<JNIEnv*>(0xDEADFEAD);
static jclass const kCriticalDummyJniClass = reinterpret_cast<jclass>(0xBEAFBEEF);

// Type trait. Returns true if "T" is the same type as one of the types in Args...
//
// Logically equal to OR(std::same_type for all U in Args).
template <typename T, typename ... Args>
struct is_any_of;

template <typename T, typename U, typename ... Args>
struct is_any_of<T, U, Args ...> {
  using value_type = bool;
  static constexpr const bool value = std::is_same<T, U>::value || is_any_of<T, Args ...>::value;
};

template <typename T, typename U>
struct is_any_of<T, U> {
  using value_type = bool;
  static constexpr const bool value = std::is_same<T, U>::value;
};

// Type traits for JNI types.
template <typename T>
struct jni_type_traits {
  // True if type T ends up holding an object reference. False otherwise.
  // (Non-JNI types will also be false.)
  static constexpr const bool is_ref =
      is_any_of<T, jclass, jobject, jstring, jobjectArray, jintArray,
                jcharArray, jfloatArray, jshortArray, jdoubleArray, jlongArray>::value;
};

template <typename ... Args>
struct count_refs_helper {
  using value_type = size_t;
  static constexpr const size_t value = 0;
};

template <typename Arg, typename ... Args>
struct count_refs_helper<Arg, Args ...> {
  using value_type = size_t;
  static constexpr size_t value =
      (jni_type_traits<Arg>::is_ref ? 1 : 0) + count_refs_helper<Args ...>::value;
};

template <typename T, T* fn>
struct count_refs_fn_helper;

template <typename R, typename ... Args, R fn(Args...)>
struct count_refs_fn_helper<R(Args...), fn> : public count_refs_helper<Args...> {};

// Given a function type 'T' figure out how many of the parameter types are a reference.
// -- The implicit jclass and thisObject also count as 1 reference.
//
// Fields:
// * value - the result counting # of refs
// * value_type - the type of value (size_t)
template <typename T, T* fn>
struct count_refs : public count_refs_fn_helper<T, fn> {};

// Base case: No parameters = 0 refs.
size_t count_nonnull_refs_helper() {
  return 0;
}

// SFINAE for ref types. 1 if non-null, 0 otherwise.
template <typename T>
size_t count_nonnull_refs_single_helper(
    T arg,
    typename std::enable_if<jni_type_traits<T>::is_ref>::type* = nullptr) {
  return ((arg == NULL) ? 0 : 1);
}

// SFINAE for non-ref-types. Always 0.
template <typename T>
size_t count_nonnull_refs_single_helper(
    T arg ATTRIBUTE_UNUSED,
    typename std::enable_if<!jni_type_traits<T>::is_ref>::type* = nullptr) {
  return 0;
}

// Recursive case.
template <typename T, typename ... Args>
size_t count_nonnull_refs_helper(T arg, Args ... args) {
  return count_nonnull_refs_single_helper(arg) + count_nonnull_refs_helper(args...);
}

// Given any list of parameters, check how many object refs there are and only count
// them if their runtime value is non-null.
//
// For example, given (jobject, jint, jclass) we can get (2) if both #0/#2 are non-null,
// (1) if either #0/#2 is null but not both, and (0) if all parameters are null.
// Primitive parameters (including JNIEnv*, if present) are ignored.
template <typename ... Args>
size_t count_nonnull_refs(Args ... args) {
  return count_nonnull_refs_helper(args...);
}

template <typename T, T* fn>
struct remove_extra_parameters_helper;

template <typename R, typename ... Args, R fn(JNIEnv*, jclass, Args...)>
struct remove_extra_parameters_helper<R(JNIEnv*, jclass, Args...), fn> {
  // Note: Do not use Args&& here to maintain C-style parameter types.
  static R apply(Args... args) {
    JNIEnv* env = kCriticalDummyJniEnv;
    jclass kls = kCriticalDummyJniClass;
    return fn(env, kls, args...);
  }
};
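// Illustrative compile-time spot checks of the traits above (a minimal sketch;
// these static_asserts assume the trait definitions as given and can be
// removed freely):
static_assert(jni_type_traits<jobject>::is_ref, "jobject counts as a reference");
static_assert(!jni_type_traits<jint>::is_ref, "jint is a primitive, not a reference");
static_assert(count_refs_helper<jclass, jobject, jint>::value == 2u,
              "an implicit jclass plus one jobject is two statically counted refs");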
// Given a function 'fn' create a function 'apply' which will omit the JNIEnv/jklass parameters,
// i.e. if fn(JNIEnv*, jklass, a, b, c, d, e, ...) then apply(a, b, c, d, e, ...).
template <typename T, T* fn>
struct jni_remove_extra_parameters : public remove_extra_parameters_helper<T, fn> {};

class JniCompilerTest : public CommonCompilerTest {
 protected:
  void SetUp() OVERRIDE {
    CommonCompilerTest::SetUp();
    check_generic_jni_ = false;
  }

  void TearDown() OVERRIDE {
    android::ResetNativeLoader();
    CommonCompilerTest::TearDown();
  }

  void SetCheckGenericJni(bool generic) {
    check_generic_jni_ = generic;
  }

 private:
  void CompileForTest(jobject class_loader,
                      bool direct,
                      const char* method_name,
                      const char* method_sig) {
    ScopedObjectAccess soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::ClassLoader> loader(
        hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
    // Compile the native method before starting the runtime.
    mirror::Class* c = class_linker_->FindClass(soa.Self(), "LMyClassNatives;", loader);
    const auto pointer_size = class_linker_->GetImagePointerSize();
    ArtMethod* method = c->FindClassMethod(method_name, method_sig, pointer_size);
    ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
    ASSERT_EQ(direct, method->IsDirect()) << method_name << " " << method_sig;
    if (check_generic_jni_) {
      method->SetEntryPointFromQuickCompiledCode(class_linker_->GetRuntimeQuickGenericJniStub());
    } else {
      const void* code = method->GetEntryPointFromQuickCompiledCode();
      if (code == nullptr || class_linker_->IsQuickGenericJniStub(code)) {
        CompileMethod(method);
        ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
            << method_name << " " << method_sig;
      }
    }
  }

 protected:
  void CompileForTestWithCurrentJni(jobject class_loader,
                                    bool direct,
                                    const char* method_name_orig,
                                    const char* method_sig) {
    // Append the JNI kind to the method name, so that we automatically get the
    // fast or critical versions of the same method.
    std::string method_name_str = std::string(method_name_orig) + CurrentJniStringSuffix();
    const char* method_name = method_name_str.c_str();

    CompileForTest(class_loader, direct, method_name, method_sig);
  }

  void SetUpForTest(bool direct,
                    const char* method_name_orig,
                    const char* method_sig,
                    void* native_fnptr) {
    // Append the JNI kind to the method name, so that we automatically get the
    // fast or critical versions of the same method.
    std::string method_name_str = std::string(method_name_orig) + CurrentJniStringSuffix();
    const char* method_name = method_name_str.c_str();

    // Initialize class loader and compile method when runtime not started.
    if (!runtime_->IsStarted()) {
      {
        ScopedObjectAccess soa(Thread::Current());
        class_loader_ = LoadDex("MyClassNatives");
      }
      CompileForTest(class_loader_, direct, method_name, method_sig);
      // Start runtime.
      Thread::Current()->TransitionFromSuspendedToRunnable();
      android::InitializeNativeLoader();
      bool started = runtime_->Start();
      CHECK(started);
    }
    // JNI operations after runtime start.
    env_ = Thread::Current()->GetJniEnv();
    library_search_path_ = env_->NewStringUTF("");
    jklass_ = env_->FindClass("MyClassNatives");
    ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig;

    if (direct) {
      jmethod_ = env_->GetStaticMethodID(jklass_, method_name, method_sig);
    } else {
      jmethod_ = env_->GetMethodID(jklass_, method_name, method_sig);
    }
    ASSERT_TRUE(jmethod_ != nullptr) << method_name << " " << method_sig;

    if (native_fnptr != nullptr) {
      JNINativeMethod methods[] = { { method_name, method_sig, native_fnptr } };
      ASSERT_EQ(JNI_OK, env_->RegisterNatives(jklass_, methods, 1))
          << method_name << " " << method_sig;
    } else {
      env_->UnregisterNatives(jklass_);
    }

    jmethodID constructor = env_->GetMethodID(jklass_, "<init>", "()V");
    jobj_ = env_->NewObject(jklass_, constructor);
    ASSERT_TRUE(jobj_ != nullptr) << method_name << " " << method_sig;
  }

 public:
  // Available as statics so our JNI handlers can access these.
  static jclass jklass_;
  static jobject jobj_;
  static jobject class_loader_;

 protected:
  // We have to list the methods here so we can share them between default and generic JNI.
  void CompileAndRunNoArgMethodImpl();
  void CompileAndRunIntMethodThroughStubImpl();
  void CompileAndRunStaticIntMethodThroughStubImpl();
  void CompileAndRunIntMethodImpl();
  void CompileAndRunIntIntMethodImpl();
  void CompileAndRunLongLongMethodImpl();
  void CompileAndRunDoubleDoubleMethodImpl();
  void CompileAndRun_fooJJ_synchronizedImpl();
  void CompileAndRunIntObjectObjectMethodImpl();
  void CompileAndRunStaticIntIntMethodImpl();
  void CompileAndRunStaticDoubleDoubleMethodImpl();
  void RunStaticLogDoubleMethodImpl();
  void RunStaticLogFloatMethodImpl();
  void RunStaticReturnTrueImpl();
  void RunStaticReturnFalseImpl();
  void RunGenericStaticReturnIntImpl();
  void RunGenericStaticReturnDoubleImpl();
  void RunGenericStaticReturnLongImpl();
  void CompileAndRunStaticIntObjectObjectMethodImpl();
  void CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl();
  void ExceptionHandlingImpl();
  void NativeStackTraceElementImpl();
  void ReturnGlobalRefImpl();
  void LocalReferenceTableClearingTestImpl();
  void JavaLangSystemArrayCopyImpl();
  void CompareAndSwapIntImpl();
  void GetTextImpl();
  void GetSinkPropertiesNativeImpl();
  void UpcallReturnTypeChecking_InstanceImpl();
  void UpcallReturnTypeChecking_StaticImpl();
  void UpcallArgumentTypeChecking_InstanceImpl();
  void UpcallArgumentTypeChecking_StaticImpl();
  void CompileAndRunFloatFloatMethodImpl();
  void CheckParameterAlignImpl();
  void MaxParamNumberImpl();
  void WithoutImplementationImpl();
  void WithoutImplementationRefReturnImpl();
  void StackArgsIntsFirstImpl();
  void StackArgsFloatsFirstImpl();
  void StackArgsMixedImpl();
#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
  void StackArgsSignExtendedMips64Impl();
#endif

  void NormalNativeImpl();
  void FastNativeImpl();
  void CriticalNativeImpl();

  JNIEnv* env_;
  jstring library_search_path_;
  jmethodID jmethod_;

 private:
  bool check_generic_jni_;
};

jclass JniCompilerTest::jklass_;
jobject JniCompilerTest::jobj_;
jobject JniCompilerTest::class_loader_;
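// A typical test in this file pairs an *Impl method with one of the JNI_TEST*
// macros defined below (sketch only; "Foo" is a hypothetical name):
//
//   void JniCompilerTest::FooImpl() {
//     SetUpForTest(/*direct=*/false, "foo", "()V",
//                  CURRENT_JNI_WRAPPER(Java_MyClassNatives_foo));
//     env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
//   }
//   JNI_TEST(Foo)  // instantiates the (normal, @FastNative) x (compiler, generic) gtests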
// Test the normal compiler and normal generic JNI only.
// The following features are unsupported in @FastNative:
// 1) JNI stubs (lookup via dlsym) when methods aren't explicitly registered
// 2) the synchronized keyword
// -- TODO: We can support (1) if we remove the mutator lock assert during stub lookup.
#define JNI_TEST_NORMAL_ONLY(TestName) \
  TEST_F(JniCompilerTest, TestName ## NormalCompiler) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("Normal JNI with compiler"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
    TestName ## Impl(); \
  } \
  TEST_F(JniCompilerTest, TestName ## NormalGeneric) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("Normal JNI with generic"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kNormal); \
    SetCheckGenericJni(true); \
    TestName ## Impl(); \
  }

// Test (normal, @FastNative) x (compiler, generic).
#define JNI_TEST(TestName) \
  JNI_TEST_NORMAL_ONLY(TestName) \
  TEST_F(JniCompilerTest, TestName ## FastCompiler) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("@FastNative JNI with compiler"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
    TestName ## Impl(); \
  } \
  \
  TEST_F(JniCompilerTest, TestName ## FastGeneric) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("@FastNative JNI with generic"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kFast); \
    SetCheckGenericJni(true); \
    TestName ## Impl(); \
  }

// Test (@CriticalNative) x (compiler, generic) only.
#define JNI_TEST_CRITICAL_ONLY(TestName) \
  TEST_F(JniCompilerTest, TestName ## CriticalCompiler) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("@CriticalNative JNI with compiler"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
    TestName ## Impl(); \
  } \
  TEST_F(JniCompilerTest, TestName ## CriticalGeneric) { \
    ScopedCheckHandleScope top_handle_scope_check; \
    SCOPED_TRACE("@CriticalNative JNI with generic"); \
    gCurrentJni = static_cast<uint32_t>(JniKind::kCritical); \
    SetCheckGenericJni(true); \
    TestName ## Impl(); \
  }

// Test everything: (normal, @FastNative, @CriticalNative) x (compiler, generic).
#define JNI_TEST_CRITICAL(TestName) \
  JNI_TEST(TestName) \
  JNI_TEST_CRITICAL_ONLY(TestName)

static void expectValidThreadState() {
  // Normal JNI always transitions to "Native". Other JNIs stay in the "Runnable" state.
  if (IsCurrentJniNormal()) {
    EXPECT_EQ(kNative, Thread::Current()->GetState());
  } else {
    EXPECT_EQ(kRunnable, Thread::Current()->GetState());
  }
}

#define EXPECT_THREAD_STATE_FOR_CURRENT_JNI() expectValidThreadState()

static void expectValidMutatorLockHeld() {
  if (IsCurrentJniNormal()) {
    Locks::mutator_lock_->AssertNotHeld(Thread::Current());
  } else {
    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
  }
}

#define EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI() expectValidMutatorLockHeld()
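// Summary of the two checks above, per JNI kind (restated from the code for
// quick reference):
//
//                     thread state   mutator lock
//   normal native     kNative        released (AssertNotHeld)
//   @FastNative       kRunnable      shared (AssertSharedHeld)
//   @CriticalNative   kRunnable      shared (AssertSharedHeld)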
static void expectValidJniEnvAndObject(JNIEnv* env, jobject thisObj) {
  if (!IsCurrentJniCritical()) {
    EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
    ASSERT_TRUE(thisObj != nullptr);
    EXPECT_TRUE(env->IsInstanceOf(thisObj, JniCompilerTest::jklass_));
  } else {
    LOG(FATAL) << "Objects are not supported for @CriticalNative, why is this being tested?";
    UNREACHABLE();
  }
}

// Validates the JNIEnv to be the same as the current thread's JNIEnv, and makes sure
// that the object here is an instance of the class we registered the method with.
//
// Hard-fails if this somehow gets invoked for @CriticalNative since objects are unsupported.
#define EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj) \
    expectValidJniEnvAndObject(env, thisObj)

static void expectValidJniEnvAndClass(JNIEnv* env, jclass kls) {
  if (!IsCurrentJniCritical()) {
    EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
    ASSERT_TRUE(kls != nullptr);
    EXPECT_TRUE(env->IsSameObject(static_cast<jobject>(JniCompilerTest::jklass_),
                                  static_cast<jobject>(kls)));
  } else {
    // This is pretty much vacuously true but catch any testing setup mistakes.
    EXPECT_EQ(env, kCriticalDummyJniEnv);
    EXPECT_EQ(kls, kCriticalDummyJniClass);
  }
}

// Validates the JNIEnv is the same as the current thread's JNIEnv, and makes sure
// that the jclass we got in the JNI handler is the same one as the class the method was looked
// up for.
//
// (Checks are skipped for @CriticalNative since the two values are dummy.)
#define EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls) expectValidJniEnvAndClass(env, kls)

// Temporarily disable the EXPECT_NUM_STACK_REFERENCES check (for a single test).
struct ScopedDisableCheckNumStackReferences {
  ScopedDisableCheckNumStackReferences() {
    CHECK(sCheckNumStackReferences);  // No nested support.
    sCheckNumStackReferences = false;
  }

  ~ScopedDisableCheckNumStackReferences() {
    sCheckNumStackReferences = true;
  }

  static bool sCheckNumStackReferences;
};

bool ScopedDisableCheckNumStackReferences::sCheckNumStackReferences = true;

// Check that the handle scope at the start of this block is the same
// as the handle scope at the end of the block.
struct ScopedCheckHandleScope {
  ScopedCheckHandleScope() : handle_scope_(Thread::Current()->GetTopHandleScope()) {
  }

  ~ScopedCheckHandleScope() {
    EXPECT_EQ(handle_scope_, Thread::Current()->GetTopHandleScope())
        << "Top-most handle scope must be the same after all the JNI "
        << "invocations have finished (as before they were invoked).";
  }

  BaseHandleScope* const handle_scope_;
};

// Number of references allocated in JNI ShadowFrames on the given thread.
static size_t NumJniShadowFrameReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
  return self->GetManagedStack()->NumJniShadowFrameReferences();
}

// Number of references in handle scopes on the given thread.
static size_t NumHandleReferences(Thread* self) {
  size_t count = 0;
  for (BaseHandleScope* cur = self->GetTopHandleScope(); cur != nullptr; cur = cur->GetLink()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

// Number of references allocated in handle scopes & JNI shadow frames on this thread.
static size_t NumStackReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
  return NumHandleReferences(self) + NumJniShadowFrameReferences(self);
}

static void expectNumStackReferences(size_t val1, size_t val2) {
  // In rare cases when JNI functions call themselves recursively,
  // disable this test because it will have a false negative.
  if (!IsCurrentJniCritical() && ScopedDisableCheckNumStackReferences::sCheckNumStackReferences) {
    /* @CriticalNative doesn't build a HandleScope, so this test is meaningless then. */
    ScopedObjectAccess soa(Thread::Current());

    size_t actual_num = NumStackReferences(Thread::Current());
    // XX: Not too sure what's going on.
    // Sometimes null references get placed and sometimes they don't?
    EXPECT_TRUE(val1 == actual_num || val2 == actual_num)
        << "expected either " << val1 << " or " << val2
        << " number of stack references, but got: " << actual_num;
  }
}

#define EXPECT_NUM_STACK_REFERENCES(val1, val2) expectNumStackReferences(val1, val2)
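// Worked example of the check above (illustrative values only): for an
// instance handler jobject f(JNIEnv*, jobject thisObj, jint x, jobject o),
// two references are counted statically (thisObj and o). At runtime a null
// 'o' pins only thisObj, so the decorators below pass both acceptable counts:
//
//   EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, x, o), 2u);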
template <typename T, T* fn>
struct make_jni_test_decorator;

// Decorator for "static" JNI callbacks.
template <typename R, typename ... Args, R fn(JNIEnv*, jclass, Args...)>
struct make_jni_test_decorator<R(JNIEnv*, jclass, Args ...), fn> {
  static R apply(JNIEnv* env, jclass kls, Args ... args) {
    EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
    EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
    EXPECT_JNI_ENV_AND_CLASS_FOR_CURRENT_JNI(env, kls);
    // All incoming parameters + the jclass get put into the transition's StackHandleScope.
    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(kls, args...),
                                (count_refs_helper<jclass, Args ...>::value));

    return fn(env, kls, args...);
  }
};

// Decorator for instance JNI callbacks.
template <typename R, typename ... Args, R fn(JNIEnv*, jobject, Args...)>
struct make_jni_test_decorator<R(JNIEnv*, jobject, Args...), fn> {
  static R apply(JNIEnv* env, jobject thisObj, Args ... args) {
    EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
    EXPECT_MUTATOR_LOCK_FOR_CURRENT_JNI();
    EXPECT_JNI_ENV_AND_OBJECT_FOR_CURRENT_JNI(env, thisObj);
    // All incoming parameters + the implicit 'this' get put into the transition's
    // StackHandleScope.
    EXPECT_NUM_STACK_REFERENCES(count_nonnull_refs(thisObj, args...),
                                (count_refs_helper<jobject, Args ...>::value));

    return fn(env, thisObj, args...);
  }
};

// Decorate the regular JNI callee with the extra gtest checks.
// This way we can have common test logic for everything generic like checking if a lock is held,
// checking handle scope state, etc.
#define MAKE_JNI_TEST_DECORATOR(fn) make_jni_test_decorator<decltype(fn), &(fn)>::apply

// Convert function f(JNIEnv*, jclass, a, b, c, d, ...) into f2(a, b, c, d, ...)
// -- This way we don't have to write out each implementation twice for @CriticalNative.
#define JNI_CRITICAL_WRAPPER(func) jni_remove_extra_parameters<decltype(func), &(func)>::apply

// Get a function pointer whose calling convention either matches a regular native
// or a critical native depending on which kind of jni is currently under test.
// -- This also has the benefit of generating a compile-time error if the 'func' doesn't
//    properly have JNIEnv and jclass parameters first.
#define CURRENT_JNI_WRAPPER(func) \
    (IsCurrentJniCritical() \
         ? reinterpret_cast<void*>(&JNI_CRITICAL_WRAPPER(MAKE_JNI_TEST_DECORATOR(func))) \
         : reinterpret_cast<void*>(&MAKE_JNI_TEST_DECORATOR(func)))

// Do the opposite of the above. Do *not* wrap the function, instead just cast it to a void*.
// Only for "JNI_TEST_NORMAL_ONLY" configs, and it inserts a test assert to ensure this is the
// case.
#define NORMAL_JNI_ONLY_NOWRAP(func) \
    ({ ASSERT_TRUE(IsCurrentJniNormal()); reinterpret_cast<void*>(&(func)); })

// Same as above, but with nullptr. When we want to test the stub functionality.
#define NORMAL_JNI_ONLY_NULLPTR \ ({ ASSERT_TRUE(IsCurrentJniNormal()); nullptr; }) int gJava_MyClassNatives_foo_calls[kJniKindCount] = {}; void Java_MyClassNatives_foo(JNIEnv*, jobject) { gJava_MyClassNatives_foo_calls[gCurrentJni]++; } void JniCompilerTest::CompileAndRunNoArgMethodImpl() { SetUpForTest(false, "foo", "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_foo)); EXPECT_EQ(0, gJava_MyClassNatives_foo_calls[gCurrentJni]); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_); EXPECT_EQ(2, gJava_MyClassNatives_foo_calls[gCurrentJni]); gJava_MyClassNatives_foo_calls[gCurrentJni] = 0; } JNI_TEST(CompileAndRunNoArgMethod) void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { SetUpForTest(false, "bar", "(I)I", NORMAL_JNI_ONLY_NULLPTR); // calling through stub will link with &Java_MyClassNatives_bar std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); EXPECT_EQ(25, result); } // TODO: Support @FastNative and @CriticalNative through stubs. JNI_TEST_NORMAL_ONLY(CompileAndRunIntMethodThroughStub) void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { SetUpForTest(true, "sbar", "(I)I", NORMAL_JNI_ONLY_NULLPTR); // calling through stub will link with &Java_MyClassNatives_sbar std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); EXPECT_EQ(43, result); } // TODO: Support @FastNative and @CriticalNative through stubs. 
JNI_TEST_NORMAL_ONLY(CompileAndRunStaticIntMethodThroughStub)

int gJava_MyClassNatives_fooI_calls[kJniKindCount] = {};
jint Java_MyClassNatives_fooI(JNIEnv*, jobject, jint x) {
  gJava_MyClassNatives_fooI_calls[gCurrentJni]++;
  return x;
}

void JniCompilerTest::CompileAndRunIntMethodImpl() {
  SetUpForTest(false, "fooI", "(I)I", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooI));

  EXPECT_EQ(0, gJava_MyClassNatives_fooI_calls[gCurrentJni]);
  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 42);
  EXPECT_EQ(42, result);
  EXPECT_EQ(1, gJava_MyClassNatives_fooI_calls[gCurrentJni]);
  result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D);
  EXPECT_EQ(static_cast<jint>(0xCAFED00D), result);
  EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls[gCurrentJni]);

  gJava_MyClassNatives_fooI_calls[gCurrentJni] = 0;
}

JNI_TEST(CompileAndRunIntMethod)

int gJava_MyClassNatives_fooII_calls[kJniKindCount] = {};
jint Java_MyClassNatives_fooII(JNIEnv*, jobject, jint x, jint y) {
  gJava_MyClassNatives_fooII_calls[gCurrentJni]++;
  return x - y;  // non-commutative operator
}

void JniCompilerTest::CompileAndRunIntIntMethodImpl() {
  SetUpForTest(false, "fooII", "(II)I", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooII));

  EXPECT_EQ(0, gJava_MyClassNatives_fooII_calls[gCurrentJni]);
  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 99, 10);
  EXPECT_EQ(99 - 10, result);
  EXPECT_EQ(1, gJava_MyClassNatives_fooII_calls[gCurrentJni]);
  result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFEBABE, 0xCAFED00D);
  EXPECT_EQ(static_cast<jint>(0xCAFEBABE - 0xCAFED00D), result);
  EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls[gCurrentJni]);

  gJava_MyClassNatives_fooII_calls[gCurrentJni] = 0;
}

JNI_TEST(CompileAndRunIntIntMethod)

int gJava_MyClassNatives_fooJJ_calls[kJniKindCount] = {};
jlong Java_MyClassNatives_fooJJ(JNIEnv*, jobject, jlong x, jlong y) {
  gJava_MyClassNatives_fooJJ_calls[gCurrentJni]++;
  return x - y;  // non-commutative operator
}

void JniCompilerTest::CompileAndRunLongLongMethodImpl() {
  SetUpForTest(false, "fooJJ", "(JJ)J", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ));

  EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]);
  jlong a = INT64_C(0x1234567890ABCDEF);
  jlong b = INT64_C(0xFEDCBA0987654321);
  jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b);
  EXPECT_EQ(a - b, result);
  EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]);
  result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a);
  EXPECT_EQ(b - a, result);
  EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls[gCurrentJni]);

  gJava_MyClassNatives_fooJJ_calls[gCurrentJni] = 0;
}

JNI_TEST(CompileAndRunLongLongMethod)

int gJava_MyClassNatives_fooDD_calls[kJniKindCount] = {};
jdouble Java_MyClassNatives_fooDD(JNIEnv*, jobject, jdouble x, jdouble y) {
  gJava_MyClassNatives_fooDD_calls[gCurrentJni]++;
  return x - y;  // non-commutative operator
}

void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() {
  SetUpForTest(false, "fooDD", "(DD)D", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooDD));

  EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls[gCurrentJni]);
  jdouble result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, 99.0, 10.0);
  EXPECT_DOUBLE_EQ(99.0 - 10.0, result);
  EXPECT_EQ(1, gJava_MyClassNatives_fooDD_calls[gCurrentJni]);
  jdouble a = 3.14159265358979323846;
  jdouble b = 0.69314718055994530942;
  result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b);
  EXPECT_DOUBLE_EQ(a - b, result);
  EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls[gCurrentJni]);
gJava_MyClassNatives_fooDD_calls[gCurrentJni] = 0; } int gJava_MyClassNatives_fooJJ_synchronized_calls[kJniKindCount] = {}; jlong Java_MyClassNatives_fooJJ_synchronized(JNIEnv*, jobject, jlong x, jlong y) { gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]++; return x | y; } void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() { SetUpForTest(false, "fooJJ_synchronized", "(JJ)J", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooJJ_synchronized)); EXPECT_EQ(0, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]); jlong a = 0x1000000020000000ULL; jlong b = 0x00ff000000aa0000ULL; jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b); EXPECT_EQ(a | b, result); EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni]); gJava_MyClassNatives_fooJJ_synchronized_calls[gCurrentJni] = 0; } JNI_TEST_NORMAL_ONLY(CompileAndRun_fooJJ_synchronized) int gJava_MyClassNatives_fooIOO_calls[kJniKindCount] = {}; jobject Java_MyClassNatives_fooIOO(JNIEnv*, jobject thisObj, jint x, jobject y, jobject z) { gJava_MyClassNatives_fooIOO_calls[gCurrentJni]++; switch (x) { case 1: return y; case 2: return z; default: return thisObj; } } void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() { SetUpForTest(false, "fooIOO", "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooIOO)); EXPECT_EQ(0, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, nullptr); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(1, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, nullptr, jklass_); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(2, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, nullptr, jklass_); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(3, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, nullptr, jklass_); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(4, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 0, jklass_, nullptr); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(5, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 1, jklass_, nullptr); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(6, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls[gCurrentJni]); gJava_MyClassNatives_fooIOO_calls[gCurrentJni] = 0; } JNI_TEST(CompileAndRunIntObjectObjectMethod) int gJava_MyClassNatives_fooSII_calls[kJniKindCount] = {}; jint Java_MyClassNatives_fooSII(JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jint x, jint y) { gJava_MyClassNatives_fooSII_calls[gCurrentJni]++; return x + y; } void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() { SetUpForTest(true, "fooSII", "(II)I", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSII)); EXPECT_EQ(0, gJava_MyClassNatives_fooSII_calls[gCurrentJni]); jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30); EXPECT_EQ(50, result); EXPECT_EQ(1, 
gJava_MyClassNatives_fooSII_calls[gCurrentJni]); gJava_MyClassNatives_fooSII_calls[gCurrentJni] = 0; } JNI_TEST_CRITICAL(CompileAndRunStaticIntIntMethod) int gJava_MyClassNatives_fooSDD_calls[kJniKindCount] = {}; jdouble Java_MyClassNatives_fooSDD(JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED, jdouble x, jdouble y) { gJava_MyClassNatives_fooSDD_calls[gCurrentJni]++; return x - y; // non-commutative operator } void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() { SetUpForTest(true, "fooSDD", "(DD)D", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSDD)); EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 99.0, 10.0); EXPECT_DOUBLE_EQ(99.0 - 10.0, result); EXPECT_EQ(1, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); jdouble a = 3.14159265358979323846; jdouble b = 0.69314718055994530942; result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b); EXPECT_DOUBLE_EQ(a - b, result); EXPECT_DOUBLE_EQ(2, gJava_MyClassNatives_fooSDD_calls[gCurrentJni]); gJava_MyClassNatives_fooSDD_calls[gCurrentJni] = 0; } JNI_TEST_CRITICAL(CompileAndRunStaticDoubleDoubleMethod) // The x86 generic JNI code had a bug where it assumed a floating // point return value would be in xmm0. We use log, to somehow ensure // the compiler will use the floating point stack. jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) { return log(x); } jdouble Java_MyClassNatives_logD_notNormal(JNIEnv*, jclass, jdouble x) { EXPECT_DOUBLE_EQ(2.0, x); return log(x); } void JniCompilerTest::RunStaticLogDoubleMethodImpl() { void* jni_handler; if (IsCurrentJniNormal()) { // This test seems a bit special, don't use a JNI wrapper here. jni_handler = NORMAL_JNI_ONLY_NOWRAP(Java_MyClassNatives_logD); } else { jni_handler = CURRENT_JNI_WRAPPER(Java_MyClassNatives_logD_notNormal); } SetUpForTest(true, "logD", "(D)D", jni_handler); jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0); EXPECT_DOUBLE_EQ(log(2.0), result); } JNI_TEST_CRITICAL(RunStaticLogDoubleMethod) jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) { return logf(x); } void JniCompilerTest::RunStaticLogFloatMethodImpl() { void* jni_handler; if (IsCurrentJniNormal()) { // This test seems a bit special, don't use a JNI wrapper here. 
jni_handler = NORMAL_JNI_ONLY_NOWRAP(Java_MyClassNatives_logF); } else { jni_handler = CURRENT_JNI_WRAPPER(Java_MyClassNatives_logF); } SetUpForTest(true, "logF", "(F)F", jni_handler); jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0); EXPECT_FLOAT_EQ(logf(2.0), result); } JNI_TEST_CRITICAL(RunStaticLogFloatMethod) jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) { return JNI_TRUE; } jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) { return JNI_FALSE; } jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) { return 42; } void JniCompilerTest::RunStaticReturnTrueImpl() { SetUpForTest(true, "returnTrue", "()Z", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnTrue)); jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); EXPECT_TRUE(result); } JNI_TEST_CRITICAL(RunStaticReturnTrue) void JniCompilerTest::RunStaticReturnFalseImpl() { SetUpForTest(true, "returnFalse", "()Z", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnFalse)); jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_); EXPECT_FALSE(result); } JNI_TEST_CRITICAL(RunStaticReturnFalse) void JniCompilerTest::RunGenericStaticReturnIntImpl() { SetUpForTest(true, "returnInt", "()I", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnInt)); jint result = env_->CallStaticIntMethod(jklass_, jmethod_); EXPECT_EQ(42, result); } JNI_TEST_CRITICAL(RunGenericStaticReturnInt) int gJava_MyClassNatives_returnDouble_calls[kJniKindCount] = {}; jdouble Java_MyClassNatives_returnDouble(JNIEnv*, jclass) { gJava_MyClassNatives_returnDouble_calls[gCurrentJni]++; return 4.0; } void JniCompilerTest::RunGenericStaticReturnDoubleImpl() { SetUpForTest(true, "returnDouble", "()D", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnDouble)); jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_); EXPECT_DOUBLE_EQ(4.0, result); EXPECT_EQ(1, gJava_MyClassNatives_returnDouble_calls[gCurrentJni]); gJava_MyClassNatives_returnDouble_calls[gCurrentJni] = 0; } JNI_TEST_CRITICAL(RunGenericStaticReturnDouble) jlong Java_MyClassNatives_returnLong(JNIEnv*, jclass) { return 0xFEEDDEADFEEDL; } void JniCompilerTest::RunGenericStaticReturnLongImpl() { SetUpForTest(true, "returnLong", "()J", CURRENT_JNI_WRAPPER(Java_MyClassNatives_returnLong)); jlong result = env_->CallStaticLongMethod(jklass_, jmethod_); EXPECT_EQ(0xFEEDDEADFEEDL, result); } JNI_TEST_CRITICAL(RunGenericStaticReturnLong) int gJava_MyClassNatives_fooSIOO_calls[kJniKindCount] = {}; jobject Java_MyClassNatives_fooSIOO(JNIEnv*, jclass klass, jint x, jobject y, jobject z) { gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]++; switch (x) { case 1: return y; case 2: return z; default: return klass; } } void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() { SetUpForTest(true, "fooSIOO", "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSIOO)); EXPECT_EQ(0, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(1, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(2, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(3, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = 
env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(4, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(5, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(6, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls[gCurrentJni]); gJava_MyClassNatives_fooSIOO_calls[gCurrentJni] = 0; } JNI_TEST(CompileAndRunStaticIntObjectObjectMethod) int gJava_MyClassNatives_fooSSIOO_calls[kJniKindCount] = {}; jobject Java_MyClassNatives_fooSSIOO(JNIEnv*, jclass klass, jint x, jobject y, jobject z) { gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]++; switch (x) { case 1: return y; case 2: return z; default: return klass; } } void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() { SetUpForTest(true, "fooSSIOO", "(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooSSIOO)); EXPECT_EQ(0, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); jobject result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, nullptr); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(1, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(2, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(3, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, nullptr, jobj_); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(4, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 0, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(jklass_, result)); EXPECT_EQ(5, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 1, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(jobj_, result)); EXPECT_EQ(6, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr); EXPECT_TRUE(env_->IsSameObject(nullptr, result)); EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni]); gJava_MyClassNatives_fooSSIOO_calls[gCurrentJni] = 0; } // TODO: Maybe. @FastNative support for returning Objects? 
JNI_TEST_NORMAL_ONLY(CompileAndRunStaticSynchronizedIntObjectObjectMethod)

void Java_MyClassNatives_throwException(JNIEnv* env, jobject) {
  jclass c = env->FindClass("java/lang/RuntimeException");
  env->ThrowNew(c, "hello");
}

void JniCompilerTest::ExceptionHandlingImpl() {
  {
    ASSERT_FALSE(runtime_->IsStarted());
    ScopedObjectAccess soa(Thread::Current());
    class_loader_ = LoadDex("MyClassNatives");

    // all compilation needs to happen before Runtime::Start
    CompileForTestWithCurrentJni(class_loader_, false, "foo", "()V");
    CompileForTestWithCurrentJni(class_loader_, false, "throwException", "()V");
    CompileForTestWithCurrentJni(class_loader_, false, "foo", "()V");
  }
  // Start runtime to avoid re-initialization in SetupForTest.
  Thread::Current()->TransitionFromSuspendedToRunnable();
  bool started = runtime_->Start();
  CHECK(started);

  gJava_MyClassNatives_foo_calls[gCurrentJni] = 0;

  // Check a single call of a JNI method is ok
  SetUpForTest(false, "foo", "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_foo));
  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]);
  EXPECT_FALSE(Thread::Current()->IsExceptionPending());

  // Get class for exception we expect to be thrown
  ScopedLocalRef<jclass> jlre(env_, env_->FindClass("java/lang/RuntimeException"));
  SetUpForTest(false, "throwException", "()V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_throwException));
  // Call Java_MyClassNatives_throwException (JNI method that throws exception)
  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
  EXPECT_EQ(1, gJava_MyClassNatives_foo_calls[gCurrentJni]);
  EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
  ScopedLocalRef<jthrowable> exception(env_, env_->ExceptionOccurred());
  env_->ExceptionClear();
  EXPECT_TRUE(env_->IsInstanceOf(exception.get(), jlre.get()));

  // Check a single call of a JNI method is ok
  SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
  env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
  EXPECT_EQ(2, gJava_MyClassNatives_foo_calls[gCurrentJni]);

  gJava_MyClassNatives_foo_calls[gCurrentJni] = 0;
}

JNI_TEST(ExceptionHandling)

jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
  if (i <= 0) {
    // We want to check raw Object* / Array* below
    ScopedObjectAccess soa(env);

    // Build stack trace
    jobject internal = Thread::Current()->CreateInternalStackTrace<false>(soa);
    jobjectArray ste_array = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
    ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> trace_array =
        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(ste_array);
    EXPECT_TRUE(trace_array != nullptr);
    EXPECT_EQ(11, trace_array->GetLength());

    // Check stack trace entries have expected values
    for (int32_t j = 0; j < trace_array->GetLength(); ++j) {
      EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber());
      mirror::StackTraceElement* ste = trace_array->Get(j);
      EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str());
      EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str());
      EXPECT_EQ(("fooI" + CurrentJniStringSuffix()), ste->GetMethodName()->ToModifiedUtf8());
    }

    // end recursion
    return 0;
  } else {
    jclass jklass = env->FindClass("MyClassNatives");
    EXPECT_TRUE(jklass != nullptr);
    jmethodID jmethod = env->GetMethodID(jklass,
                                         ("fooI" + CurrentJniStringSuffix()).c_str(), "(I)I");
    EXPECT_TRUE(jmethod != nullptr);

    // Recurse with i - 1
    jint result = env->CallNonvirtualIntMethod(thisObj, jklass, jmethod, i - 1);

    // Return sum of all depths
    return i + result;
  }
}

void JniCompilerTest::NativeStackTraceElementImpl() {
  SetUpForTest(false, "fooI", "(I)I",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_nativeUpCall));

  // Usual # local references on stack check fails because nativeUpCall calls itself recursively,
  // each time the # of local references will therefore go up.
  ScopedDisableCheckNumStackReferences disable_num_stack_check;
  jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 10);

  EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result);
}

JNI_TEST(NativeStackTraceElement)

jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
  return env->NewGlobalRef(x);
}

void JniCompilerTest::ReturnGlobalRefImpl() {
  SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_fooO));
  jobject result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, jobj_);
  EXPECT_EQ(JNILocalRefType, env_->GetObjectRefType(result));
  EXPECT_TRUE(env_->IsSameObject(result, jobj_));
}

JNI_TEST(ReturnGlobalRef)

jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
  // Add 10 local references
  ScopedObjectAccess soa(env);
  for (int i = 0; i < 10; i++) {
    soa.AddLocalReference<jobject>(soa.Decode<mirror::Object>(thisObj));
  }
  return x+1;
}

void JniCompilerTest::LocalReferenceTableClearingTestImpl() {
  SetUpForTest(false, "fooI", "(I)I", CURRENT_JNI_WRAPPER(local_ref_test));

  // 1000 invocations of a method that adds 10 local references
  for (int i = 0; i < 1000; i++) {
    jint result = env_->CallIntMethod(jobj_, jmethod_, i);
    EXPECT_TRUE(result == i + 1);
  }
}

JNI_TEST(LocalReferenceTableClearingTest)

void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos,
                  jobject dst, jint dst_pos, jint length) {
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass));
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst));
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, src));
  EXPECT_EQ(1234, src_pos);
  EXPECT_EQ(5678, dst_pos);
  EXPECT_EQ(9876, length);
}

void JniCompilerTest::JavaLangSystemArrayCopyImpl() {
  SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
               CURRENT_JNI_WRAPPER(my_arraycopy));
  env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
}

JNI_TEST(JavaLangSystemArrayCopy)

jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset,
                 jint expected, jint newval) {
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe));
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj));
  EXPECT_EQ(INT64_C(0x12345678ABCDEF88), offset);
  EXPECT_EQ(static_cast<jint>(0xCAFEF00D), expected);
  EXPECT_EQ(static_cast<jint>(0xEBADF00D), newval);
  return JNI_TRUE;
}

void JniCompilerTest::CompareAndSwapIntImpl() {
  SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
               CURRENT_JNI_WRAPPER(my_casi));
  jboolean result = env_->CallBooleanMethod(jobj_, jmethod_, jobj_,
                                            INT64_C(0x12345678ABCDEF88), 0xCAFEF00D, 0xEBADF00D);
  EXPECT_EQ(result, JNI_TRUE);
}

JNI_TEST(CompareAndSwapInt)

jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) {
  EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
  EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
  EXPECT_EQ(0x12345678ABCDEF88ll, val1);
  EXPECT_EQ(0x7FEDCBA987654321ll, val2);
  return 42;
}

void JniCompilerTest::GetTextImpl() {
  SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
               CURRENT_JNI_WRAPPER(my_gettext));
  jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
                                          INT64_C(0x7FEDCBA987654321), jobj_);
  EXPECT_EQ(result, 42);
}

JNI_TEST(GetText)

int gJava_MyClassNatives_GetSinkProperties_calls[kJniKindCount] = {};
jarray Java_MyClassNatives_GetSinkProperties(JNIEnv*, jobject thisObj, jstring s) {
  EXPECT_EQ(s, nullptr);
  gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]++;

  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  EXPECT_TRUE(self->HoldsLock(soa.Decode<mirror::Object>(thisObj).Ptr()));
  return nullptr;
}

void JniCompilerTest::GetSinkPropertiesNativeImpl() {
  SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_GetSinkProperties));

  EXPECT_EQ(0, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]);
  jarray result = down_cast<jarray>(
      env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr));
  EXPECT_EQ(nullptr, result);
  EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni]);

  gJava_MyClassNatives_GetSinkProperties_calls[gCurrentJni] = 0;
}

// @FastNative doesn't support 'synchronized' keyword and
// never will -- locking functions aren't fast.
JNI_TEST_NORMAL_ONLY(GetSinkPropertiesNative)

// This should return jclass, but we're imitating a bug pattern.
jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) {
  return env->NewStringUTF("not a class!");
}

// This should return jclass, but we're imitating a bug pattern.
jobject Java_MyClassNatives_staticMethodThatShouldReturnClass(JNIEnv* env, jclass) {
  return env->NewStringUTF("not a class!");
}

void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
  SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_instanceMethodThatShouldReturnClass));

  CheckJniAbortCatcher check_jni_abort_catcher;
  // This native method is bad, and tries to return a jstring as a jclass.
  env_->CallObjectMethod(jobj_, jmethod_);
  check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " +
                                "of java.lang.String from java.lang.Class " +
                                "MyClassNatives.instanceMethodThatShouldReturnClass" +
                                CurrentJniStringSuffix() + "()");

  // Here, we just call the method incorrectly; we should catch that too.
  env_->CallObjectMethod(jobj_, jmethod_);
  check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " +
                                "of java.lang.String from java.lang.Class " +
                                "MyClassNatives.instanceMethodThatShouldReturnClass" +
                                CurrentJniStringSuffix() + "()");
  env_->CallStaticObjectMethod(jklass_, jmethod_);
  check_jni_abort_catcher.Check(std::string() + "calling non-static method " +
                                "java.lang.Class " +
                                "MyClassNatives.instanceMethodThatShouldReturnClass" +
                                CurrentJniStringSuffix() + "() with CallStaticObjectMethodV");
}

JNI_TEST(UpcallReturnTypeChecking_Instance)

void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() {
  SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_staticMethodThatShouldReturnClass));

  CheckJniAbortCatcher check_jni_abort_catcher;
  // This native method is bad, and tries to return a jstring as a jclass.
  env_->CallStaticObjectMethod(jklass_, jmethod_);
  check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " +
                                "of java.lang.String from java.lang.Class " +
                                "MyClassNatives.staticMethodThatShouldReturnClass" +
                                CurrentJniStringSuffix() + "()");

  // Here, we just call the method incorrectly; we should catch that too.
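  // (Note: CheckJniAbortCatcher, from the test harness, intercepts the CheckJNI abort and
  //  matches the message instead of letting the runtime crash; each offending call is paired
  //  with exactly one Check(). The repeated call re-triggers the same bad-return-type abort.)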
env_->CallStaticObjectMethod(jklass_, jmethod_); check_jni_abort_catcher.Check(std::string() + "attempt to return an instance " + "of java.lang.String from java.lang.Class " + "MyClassNatives.staticMethodThatShouldReturnClass" + CurrentJniStringSuffix() + "()"); env_->CallObjectMethod(jobj_, jmethod_); check_jni_abort_catcher.Check(std::string() + "calling static method " + "java.lang.Class " + "MyClassNatives.staticMethodThatShouldReturnClass" + CurrentJniStringSuffix() + "() with CallObjectMethodV"); } JNI_TEST(UpcallReturnTypeChecking_Static) // This should take jclass, but we're imitating a bug pattern. void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) { } // This should take jclass, but we're imitating a bug pattern. void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) { } void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() { // This will lead to error messages in the log. ScopedLogSeverity sls(LogSeverity::FATAL); SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_instanceMethodThatShouldTakeClass)); CheckJniAbortCatcher check_jni_abort_catcher; // We deliberately pass a bad second argument here. env_->CallVoidMethod(jobj_, jmethod_, 123, env_->NewStringUTF("not a class!")); check_jni_abort_catcher.Check(std::string() + "bad arguments passed to void " + "MyClassNatives.instanceMethodThatShouldTakeClass" + CurrentJniStringSuffix() + "(int, java.lang.Class)"); } JNI_TEST(UpcallArgumentTypeChecking_Instance) void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() { // This will lead to error messages in the log. ScopedLogSeverity sls(LogSeverity::FATAL); SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_staticMethodThatShouldTakeClass)); CheckJniAbortCatcher check_jni_abort_catcher; // We deliberately pass a bad second argument here. 
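  // (The declared descriptor above is (ILjava/lang/Class;)V, so CheckJNI compares the declared
  //  java.lang.Class parameter against the actual java.lang.String argument and aborts with the
  //  "bad arguments" message matched below.)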
env_->CallStaticVoidMethod(jklass_, jmethod_, 123, env_->NewStringUTF("not a class!")); check_jni_abort_catcher.Check(std::string() + "bad arguments passed to void " + "MyClassNatives.staticMethodThatShouldTakeClass" + CurrentJniStringSuffix() + "(int, java.lang.Class)"); } JNI_TEST(UpcallArgumentTypeChecking_Static) jfloat Java_MyClassNatives_checkFloats(JNIEnv*, jobject, jfloat f1, jfloat f2) { return f1 - f2; // non-commutative operator } void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() { SetUpForTest(false, "checkFloats", "(FF)F", CURRENT_JNI_WRAPPER(Java_MyClassNatives_checkFloats)); jfloat result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, 99.0F, 10.0F); EXPECT_FLOAT_EQ(99.0F - 10.0F, result); jfloat a = 3.14159F; jfloat b = 0.69314F; result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, a, b); EXPECT_FLOAT_EQ(a - b, result); } JNI_TEST(CompileAndRunFloatFloatMethod) void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED, jobject thisObj ATTRIBUTE_UNUSED, jint i1, jlong l1) { EXPECT_EQ(i1, 1234); EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0)); } void JniCompilerTest::CheckParameterAlignImpl() { SetUpForTest(false, "checkParameterAlign", "(IJ)V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_checkParameterAlign)); env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_, 1234, INT64_C(0x12345678ABCDEF0)); } JNI_TEST(CheckParameterAlign) void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject, jobject o0, jobject o1, jobject o2, jobject o3, jobject o4, jobject o5, jobject o6, jobject o7, jobject o8, jobject o9, jobject o10, jobject o11, jobject o12, jobject o13, jobject o14, jobject o15, jobject o16, jobject o17, jobject o18, jobject o19, jobject o20, jobject o21, jobject o22, jobject o23, jobject o24, jobject o25, jobject o26, jobject o27, jobject o28, jobject o29, jobject o30, jobject o31, jobject o32, jobject o33, jobject o34, jobject o35, jobject o36, jobject o37, jobject o38, jobject o39, jobject o40, jobject o41, jobject o42, jobject o43, jobject o44, jobject o45, jobject o46, jobject o47, jobject o48, jobject o49, jobject o50, jobject o51, jobject o52, jobject o53, jobject o54, jobject o55, jobject o56, jobject o57, jobject o58, jobject o59, jobject o60, jobject o61, jobject o62, jobject o63, jobject o64, jobject o65, jobject o66, jobject o67, jobject o68, jobject o69, jobject o70, jobject o71, jobject o72, jobject o73, jobject o74, jobject o75, jobject o76, jobject o77, jobject o78, jobject o79, jobject o80, jobject o81, jobject o82, jobject o83, jobject o84, jobject o85, jobject o86, jobject o87, jobject o88, jobject o89, jobject o90, jobject o91, jobject o92, jobject o93, jobject o94, jobject o95, jobject o96, jobject o97, jobject o98, jobject o99, jobject o100, jobject o101, jobject o102, jobject o103, jobject o104, jobject o105, jobject o106, jobject o107, jobject o108, jobject o109, jobject o110, jobject o111, jobject o112, jobject o113, jobject o114, jobject o115, jobject o116, jobject o117, jobject o118, jobject o119, jobject o120, jobject o121, jobject o122, jobject o123, jobject o124, jobject o125, jobject o126, jobject o127, jobject o128, jobject o129, jobject o130, jobject o131, jobject o132, jobject o133, jobject o134, jobject o135, jobject o136, jobject o137, jobject o138, jobject o139, jobject o140, jobject o141, jobject o142, jobject o143, jobject o144, jobject o145, jobject o146, jobject o147, jobject o148, jobject o149, jobject o150, jobject o151, jobject o152, jobject o153, jobject o154, jobject o155, jobject 
o156, jobject o157, jobject o158, jobject o159, jobject o160, jobject o161, jobject o162, jobject o163, jobject o164, jobject o165, jobject o166, jobject o167, jobject o168, jobject o169, jobject o170, jobject o171, jobject o172, jobject o173, jobject o174, jobject o175, jobject o176, jobject o177, jobject o178, jobject o179, jobject o180, jobject o181, jobject o182, jobject o183, jobject o184, jobject o185, jobject o186, jobject o187, jobject o188, jobject o189, jobject o190, jobject o191, jobject o192, jobject o193, jobject o194, jobject o195, jobject o196, jobject o197, jobject o198, jobject o199, jobject o200, jobject o201, jobject o202, jobject o203, jobject o204, jobject o205, jobject o206, jobject o207, jobject o208, jobject o209, jobject o210, jobject o211, jobject o212, jobject o213, jobject o214, jobject o215, jobject o216, jobject o217, jobject o218, jobject o219, jobject o220, jobject o221, jobject o222, jobject o223, jobject o224, jobject o225, jobject o226, jobject o227, jobject o228, jobject o229, jobject o230, jobject o231, jobject o232, jobject o233, jobject o234, jobject o235, jobject o236, jobject o237, jobject o238, jobject o239, jobject o240, jobject o241, jobject o242, jobject o243, jobject o244, jobject o245, jobject o246, jobject o247, jobject o248, jobject o249, jobject o250, jobject o251, jobject o252, jobject o253) { // two tests possible if (o0 == nullptr) { // 1) everything is null EXPECT_TRUE(o0 == nullptr && o1 == nullptr && o2 == nullptr && o3 == nullptr && o4 == nullptr && o5 == nullptr && o6 == nullptr && o7 == nullptr && o8 == nullptr && o9 == nullptr && o10 == nullptr && o11 == nullptr && o12 == nullptr && o13 == nullptr && o14 == nullptr && o15 == nullptr && o16 == nullptr && o17 == nullptr && o18 == nullptr && o19 == nullptr && o20 == nullptr && o21 == nullptr && o22 == nullptr && o23 == nullptr && o24 == nullptr && o25 == nullptr && o26 == nullptr && o27 == nullptr && o28 == nullptr && o29 == nullptr && o30 == nullptr && o31 == nullptr && o32 == nullptr && o33 == nullptr && o34 == nullptr && o35 == nullptr && o36 == nullptr && o37 == nullptr && o38 == nullptr && o39 == nullptr && o40 == nullptr && o41 == nullptr && o42 == nullptr && o43 == nullptr && o44 == nullptr && o45 == nullptr && o46 == nullptr && o47 == nullptr && o48 == nullptr && o49 == nullptr && o50 == nullptr && o51 == nullptr && o52 == nullptr && o53 == nullptr && o54 == nullptr && o55 == nullptr && o56 == nullptr && o57 == nullptr && o58 == nullptr && o59 == nullptr && o60 == nullptr && o61 == nullptr && o62 == nullptr && o63 == nullptr && o64 == nullptr && o65 == nullptr && o66 == nullptr && o67 == nullptr && o68 == nullptr && o69 == nullptr && o70 == nullptr && o71 == nullptr && o72 == nullptr && o73 == nullptr && o74 == nullptr && o75 == nullptr && o76 == nullptr && o77 == nullptr && o78 == nullptr && o79 == nullptr && o80 == nullptr && o81 == nullptr && o82 == nullptr && o83 == nullptr && o84 == nullptr && o85 == nullptr && o86 == nullptr && o87 == nullptr && o88 == nullptr && o89 == nullptr && o90 == nullptr && o91 == nullptr && o92 == nullptr && o93 == nullptr && o94 == nullptr && o95 == nullptr && o96 == nullptr && o97 == nullptr && o98 == nullptr && o99 == nullptr && o100 == nullptr && o101 == nullptr && o102 == nullptr && o103 == nullptr && o104 == nullptr && o105 == nullptr && o106 == nullptr && o107 == nullptr && o108 == nullptr && o109 == nullptr && o110 == nullptr && o111 == nullptr && o112 == nullptr && o113 == nullptr && o114 == nullptr && o115 == nullptr && o116 == 
nullptr && o117 == nullptr && o118 == nullptr && o119 == nullptr && o120 == nullptr && o121 == nullptr && o122 == nullptr && o123 == nullptr && o124 == nullptr && o125 == nullptr && o126 == nullptr && o127 == nullptr && o128 == nullptr && o129 == nullptr && o130 == nullptr && o131 == nullptr && o132 == nullptr && o133 == nullptr && o134 == nullptr && o135 == nullptr && o136 == nullptr && o137 == nullptr && o138 == nullptr && o139 == nullptr && o140 == nullptr && o141 == nullptr && o142 == nullptr && o143 == nullptr && o144 == nullptr && o145 == nullptr && o146 == nullptr && o147 == nullptr && o148 == nullptr && o149 == nullptr && o150 == nullptr && o151 == nullptr && o152 == nullptr && o153 == nullptr && o154 == nullptr && o155 == nullptr && o156 == nullptr && o157 == nullptr && o158 == nullptr && o159 == nullptr && o160 == nullptr && o161 == nullptr && o162 == nullptr && o163 == nullptr && o164 == nullptr && o165 == nullptr && o166 == nullptr && o167 == nullptr && o168 == nullptr && o169 == nullptr && o170 == nullptr && o171 == nullptr && o172 == nullptr && o173 == nullptr && o174 == nullptr && o175 == nullptr && o176 == nullptr && o177 == nullptr && o178 == nullptr && o179 == nullptr && o180 == nullptr && o181 == nullptr && o182 == nullptr && o183 == nullptr && o184 == nullptr && o185 == nullptr && o186 == nullptr && o187 == nullptr && o188 == nullptr && o189 == nullptr && o190 == nullptr && o191 == nullptr && o192 == nullptr && o193 == nullptr && o194 == nullptr && o195 == nullptr && o196 == nullptr && o197 == nullptr && o198 == nullptr && o199 == nullptr && o200 == nullptr && o201 == nullptr && o202 == nullptr && o203 == nullptr && o204 == nullptr && o205 == nullptr && o206 == nullptr && o207 == nullptr && o208 == nullptr && o209 == nullptr && o210 == nullptr && o211 == nullptr && o212 == nullptr && o213 == nullptr && o214 == nullptr && o215 == nullptr && o216 == nullptr && o217 == nullptr && o218 == nullptr && o219 == nullptr && o220 == nullptr && o221 == nullptr && o222 == nullptr && o223 == nullptr && o224 == nullptr && o225 == nullptr && o226 == nullptr && o227 == nullptr && o228 == nullptr && o229 == nullptr && o230 == nullptr && o231 == nullptr && o232 == nullptr && o233 == nullptr && o234 == nullptr && o235 == nullptr && o236 == nullptr && o237 == nullptr && o238 == nullptr && o239 == nullptr && o240 == nullptr && o241 == nullptr && o242 == nullptr && o243 == nullptr && o244 == nullptr && o245 == nullptr && o246 == nullptr && o247 == nullptr && o248 == nullptr && o249 == nullptr && o250 == nullptr && o251 == nullptr && o252 == nullptr && o253 == nullptr); } else { EXPECT_EQ(0, env->GetArrayLength(reinterpret_cast(o0))); EXPECT_EQ(1, env->GetArrayLength(reinterpret_cast(o1))); EXPECT_EQ(2, env->GetArrayLength(reinterpret_cast(o2))); EXPECT_EQ(3, env->GetArrayLength(reinterpret_cast(o3))); EXPECT_EQ(4, env->GetArrayLength(reinterpret_cast(o4))); EXPECT_EQ(5, env->GetArrayLength(reinterpret_cast(o5))); EXPECT_EQ(6, env->GetArrayLength(reinterpret_cast(o6))); EXPECT_EQ(7, env->GetArrayLength(reinterpret_cast(o7))); EXPECT_EQ(8, env->GetArrayLength(reinterpret_cast(o8))); EXPECT_EQ(9, env->GetArrayLength(reinterpret_cast(o9))); EXPECT_EQ(10, env->GetArrayLength(reinterpret_cast(o10))); EXPECT_EQ(11, env->GetArrayLength(reinterpret_cast(o11))); EXPECT_EQ(12, env->GetArrayLength(reinterpret_cast(o12))); EXPECT_EQ(13, env->GetArrayLength(reinterpret_cast(o13))); EXPECT_EQ(14, env->GetArrayLength(reinterpret_cast(o14))); EXPECT_EQ(15, env->GetArrayLength(reinterpret_cast(o15))); 
EXPECT_EQ(16, env->GetArrayLength(reinterpret_cast(o16))); EXPECT_EQ(17, env->GetArrayLength(reinterpret_cast(o17))); EXPECT_EQ(18, env->GetArrayLength(reinterpret_cast(o18))); EXPECT_EQ(19, env->GetArrayLength(reinterpret_cast(o19))); EXPECT_EQ(20, env->GetArrayLength(reinterpret_cast(o20))); EXPECT_EQ(21, env->GetArrayLength(reinterpret_cast(o21))); EXPECT_EQ(22, env->GetArrayLength(reinterpret_cast(o22))); EXPECT_EQ(23, env->GetArrayLength(reinterpret_cast(o23))); EXPECT_EQ(24, env->GetArrayLength(reinterpret_cast(o24))); EXPECT_EQ(25, env->GetArrayLength(reinterpret_cast(o25))); EXPECT_EQ(26, env->GetArrayLength(reinterpret_cast(o26))); EXPECT_EQ(27, env->GetArrayLength(reinterpret_cast(o27))); EXPECT_EQ(28, env->GetArrayLength(reinterpret_cast(o28))); EXPECT_EQ(29, env->GetArrayLength(reinterpret_cast(o29))); EXPECT_EQ(30, env->GetArrayLength(reinterpret_cast(o30))); EXPECT_EQ(31, env->GetArrayLength(reinterpret_cast(o31))); EXPECT_EQ(32, env->GetArrayLength(reinterpret_cast(o32))); EXPECT_EQ(33, env->GetArrayLength(reinterpret_cast(o33))); EXPECT_EQ(34, env->GetArrayLength(reinterpret_cast(o34))); EXPECT_EQ(35, env->GetArrayLength(reinterpret_cast(o35))); EXPECT_EQ(36, env->GetArrayLength(reinterpret_cast(o36))); EXPECT_EQ(37, env->GetArrayLength(reinterpret_cast(o37))); EXPECT_EQ(38, env->GetArrayLength(reinterpret_cast(o38))); EXPECT_EQ(39, env->GetArrayLength(reinterpret_cast(o39))); EXPECT_EQ(40, env->GetArrayLength(reinterpret_cast(o40))); EXPECT_EQ(41, env->GetArrayLength(reinterpret_cast(o41))); EXPECT_EQ(42, env->GetArrayLength(reinterpret_cast(o42))); EXPECT_EQ(43, env->GetArrayLength(reinterpret_cast(o43))); EXPECT_EQ(44, env->GetArrayLength(reinterpret_cast(o44))); EXPECT_EQ(45, env->GetArrayLength(reinterpret_cast(o45))); EXPECT_EQ(46, env->GetArrayLength(reinterpret_cast(o46))); EXPECT_EQ(47, env->GetArrayLength(reinterpret_cast(o47))); EXPECT_EQ(48, env->GetArrayLength(reinterpret_cast(o48))); EXPECT_EQ(49, env->GetArrayLength(reinterpret_cast(o49))); EXPECT_EQ(50, env->GetArrayLength(reinterpret_cast(o50))); EXPECT_EQ(51, env->GetArrayLength(reinterpret_cast(o51))); EXPECT_EQ(52, env->GetArrayLength(reinterpret_cast(o52))); EXPECT_EQ(53, env->GetArrayLength(reinterpret_cast(o53))); EXPECT_EQ(54, env->GetArrayLength(reinterpret_cast(o54))); EXPECT_EQ(55, env->GetArrayLength(reinterpret_cast(o55))); EXPECT_EQ(56, env->GetArrayLength(reinterpret_cast(o56))); EXPECT_EQ(57, env->GetArrayLength(reinterpret_cast(o57))); EXPECT_EQ(58, env->GetArrayLength(reinterpret_cast(o58))); EXPECT_EQ(59, env->GetArrayLength(reinterpret_cast(o59))); EXPECT_EQ(60, env->GetArrayLength(reinterpret_cast(o60))); EXPECT_EQ(61, env->GetArrayLength(reinterpret_cast(o61))); EXPECT_EQ(62, env->GetArrayLength(reinterpret_cast(o62))); EXPECT_EQ(63, env->GetArrayLength(reinterpret_cast(o63))); EXPECT_EQ(64, env->GetArrayLength(reinterpret_cast(o64))); EXPECT_EQ(65, env->GetArrayLength(reinterpret_cast(o65))); EXPECT_EQ(66, env->GetArrayLength(reinterpret_cast(o66))); EXPECT_EQ(67, env->GetArrayLength(reinterpret_cast(o67))); EXPECT_EQ(68, env->GetArrayLength(reinterpret_cast(o68))); EXPECT_EQ(69, env->GetArrayLength(reinterpret_cast(o69))); EXPECT_EQ(70, env->GetArrayLength(reinterpret_cast(o70))); EXPECT_EQ(71, env->GetArrayLength(reinterpret_cast(o71))); EXPECT_EQ(72, env->GetArrayLength(reinterpret_cast(o72))); EXPECT_EQ(73, env->GetArrayLength(reinterpret_cast(o73))); EXPECT_EQ(74, env->GetArrayLength(reinterpret_cast(o74))); EXPECT_EQ(75, env->GetArrayLength(reinterpret_cast(o75))); EXPECT_EQ(76, 
env->GetArrayLength(reinterpret_cast(o76))); EXPECT_EQ(77, env->GetArrayLength(reinterpret_cast(o77))); EXPECT_EQ(78, env->GetArrayLength(reinterpret_cast(o78))); EXPECT_EQ(79, env->GetArrayLength(reinterpret_cast(o79))); EXPECT_EQ(80, env->GetArrayLength(reinterpret_cast(o80))); EXPECT_EQ(81, env->GetArrayLength(reinterpret_cast(o81))); EXPECT_EQ(82, env->GetArrayLength(reinterpret_cast(o82))); EXPECT_EQ(83, env->GetArrayLength(reinterpret_cast(o83))); EXPECT_EQ(84, env->GetArrayLength(reinterpret_cast(o84))); EXPECT_EQ(85, env->GetArrayLength(reinterpret_cast(o85))); EXPECT_EQ(86, env->GetArrayLength(reinterpret_cast(o86))); EXPECT_EQ(87, env->GetArrayLength(reinterpret_cast(o87))); EXPECT_EQ(88, env->GetArrayLength(reinterpret_cast(o88))); EXPECT_EQ(89, env->GetArrayLength(reinterpret_cast(o89))); EXPECT_EQ(90, env->GetArrayLength(reinterpret_cast(o90))); EXPECT_EQ(91, env->GetArrayLength(reinterpret_cast(o91))); EXPECT_EQ(92, env->GetArrayLength(reinterpret_cast(o92))); EXPECT_EQ(93, env->GetArrayLength(reinterpret_cast(o93))); EXPECT_EQ(94, env->GetArrayLength(reinterpret_cast(o94))); EXPECT_EQ(95, env->GetArrayLength(reinterpret_cast(o95))); EXPECT_EQ(96, env->GetArrayLength(reinterpret_cast(o96))); EXPECT_EQ(97, env->GetArrayLength(reinterpret_cast(o97))); EXPECT_EQ(98, env->GetArrayLength(reinterpret_cast(o98))); EXPECT_EQ(99, env->GetArrayLength(reinterpret_cast(o99))); EXPECT_EQ(100, env->GetArrayLength(reinterpret_cast(o100))); EXPECT_EQ(101, env->GetArrayLength(reinterpret_cast(o101))); EXPECT_EQ(102, env->GetArrayLength(reinterpret_cast(o102))); EXPECT_EQ(103, env->GetArrayLength(reinterpret_cast(o103))); EXPECT_EQ(104, env->GetArrayLength(reinterpret_cast(o104))); EXPECT_EQ(105, env->GetArrayLength(reinterpret_cast(o105))); EXPECT_EQ(106, env->GetArrayLength(reinterpret_cast(o106))); EXPECT_EQ(107, env->GetArrayLength(reinterpret_cast(o107))); EXPECT_EQ(108, env->GetArrayLength(reinterpret_cast(o108))); EXPECT_EQ(109, env->GetArrayLength(reinterpret_cast(o109))); EXPECT_EQ(110, env->GetArrayLength(reinterpret_cast(o110))); EXPECT_EQ(111, env->GetArrayLength(reinterpret_cast(o111))); EXPECT_EQ(112, env->GetArrayLength(reinterpret_cast(o112))); EXPECT_EQ(113, env->GetArrayLength(reinterpret_cast(o113))); EXPECT_EQ(114, env->GetArrayLength(reinterpret_cast(o114))); EXPECT_EQ(115, env->GetArrayLength(reinterpret_cast(o115))); EXPECT_EQ(116, env->GetArrayLength(reinterpret_cast(o116))); EXPECT_EQ(117, env->GetArrayLength(reinterpret_cast(o117))); EXPECT_EQ(118, env->GetArrayLength(reinterpret_cast(o118))); EXPECT_EQ(119, env->GetArrayLength(reinterpret_cast(o119))); EXPECT_EQ(120, env->GetArrayLength(reinterpret_cast(o120))); EXPECT_EQ(121, env->GetArrayLength(reinterpret_cast(o121))); EXPECT_EQ(122, env->GetArrayLength(reinterpret_cast(o122))); EXPECT_EQ(123, env->GetArrayLength(reinterpret_cast(o123))); EXPECT_EQ(124, env->GetArrayLength(reinterpret_cast(o124))); EXPECT_EQ(125, env->GetArrayLength(reinterpret_cast(o125))); EXPECT_EQ(126, env->GetArrayLength(reinterpret_cast(o126))); EXPECT_EQ(127, env->GetArrayLength(reinterpret_cast(o127))); EXPECT_EQ(128, env->GetArrayLength(reinterpret_cast(o128))); EXPECT_EQ(129, env->GetArrayLength(reinterpret_cast(o129))); EXPECT_EQ(130, env->GetArrayLength(reinterpret_cast(o130))); EXPECT_EQ(131, env->GetArrayLength(reinterpret_cast(o131))); EXPECT_EQ(132, env->GetArrayLength(reinterpret_cast(o132))); EXPECT_EQ(133, env->GetArrayLength(reinterpret_cast(o133))); EXPECT_EQ(134, env->GetArrayLength(reinterpret_cast(o134))); EXPECT_EQ(135, 
env->GetArrayLength(reinterpret_cast(o135))); EXPECT_EQ(136, env->GetArrayLength(reinterpret_cast(o136))); EXPECT_EQ(137, env->GetArrayLength(reinterpret_cast(o137))); EXPECT_EQ(138, env->GetArrayLength(reinterpret_cast(o138))); EXPECT_EQ(139, env->GetArrayLength(reinterpret_cast(o139))); EXPECT_EQ(140, env->GetArrayLength(reinterpret_cast(o140))); EXPECT_EQ(141, env->GetArrayLength(reinterpret_cast(o141))); EXPECT_EQ(142, env->GetArrayLength(reinterpret_cast(o142))); EXPECT_EQ(143, env->GetArrayLength(reinterpret_cast(o143))); EXPECT_EQ(144, env->GetArrayLength(reinterpret_cast(o144))); EXPECT_EQ(145, env->GetArrayLength(reinterpret_cast(o145))); EXPECT_EQ(146, env->GetArrayLength(reinterpret_cast(o146))); EXPECT_EQ(147, env->GetArrayLength(reinterpret_cast(o147))); EXPECT_EQ(148, env->GetArrayLength(reinterpret_cast(o148))); EXPECT_EQ(149, env->GetArrayLength(reinterpret_cast(o149))); EXPECT_EQ(150, env->GetArrayLength(reinterpret_cast(o150))); EXPECT_EQ(151, env->GetArrayLength(reinterpret_cast(o151))); EXPECT_EQ(152, env->GetArrayLength(reinterpret_cast(o152))); EXPECT_EQ(153, env->GetArrayLength(reinterpret_cast(o153))); EXPECT_EQ(154, env->GetArrayLength(reinterpret_cast(o154))); EXPECT_EQ(155, env->GetArrayLength(reinterpret_cast(o155))); EXPECT_EQ(156, env->GetArrayLength(reinterpret_cast(o156))); EXPECT_EQ(157, env->GetArrayLength(reinterpret_cast(o157))); EXPECT_EQ(158, env->GetArrayLength(reinterpret_cast(o158))); EXPECT_EQ(159, env->GetArrayLength(reinterpret_cast(o159))); EXPECT_EQ(160, env->GetArrayLength(reinterpret_cast(o160))); EXPECT_EQ(161, env->GetArrayLength(reinterpret_cast(o161))); EXPECT_EQ(162, env->GetArrayLength(reinterpret_cast(o162))); EXPECT_EQ(163, env->GetArrayLength(reinterpret_cast(o163))); EXPECT_EQ(164, env->GetArrayLength(reinterpret_cast(o164))); EXPECT_EQ(165, env->GetArrayLength(reinterpret_cast(o165))); EXPECT_EQ(166, env->GetArrayLength(reinterpret_cast(o166))); EXPECT_EQ(167, env->GetArrayLength(reinterpret_cast(o167))); EXPECT_EQ(168, env->GetArrayLength(reinterpret_cast(o168))); EXPECT_EQ(169, env->GetArrayLength(reinterpret_cast(o169))); EXPECT_EQ(170, env->GetArrayLength(reinterpret_cast(o170))); EXPECT_EQ(171, env->GetArrayLength(reinterpret_cast(o171))); EXPECT_EQ(172, env->GetArrayLength(reinterpret_cast(o172))); EXPECT_EQ(173, env->GetArrayLength(reinterpret_cast(o173))); EXPECT_EQ(174, env->GetArrayLength(reinterpret_cast(o174))); EXPECT_EQ(175, env->GetArrayLength(reinterpret_cast(o175))); EXPECT_EQ(176, env->GetArrayLength(reinterpret_cast(o176))); EXPECT_EQ(177, env->GetArrayLength(reinterpret_cast(o177))); EXPECT_EQ(178, env->GetArrayLength(reinterpret_cast(o178))); EXPECT_EQ(179, env->GetArrayLength(reinterpret_cast(o179))); EXPECT_EQ(180, env->GetArrayLength(reinterpret_cast(o180))); EXPECT_EQ(181, env->GetArrayLength(reinterpret_cast(o181))); EXPECT_EQ(182, env->GetArrayLength(reinterpret_cast(o182))); EXPECT_EQ(183, env->GetArrayLength(reinterpret_cast(o183))); EXPECT_EQ(184, env->GetArrayLength(reinterpret_cast(o184))); EXPECT_EQ(185, env->GetArrayLength(reinterpret_cast(o185))); EXPECT_EQ(186, env->GetArrayLength(reinterpret_cast(o186))); EXPECT_EQ(187, env->GetArrayLength(reinterpret_cast(o187))); EXPECT_EQ(188, env->GetArrayLength(reinterpret_cast(o188))); EXPECT_EQ(189, env->GetArrayLength(reinterpret_cast(o189))); EXPECT_EQ(190, env->GetArrayLength(reinterpret_cast(o190))); EXPECT_EQ(191, env->GetArrayLength(reinterpret_cast(o191))); EXPECT_EQ(192, env->GetArrayLength(reinterpret_cast(o192))); EXPECT_EQ(193, 
env->GetArrayLength(reinterpret_cast(o193))); EXPECT_EQ(194, env->GetArrayLength(reinterpret_cast(o194))); EXPECT_EQ(195, env->GetArrayLength(reinterpret_cast(o195))); EXPECT_EQ(196, env->GetArrayLength(reinterpret_cast(o196))); EXPECT_EQ(197, env->GetArrayLength(reinterpret_cast(o197))); EXPECT_EQ(198, env->GetArrayLength(reinterpret_cast(o198))); EXPECT_EQ(199, env->GetArrayLength(reinterpret_cast(o199))); EXPECT_EQ(200, env->GetArrayLength(reinterpret_cast(o200))); EXPECT_EQ(201, env->GetArrayLength(reinterpret_cast(o201))); EXPECT_EQ(202, env->GetArrayLength(reinterpret_cast(o202))); EXPECT_EQ(203, env->GetArrayLength(reinterpret_cast(o203))); EXPECT_EQ(204, env->GetArrayLength(reinterpret_cast(o204))); EXPECT_EQ(205, env->GetArrayLength(reinterpret_cast(o205))); EXPECT_EQ(206, env->GetArrayLength(reinterpret_cast(o206))); EXPECT_EQ(207, env->GetArrayLength(reinterpret_cast(o207))); EXPECT_EQ(208, env->GetArrayLength(reinterpret_cast(o208))); EXPECT_EQ(209, env->GetArrayLength(reinterpret_cast(o209))); EXPECT_EQ(210, env->GetArrayLength(reinterpret_cast(o210))); EXPECT_EQ(211, env->GetArrayLength(reinterpret_cast(o211))); EXPECT_EQ(212, env->GetArrayLength(reinterpret_cast(o212))); EXPECT_EQ(213, env->GetArrayLength(reinterpret_cast(o213))); EXPECT_EQ(214, env->GetArrayLength(reinterpret_cast(o214))); EXPECT_EQ(215, env->GetArrayLength(reinterpret_cast(o215))); EXPECT_EQ(216, env->GetArrayLength(reinterpret_cast(o216))); EXPECT_EQ(217, env->GetArrayLength(reinterpret_cast(o217))); EXPECT_EQ(218, env->GetArrayLength(reinterpret_cast(o218))); EXPECT_EQ(219, env->GetArrayLength(reinterpret_cast(o219))); EXPECT_EQ(220, env->GetArrayLength(reinterpret_cast(o220))); EXPECT_EQ(221, env->GetArrayLength(reinterpret_cast(o221))); EXPECT_EQ(222, env->GetArrayLength(reinterpret_cast(o222))); EXPECT_EQ(223, env->GetArrayLength(reinterpret_cast(o223))); EXPECT_EQ(224, env->GetArrayLength(reinterpret_cast(o224))); EXPECT_EQ(225, env->GetArrayLength(reinterpret_cast(o225))); EXPECT_EQ(226, env->GetArrayLength(reinterpret_cast(o226))); EXPECT_EQ(227, env->GetArrayLength(reinterpret_cast(o227))); EXPECT_EQ(228, env->GetArrayLength(reinterpret_cast(o228))); EXPECT_EQ(229, env->GetArrayLength(reinterpret_cast(o229))); EXPECT_EQ(230, env->GetArrayLength(reinterpret_cast(o230))); EXPECT_EQ(231, env->GetArrayLength(reinterpret_cast(o231))); EXPECT_EQ(232, env->GetArrayLength(reinterpret_cast(o232))); EXPECT_EQ(233, env->GetArrayLength(reinterpret_cast(o233))); EXPECT_EQ(234, env->GetArrayLength(reinterpret_cast(o234))); EXPECT_EQ(235, env->GetArrayLength(reinterpret_cast(o235))); EXPECT_EQ(236, env->GetArrayLength(reinterpret_cast(o236))); EXPECT_EQ(237, env->GetArrayLength(reinterpret_cast(o237))); EXPECT_EQ(238, env->GetArrayLength(reinterpret_cast(o238))); EXPECT_EQ(239, env->GetArrayLength(reinterpret_cast(o239))); EXPECT_EQ(240, env->GetArrayLength(reinterpret_cast(o240))); EXPECT_EQ(241, env->GetArrayLength(reinterpret_cast(o241))); EXPECT_EQ(242, env->GetArrayLength(reinterpret_cast(o242))); EXPECT_EQ(243, env->GetArrayLength(reinterpret_cast(o243))); EXPECT_EQ(244, env->GetArrayLength(reinterpret_cast(o244))); EXPECT_EQ(245, env->GetArrayLength(reinterpret_cast(o245))); EXPECT_EQ(246, env->GetArrayLength(reinterpret_cast(o246))); EXPECT_EQ(247, env->GetArrayLength(reinterpret_cast(o247))); EXPECT_EQ(248, env->GetArrayLength(reinterpret_cast(o248))); EXPECT_EQ(249, env->GetArrayLength(reinterpret_cast(o249))); EXPECT_EQ(250, env->GetArrayLength(reinterpret_cast(o250))); EXPECT_EQ(251, 
env->GetArrayLength(reinterpret_cast(o251))); EXPECT_EQ(252, env->GetArrayLength(reinterpret_cast(o252))); EXPECT_EQ(253, env->GetArrayLength(reinterpret_cast(o253))); } } const char* longSig = "(Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" 
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;" "Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V"; void JniCompilerTest::MaxParamNumberImpl() { SetUpForTest(false, "maxParamNumber", longSig, CURRENT_JNI_WRAPPER(Java_MyClassNatives_maxParamNumber)); jvalue args[254]; // First test: test with all arguments null. for (int i = 0; i < 254; ++i) { args[i].l = nullptr; } env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); // Second test: test with int[] objects with increasing lengths for (int i = 0; i < 254; ++i) { jintArray tmp = env_->NewIntArray(i); args[i].l = tmp; EXPECT_NE(args[i].l, nullptr); } env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args); } JNI_TEST(MaxParamNumber) void JniCompilerTest::WithoutImplementationImpl() { // This will lead to error messages in the log. ScopedLogSeverity sls(LogSeverity::FATAL); SetUpForTest(false, "withoutImplementation", "()V", NORMAL_JNI_ONLY_NULLPTR); env_->CallVoidMethod(jobj_, jmethod_); EXPECT_TRUE(Thread::Current()->IsExceptionPending()); EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); } // TODO: Don't test @FastNative here since it goes through a stub lookup (unsupported) which would // normally fail with an exception, but fails with an assert. JNI_TEST_NORMAL_ONLY(WithoutImplementation) void JniCompilerTest::WithoutImplementationRefReturnImpl() { // This will lead to error messages in the log. ScopedLogSeverity sls(LogSeverity::FATAL); SetUpForTest(false, "withoutImplementationRefReturn", "()Ljava/lang/Object;", NORMAL_JNI_ONLY_NULLPTR); env_->CallObjectMethod(jobj_, jmethod_); EXPECT_TRUE(Thread::Current()->IsExceptionPending()); EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE); } // TODO: Should work for @FastNative too. 
JNI_TEST_NORMAL_ONLY(WithoutImplementationRefReturn)

void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass,
                                            jint i1, jint i2, jint i3, jint i4, jint i5,
                                            jint i6, jint i7, jint i8, jint i9, jint i10,
                                            jfloat f1, jfloat f2, jfloat f3, jfloat f4, jfloat f5,
                                            jfloat f6, jfloat f7, jfloat f8, jfloat f9,
                                            jfloat f10) {
  EXPECT_EQ(i1, 1); EXPECT_EQ(i2, 2); EXPECT_EQ(i3, 3); EXPECT_EQ(i4, 4); EXPECT_EQ(i5, 5);
  EXPECT_EQ(i6, 6); EXPECT_EQ(i7, 7); EXPECT_EQ(i8, 8); EXPECT_EQ(i9, 9); EXPECT_EQ(i10, 10);
  jint i11 = bit_cast<jint, jfloat>(f1);  EXPECT_EQ(i11, 11);
  jint i12 = bit_cast<jint, jfloat>(f2);  EXPECT_EQ(i12, 12);
  jint i13 = bit_cast<jint, jfloat>(f3);  EXPECT_EQ(i13, 13);
  jint i14 = bit_cast<jint, jfloat>(f4);  EXPECT_EQ(i14, 14);
  jint i15 = bit_cast<jint, jfloat>(f5);  EXPECT_EQ(i15, 15);
  jint i16 = bit_cast<jint, jfloat>(f6);  EXPECT_EQ(i16, 16);
  jint i17 = bit_cast<jint, jfloat>(f7);  EXPECT_EQ(i17, 17);
  jint i18 = bit_cast<jint, jfloat>(f8);  EXPECT_EQ(i18, 18);
  jint i19 = bit_cast<jint, jfloat>(f9);  EXPECT_EQ(i19, 19);
  jint i20 = bit_cast<jint, jfloat>(f10); EXPECT_EQ(i20, 20);
}

void JniCompilerTest::StackArgsIntsFirstImpl() {
  SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsIntsFirst));
  jint i1 = 1; jint i2 = 2; jint i3 = 3; jint i4 = 4; jint i5 = 5;
  jint i6 = 6; jint i7 = 7; jint i8 = 8; jint i9 = 9; jint i10 = 10;
  jfloat f1 = bit_cast<jfloat, jint>(11);
  jfloat f2 = bit_cast<jfloat, jint>(12);
  jfloat f3 = bit_cast<jfloat, jint>(13);
  jfloat f4 = bit_cast<jfloat, jint>(14);
  jfloat f5 = bit_cast<jfloat, jint>(15);
  jfloat f6 = bit_cast<jfloat, jint>(16);
  jfloat f7 = bit_cast<jfloat, jint>(17);
  jfloat f8 = bit_cast<jfloat, jint>(18);
  jfloat f9 = bit_cast<jfloat, jint>(19);
  jfloat f10 = bit_cast<jfloat, jint>(20);

  env_->CallStaticVoidMethod(jklass_, jmethod_, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10,
                             f1, f2, f3, f4, f5, f6, f7, f8, f9, f10);
}

JNI_TEST_CRITICAL(StackArgsIntsFirst)

void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass,
                                              jfloat f1, jfloat f2, jfloat f3, jfloat f4,
                                              jfloat f5, jfloat f6, jfloat f7, jfloat f8,
                                              jfloat f9, jfloat f10,
                                              jint i1, jint i2, jint i3, jint i4, jint i5,
                                              jint i6, jint i7, jint i8, jint i9, jint i10) {
  EXPECT_EQ(i1, 1); EXPECT_EQ(i2, 2); EXPECT_EQ(i3, 3); EXPECT_EQ(i4, 4); EXPECT_EQ(i5, 5);
  EXPECT_EQ(i6, 6); EXPECT_EQ(i7, 7); EXPECT_EQ(i8, 8); EXPECT_EQ(i9, 9); EXPECT_EQ(i10, 10);
  jint i11 = bit_cast<jint, jfloat>(f1);  EXPECT_EQ(i11, 11);
  jint i12 = bit_cast<jint, jfloat>(f2);  EXPECT_EQ(i12, 12);
  jint i13 = bit_cast<jint, jfloat>(f3);  EXPECT_EQ(i13, 13);
  jint i14 = bit_cast<jint, jfloat>(f4);  EXPECT_EQ(i14, 14);
  jint i15 = bit_cast<jint, jfloat>(f5);  EXPECT_EQ(i15, 15);
  jint i16 = bit_cast<jint, jfloat>(f6);  EXPECT_EQ(i16, 16);
  jint i17 = bit_cast<jint, jfloat>(f7);  EXPECT_EQ(i17, 17);
  jint i18 = bit_cast<jint, jfloat>(f8);  EXPECT_EQ(i18, 18);
  jint i19 = bit_cast<jint, jfloat>(f9);  EXPECT_EQ(i19, 19);
  jint i20 = bit_cast<jint, jfloat>(f10); EXPECT_EQ(i20, 20);
}

void JniCompilerTest::StackArgsFloatsFirstImpl() {
  SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsFloatsFirst));
  jint i1 = 1; jint i2 = 2; jint i3 = 3; jint i4 = 4; jint i5 = 5;
  jint i6 = 6; jint i7 = 7; jint i8 = 8; jint i9 = 9; jint i10 = 10;
  jfloat f1 = bit_cast<jfloat, jint>(11);
  jfloat f2 = bit_cast<jfloat, jint>(12);
  jfloat f3 = bit_cast<jfloat, jint>(13);
  jfloat f4 = bit_cast<jfloat, jint>(14);
  jfloat f5 = bit_cast<jfloat, jint>(15);
  jfloat f6 = bit_cast<jfloat, jint>(16);
  jfloat f7 = bit_cast<jfloat, jint>(17);
  jfloat f8 = bit_cast<jfloat, jint>(18);
  jfloat f9 = bit_cast<jfloat, jint>(19);
  jfloat f10 = bit_cast<jfloat, jint>(20);

  env_->CallStaticVoidMethod(jklass_, jmethod_, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10,
                             i1, i2, i3, i4, i5, i6, i7, i8, i9, i10);
}

JNI_TEST_CRITICAL(StackArgsFloatsFirst)

void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass,
                                        jint i1, jfloat f1, jint i2, jfloat f2, jint i3, jfloat f3,
                                        jint i4, jfloat f4, jint i5, jfloat f5, jint i6, jfloat f6,
                                        jint i7, jfloat f7, jint i8, jfloat f8, jint i9, jfloat f9,
                                        jint i10, jfloat f10) {
  EXPECT_EQ(i1, 1); EXPECT_EQ(i2, 2); EXPECT_EQ(i3, 3); EXPECT_EQ(i4, 4); EXPECT_EQ(i5, 5);
  EXPECT_EQ(i6, 6); EXPECT_EQ(i7, 7); EXPECT_EQ(i8, 8); EXPECT_EQ(i9, 9); EXPECT_EQ(i10, 10);
  jint i11 = bit_cast<jint, jfloat>(f1);  EXPECT_EQ(i11, 11);
  jint i12 = bit_cast<jint, jfloat>(f2);  EXPECT_EQ(i12, 12);
  jint i13 = bit_cast<jint, jfloat>(f3);  EXPECT_EQ(i13, 13);
  jint i14 = bit_cast<jint, jfloat>(f4);  EXPECT_EQ(i14, 14);
  jint i15 = bit_cast<jint, jfloat>(f5);  EXPECT_EQ(i15, 15);
  jint i16 = bit_cast<jint, jfloat>(f6);  EXPECT_EQ(i16, 16);
  jint i17 = bit_cast<jint, jfloat>(f7);  EXPECT_EQ(i17, 17);
  jint i18 = bit_cast<jint, jfloat>(f8);  EXPECT_EQ(i18, 18);
  jint i19 = bit_cast<jint, jfloat>(f9);  EXPECT_EQ(i19, 19);
  jint i20 = bit_cast<jint, jfloat>(f10); EXPECT_EQ(i20, 20);
}

void JniCompilerTest::StackArgsMixedImpl() {
  SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_stackArgsMixed));
  jint i1 = 1; jint i2 = 2; jint i3 = 3; jint i4 = 4; jint i5 = 5;
  jint i6 = 6; jint i7 = 7; jint i8 = 8; jint i9 = 9; jint i10 = 10;
  jfloat f1 = bit_cast<jfloat, jint>(11);
  jfloat f2 = bit_cast<jfloat, jint>(12);
  jfloat f3 = bit_cast<jfloat, jint>(13);
  jfloat f4 = bit_cast<jfloat, jint>(14);
  jfloat f5 = bit_cast<jfloat, jint>(15);
  jfloat f6 = bit_cast<jfloat, jint>(16);
  jfloat f7 = bit_cast<jfloat, jint>(17);
  jfloat f8 = bit_cast<jfloat, jint>(18);
  jfloat f9 = bit_cast<jfloat, jint>(19);
  jfloat f10 = bit_cast<jfloat, jint>(20);

  env_->CallStaticVoidMethod(jklass_, jmethod_, i1, f1, i2, f2, i3, f3, i4, f4, i5, f5, i6, f6,
                             i7, f7, i8, f8, i9, f9, i10, f10);
}

JNI_TEST_CRITICAL(StackArgsMixed)

#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
// Function will fetch the last argument passed from caller that is now on top of the stack and
// return it as an 8-byte long. That way we can test if the caller has properly sign-extended the
// value when placing it on the stack.
__attribute__((naked))
jlong Java_MyClassNatives_getStackArgSignExtendedMips64(
    JNIEnv*, jclass,                     // Arguments passed from caller
    jint, jint, jint, jint, jint, jint,  // through regs a0 to a7.
    jint) {                              // The last argument will be passed on the stack.
  __asm__(
      ".set noreorder\n\t"       // Just return and store 8 bytes from the top of the stack
      "jr $ra\n\t"               // in v0 (in branch delay slot). This should be the last
      "ld $v0, 0($sp)\n\t");     // argument. It is a 32-bit int, but it should be sign
                                 // extended and it occupies 64-bit location.
}

void JniCompilerTest::StackArgsSignExtendedMips64Impl() {
  uint64_t ret;
  SetUpForTest(true, "getStackArgSignExtendedMips64", "(IIIIIII)J",
               // Don't use wrapper because this is raw assembly function.
               reinterpret_cast<void*>(&Java_MyClassNatives_getStackArgSignExtendedMips64));

  // Mips64 ABI requires that arguments passed through stack be sign-extended 8B slots.
  // First 8 arguments are passed through registers.
  // Final argument's value is 7. When sign-extended, higher stack bits should be 0.
  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, 7);
  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0));

  // Final argument's value is -8. When sign-extended, higher stack bits should be 0xffffffff.
  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, -8);
  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0xffffffff));
}

JNI_TEST(StackArgsSignExtendedMips64)
#endif

void Java_MyClassNatives_normalNative(JNIEnv*, jclass) {
  // Intentionally left empty.
}

// Methods not annotated with anything are not considered "fast native"
// -- Check that the annotation lookup does not find it.
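// A hypothetical sketch of the Java-side declarations, for illustration only (the real ones
// live in MyClassNatives.java; the annotations come from dalvik.annotation.optimization):
//   native void normalNative();                             // no annotation -> normal JNI
//   @FastNative native void fastNative();                   // faster transition, keeps JNIEnv
//   @CriticalNative static native void criticalNative();    // no JNIEnv/jclass parameters at all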
void JniCompilerTest::NormalNativeImpl() {
  SetUpForTest(/* direct */ true,
               "normalNative",
               "()V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));

  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
  ASSERT_TRUE(method != nullptr);

  EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
  EXPECT_FALSE(method->IsAnnotatedWithFastNative());
}

// TODO: just rename the java functions to the standard convention and remove duplicated tests
JNI_TEST_NORMAL_ONLY(NormalNative)

// Methods annotated with @FastNative are considered "fast native"
// -- Check that the annotation lookup succeeds.
void Java_MyClassNatives_fastNative(JNIEnv*, jclass) {
  // Intentionally left empty.
}

void JniCompilerTest::FastNativeImpl() {
  SetUpForTest(/* direct */ true,
               "fastNative",
               "()V",
               CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));

  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
  ASSERT_TRUE(method != nullptr);

  EXPECT_FALSE(method->IsAnnotatedWithCriticalNative());
  EXPECT_TRUE(method->IsAnnotatedWithFastNative());
}

// TODO: just rename the java functions to the standard convention and remove duplicated tests
JNI_TEST_NORMAL_ONLY(FastNative)

int gJava_myClassNatives_criticalNative_calls[kJniKindCount] = {};
// Methods annotated with @CriticalNative are considered "critical native"
// -- Check that the annotation lookup succeeds.
void Java_MyClassNatives_criticalNative() {
  gJava_myClassNatives_criticalNative_calls[gCurrentJni]++;
}

void JniCompilerTest::CriticalNativeImpl() {
  SetUpForTest(/* direct */ true,
               // Important: Don't change the "current jni" yet to avoid a method name suffix.
               "criticalNative",
               "()V",
               // TODO: Use CURRENT_JNI_WRAPPER instead which is more generic.
               reinterpret_cast<void*>(&Java_MyClassNatives_criticalNative));

  // TODO: remove this manual updating of the current JNI. Merge with the other tests.
  UpdateCurrentJni(JniKind::kCritical);
  ASSERT_TRUE(IsCurrentJniCritical());

  ArtMethod* method = jni::DecodeArtMethod(jmethod_);
  ASSERT_TRUE(method != nullptr);

  EXPECT_TRUE(method->IsAnnotatedWithCriticalNative());
  EXPECT_FALSE(method->IsAnnotatedWithFastNative());

  EXPECT_EQ(0, gJava_myClassNatives_criticalNative_calls[gCurrentJni]);
  env_->CallStaticVoidMethod(jklass_, jmethod_);
  EXPECT_EQ(1, gJava_myClassNatives_criticalNative_calls[gCurrentJni]);

  gJava_myClassNatives_criticalNative_calls[gCurrentJni] = 0;
}

// TODO: just rename the java functions to the standard convention and remove duplicated tests
JNI_TEST_NORMAL_ONLY(CriticalNative)

}  // namespace art
android-platform-art-8.1.0+r23/compiler/jni/quick/000077500000000000000000000000001336577252300216735ustar00rootroot00000000000000
android-platform-art-8.1.0+r23/compiler/jni/quick/arm/000077500000000000000000000000001336577252300224525ustar00rootroot00000000000000
android-platform-art-8.1.0+r23/compiler/jni/quick/arm/calling_convention_arm.cc000066400000000000000000000430641336577252300275020ustar00rootroot00000000000000
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ #include "base/logging.h" #include "calling_convention_arm.h" #include "handle_scope-inl.h" #include "utils/arm/managed_register_arm.h" namespace art { namespace arm { static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size"); // // JNI calling convention constants. // // List of parameters passed via registers for JNI. // JNI uses soft-float, so there is only a GPR list. static const Register kJniArgumentRegisters[] = { R0, R1, R2, R3 }; static const size_t kJniArgumentRegisterCount = arraysize(kJniArgumentRegisters); // // Managed calling convention constants. // // Used by hard float. (General purpose registers.) static const Register kHFCoreArgumentRegisters[] = { R0, R1, R2, R3 }; // (VFP single-precision registers.) static const SRegister kHFSArgumentRegisters[] = { S0, S1, S2, S3, S4, S5, S6, S7, S8, S9, S10, S11, S12, S13, S14, S15 }; // (VFP double-precision registers.) static const DRegister kHFDArgumentRegisters[] = { D0, D1, D2, D3, D4, D5, D6, D7 }; static_assert(arraysize(kHFDArgumentRegisters) * 2 == arraysize(kHFSArgumentRegisters), "ks d argument registers mismatch"); // // Shared managed+JNI calling convention constants. // static constexpr ManagedRegister kCalleeSaveRegisters[] = { // Core registers. ArmManagedRegister::FromCoreRegister(R5), ArmManagedRegister::FromCoreRegister(R6), ArmManagedRegister::FromCoreRegister(R7), ArmManagedRegister::FromCoreRegister(R8), ArmManagedRegister::FromCoreRegister(R10), ArmManagedRegister::FromCoreRegister(R11), // Hard float registers. ArmManagedRegister::FromSRegister(S16), ArmManagedRegister::FromSRegister(S17), ArmManagedRegister::FromSRegister(S18), ArmManagedRegister::FromSRegister(S19), ArmManagedRegister::FromSRegister(S20), ArmManagedRegister::FromSRegister(S21), ArmManagedRegister::FromSRegister(S22), ArmManagedRegister::FromSRegister(S23), ArmManagedRegister::FromSRegister(S24), ArmManagedRegister::FromSRegister(S25), ArmManagedRegister::FromSRegister(S26), ArmManagedRegister::FromSRegister(S27), ArmManagedRegister::FromSRegister(S28), ArmManagedRegister::FromSRegister(S29), ArmManagedRegister::FromSRegister(S30), ArmManagedRegister::FromSRegister(S31) }; static constexpr uint32_t CalculateCoreCalleeSpillMask() { // LR is a special callee save which is not reported by CalleeSaveRegisters(). 
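  // For the callee saves listed above -- R5-R8, R10, R11 -- plus LR (R14), this should
  // evaluate (at compile time, since the function is constexpr) to
  //   (1 << 14) | (1 << 11) | (1 << 10) | (1 << 8) | (1 << 7) | (1 << 6) | (1 << 5) = 0x4DE0.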
uint32_t result = 1 << LR; for (auto&& r : kCalleeSaveRegisters) { if (r.AsArm().IsCoreRegister()) { result |= (1 << r.AsArm().AsCoreRegister()); } } return result; } static constexpr uint32_t CalculateFpCalleeSpillMask() { uint32_t result = 0; for (auto&& r : kCalleeSaveRegisters) { if (r.AsArm().IsSRegister()) { result |= (1 << r.AsArm().AsSRegister()); } } return result; } static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(); static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(); // Calling convention ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() { return ArmManagedRegister::FromCoreRegister(IP); // R12 } ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() { return ArmManagedRegister::FromCoreRegister(IP); // R12 } ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() { if (kArm32QuickCodeUseSoftFloat) { switch (GetShorty()[0]) { case 'V': return ArmManagedRegister::NoRegister(); case 'D': case 'J': return ArmManagedRegister::FromRegisterPair(R0_R1); default: return ArmManagedRegister::FromCoreRegister(R0); } } else { switch (GetShorty()[0]) { case 'V': return ArmManagedRegister::NoRegister(); case 'D': return ArmManagedRegister::FromDRegister(D0); case 'F': return ArmManagedRegister::FromSRegister(S0); case 'J': return ArmManagedRegister::FromRegisterPair(R0_R1); default: return ArmManagedRegister::FromCoreRegister(R0); } } } ManagedRegister ArmJniCallingConvention::ReturnRegister() { switch (GetShorty()[0]) { case 'V': return ArmManagedRegister::NoRegister(); case 'D': case 'J': return ArmManagedRegister::FromRegisterPair(R0_R1); default: return ArmManagedRegister::FromCoreRegister(R0); } } ManagedRegister ArmJniCallingConvention::IntReturnRegister() { return ArmManagedRegister::FromCoreRegister(R0); } // Managed runtime calling convention ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() { return ArmManagedRegister::FromCoreRegister(R0); } bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() { return false; // Everything moved to stack on entry. } bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() { return true; } ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() { LOG(FATAL) << "Should not reach here"; return ManagedRegister::NoRegister(); } FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); FrameOffset result = FrameOffset(displacement_.Int32Value() + // displacement kFramePointerSize + // Method* (itr_slots_ * kFramePointerSize)); // offset into in args return result; } const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() { // We spill the argument registers on ARM to free them up for scratch use, we then assume // all arguments are on the stack. if (kArm32QuickCodeUseSoftFloat) { if (entry_spills_.size() == 0) { size_t num_spills = NumArgs() + NumLongOrDoubleArgs(); if (num_spills > 0) { entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1)); if (num_spills > 1) { entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2)); if (num_spills > 2) { entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3)); } } } } } else { if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { uint32_t gpr_index = 1; // R0 ~ R3. Reserve r0 for ArtMethod*. uint32_t fpr_index = 0; // S0 ~ S15. uint32_t fpr_double_index = 0; // D0 ~ D7. 
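  // (The VFP register files alias: D<n> overlays S<2n>/S<2n+1> for D0-D15. The max()
  //  bookkeeping below keeps the two indices consistent, so a float never lands inside an
  //  S-register pair already claimed by a double, and doubles stay on even S boundaries.)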
// JNI calling convention

ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
                                                 bool is_synchronized,
                                                 bool is_critical_native,
                                                 const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArmPointerSize) {
  // AAPCS 4.1 specifies fundamental alignments for each type. All of our stack arguments are
  // usually 4-byte aligned, however longs and doubles must be 8 bytes aligned. Add padding to
  // maintain 8-byte alignment invariant.
  //
  // Compute padding to ensure longs and doubles are not split in AAPCS.
  size_t shift = 0;

  size_t cur_arg, cur_reg;
  if (LIKELY(HasExtraArgumentsForJni())) {
    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
    // We start at the aligned register r2.
    //
    // Ignore the first 2 parameters because they are guaranteed to be aligned.
    cur_arg = NumImplicitArgs();  // Skip the "this" argument.
    cur_reg = 2;  // Skip {r0=JNIEnv, r1=jobject} / {r0=JNIEnv, r1=jclass} parameters (start at r2).
  } else {
    // Check every parameter.
    cur_arg = 0;
    cur_reg = 0;
  }

  // TODO: Maybe should just use IsCurrentParamALongOrDouble instead to be cleaner?
  // (this just seems like an unnecessary micro-optimization).

  // Shift across a logical register mapping that looks like:
  //
  //   | r0 | r1 | r2 | r3 | SP | SP+4 | SP+8 | SP+12 | ... | SP+n | SP+n+4 |
  //
  // (where SP is some arbitrary stack pointer that our 0th stack arg would go into).
  //
  // Any time there would normally be a long/double in an odd logical register,
  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
  //
  // This works for both physical register pairs {r0, r1}, {r2, r3} and for when
  // the value is on the stack.
  //
  // For example:
  // (a) long would normally go into r1, but we shift it into r2
  //
  //        | INT | (PAD) | LONG      |
  //        | r0  |  r1   |  r2  | r3 |
  //
  // (b) long would normally go into r3, but we shift it into SP
  //
  //        | INT | INT | INT | (PAD) | LONG      |
  //        | r0  | r1  | r2  |  r3   | SP+4 SP+8 |
  //
  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
  for (; cur_arg < NumArgs(); cur_arg++) {
    if (IsParamALongOrDouble(cur_arg)) {
      if ((cur_reg & 1) != 0) {  // Check that it's in a logical contiguous register pair.
        shift += 4;
        cur_reg++;  // Additional bump to ensure alignment.
      }
      cur_reg += 2;  // Bump the iterator twice for every long argument.
    } else {
      cur_reg++;  // Bump the iterator for every non-long argument.
    }
  }

  if (cur_reg < kJniArgumentRegisterCount) {
    // As a special case when, as a result of shifting (or not), there are no arguments
    // on the stack, we actually have 0 stack padding.
    //
    // For example with @CriticalNative and:
    // (int, long) -> shifts the long but doesn't need to pad the stack
    //
    //        shift
    //         \/
    //  | INT | (PAD) | LONG      | (EMPTY) ...
    //  | r0  |  r1   |  r2  | r3 |   SP    ...
    //  /\
    //  no stack padding
    padding_ = 0;
  } else {
    padding_ = shift;
  }

  // TODO: add some new JNI tests for @CriticalNative that introduced new edge cases
  // (a) Using r0,r1 pair = f(long,...)
  // (b) Shifting r1 long into r2,r3 pair = f(int, long, int, ...);
  // (c) Shifting but not introducing a stack padding = f(int, long);
}
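// Illustrative only (not in the original source): tracing the constructor above for a
// static (non-@CriticalNative) native method with shorty "VIJ", i.e. void f(int, long):
//   cur_reg starts at 2 (r0 = JNIEnv*, r1 = jclass).
//   int  -> cur_reg 2 -> 3
//   long -> cur_reg 3 is odd, so shift += 4 and cur_reg -> 4; then cur_reg -> 6
//   cur_reg (6) >= kJniArgumentRegisterCount (4), so padding_ = shift = 4.
// The 4 bytes of padding keep the long 8-byte aligned once it lands on the stack,
// analogous to example (b) above.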
uint32_t ArmJniCallingConvention::CoreSpillMask() const {
  // Compute spill mask to agree with callee saves initialized in the constructor.
  return kCoreCalleeSpillMask;
}

uint32_t ArmJniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
  return ArmManagedRegister::FromCoreRegister(R2);
}

size_t ArmJniCallingConvention::FrameSize() {
  // Method*, LR and callee save area size, local reference segment state.
  const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
  const size_t lr_return_addr_size = kFramePointerSize;
  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
  size_t frame_data_size = method_ptr_size + lr_return_addr_size + callee_save_area_size;

  if (LIKELY(HasLocalReferenceSegmentState())) {
    // Local reference segment state.
    frame_data_size += kFramePointerSize;
    // TODO: Probably better to use sizeof(IRTSegmentState) here...
  }

  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header.
  const size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // HandleScope is sometimes excluded.
    total_size += handle_scope_size;  // Handle scope size.
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}

size_t ArmJniCallingConvention::OutArgSize() {
  // TODO: Identical to x86_64 except for also adding additional padding.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_, kStackAlignment);
}

ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}
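// Illustrative only (not in the original source): plugging in the constants above for a
// static native method with one reference argument and an int return, assuming the usual
// 32-bit ART values (kFramePointerSize == 4, kStackAlignment == 16, and the 22 callee
// saves declared above: 6 core + 16 VFP):
//   frame_data_size   = 4 (Method*) + 4 (LR) + 22 * 4 (callee saves) + 4 (cookie) = 100
//   handle_scope_size = 4 (link_) + 4 (number_of_references_) + 2 * 4 (jclass + 1 ref) = 16
//   total             = 100 + 16 + 4 (return value) = 120 -> RoundUp(120, 16) = 128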
// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
// in even register numbers and stack slots.
void ArmJniCallingConvention::Next() {
  // Update the iterator by usual JNI rules.
  JniCallingConvention::Next();

  if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam.
    // Ensure slot is 8-byte aligned for longs/doubles (AAPCS).
    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
      // itr_slots_ needs to be an even number, according to AAPCS.
      itr_slots_++;
    }
  }
}

bool ArmJniCallingConvention::IsCurrentParamInRegister() {
  return itr_slots_ < kJniArgumentRegisterCount;
}

bool ArmJniCallingConvention::IsCurrentParamOnStack() {
  return !IsCurrentParamInRegister();
}

ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
  CHECK_LT(itr_slots_, kJniArgumentRegisterCount);
  if (IsCurrentParamALongOrDouble()) {
    // AAPCS 5.1.1 requires 64-bit values to be in a consecutive register pair:
    // "A double-word sized type is passed in two consecutive registers (e.g., r0 and r1, or
    // r2 and r3). The content of the registers is as if the value had been loaded from memory
    // representation with a single LDM instruction."
    if (itr_slots_ == 0u) {
      return ArmManagedRegister::FromRegisterPair(R0_R1);
    } else if (itr_slots_ == 2u) {
      return ArmManagedRegister::FromRegisterPair(R2_R3);
    } else {
      // The register can either be R0 (+R1) or R2 (+R3). Cannot be other values.
      LOG(FATAL) << "Invalid iterator register position for a long/double " << itr_args_;
      UNREACHABLE();
    }
  } else {
    // All other types can fit into one register.
    return ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
  }
}

FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
  CHECK_GE(itr_slots_, kJniArgumentRegisterCount);
  size_t offset =
      displacement_.Int32Value()
          - OutArgSize()
          + ((itr_slots_ - kJniArgumentRegisterCount) * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
}

size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() {
  size_t static_args = HasSelfClass() ? 1 : 0;  // Count jclass.
  // Regular argument parameters and this.
  size_t param_args = NumArgs() + NumLongOrDoubleArgs();  // Twice count 8-byte args.
  // XX: Why is the long/double counted twice but not JNIEnv* ???
  // Count JNIEnv*, less arguments in registers.
  size_t internal_args = (HasJniEnv() ? 1 : 0 /* jni env */);
  size_t total_args = static_args + param_args + internal_args;

  return total_args - std::min(kJniArgumentRegisterCount, static_cast<size_t>(total_args));

  // TODO: Very similar to x86_64 except for the return pc.
}

}  // namespace arm
}  // namespace art
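// Illustrative only (not in the original source): iterating the JNI convention above for
// a static native method with shorty "VIJ" (JNIEnv*, jclass, int, long):
//   slot 0 -> JNIEnv* in R0, slot 1 -> jclass in R1, slot 2 -> int in R2.
//   Next() then aligns the long up from slot 3 to slot 4, so IsCurrentParamInRegister()
//   is false and the long is read from the out-args area at
//   displacement_ - OutArgSize() + (4 - 4) * kFramePointerSize.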
// ===== android-platform-art-8.1.0+r23/compiler/jni/quick/arm/calling_convention_arm.h =====

/*
 * Copyright (C) 2011 The Android Open Source Project
 * Licensed under the Apache License, Version 2.0 (same notice as above).
 */

#ifndef ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
#define ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_

#include "base/enums.h"
#include "jni/quick/calling_convention.h"

namespace art {
namespace arm {

constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);

class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
 public:
  ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k32) {}
  ~ArmManagedRuntimeCallingConvention() OVERRIDE {}
  // Calling convention
  ManagedRegister ReturnRegister() OVERRIDE;
  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
  // Managed runtime calling convention
  ManagedRegister MethodRegister() OVERRIDE;
  bool IsCurrentParamInRegister() OVERRIDE;
  bool IsCurrentParamOnStack() OVERRIDE;
  ManagedRegister CurrentParamRegister() OVERRIDE;
  FrameOffset CurrentParamStackOffset() OVERRIDE;
  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;

 private:
  ManagedRegisterEntrySpills entry_spills_;

  DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};

class ArmJniCallingConvention FINAL : public JniCallingConvention {
 public:
  ArmJniCallingConvention(bool is_static,
                          bool is_synchronized,
                          bool is_critical_native,
                          const char* shorty);
  ~ArmJniCallingConvention() OVERRIDE {}
  // Calling convention
  ManagedRegister ReturnRegister() OVERRIDE;
  ManagedRegister IntReturnRegister() OVERRIDE;
  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
  // JNI calling convention
  void Next() OVERRIDE;  // Override default behavior for AAPCS.
  size_t FrameSize() OVERRIDE;
  size_t OutArgSize() OVERRIDE;
  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
  ManagedRegister ReturnScratchRegister() const OVERRIDE;
  uint32_t CoreSpillMask() const OVERRIDE;
  uint32_t FpSpillMask() const OVERRIDE;
  bool IsCurrentParamInRegister() OVERRIDE;
  bool IsCurrentParamOnStack() OVERRIDE;
  ManagedRegister CurrentParamRegister() OVERRIDE;
  FrameOffset CurrentParamStackOffset() OVERRIDE;

  // AAPCS mandates return values are extended.
  bool RequiresSmallResultTypeExtension() const OVERRIDE {
    return false;
  }

 protected:
  size_t NumberOfOutgoingStackArgs() OVERRIDE;

 private:
  // Padding to ensure longs and doubles are not split in AAPCS.
  size_t padding_;

  DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention);
};

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_JNI_QUICK_ARM_CALLING_CONVENTION_ARM_H_
// ===== android-platform-art-8.1.0+r23/compiler/jni/quick/arm64/calling_convention_arm64.cc =====

/*
 * Copyright (C) 2014 The Android Open Source Project
 * Licensed under the Apache License, Version 2.0 (same notice as above).
 */

#include "base/logging.h"
#include "calling_convention_arm64.h"
#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"

namespace art {
namespace arm64 {

static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");

// Up to how many float-like (float, double) args can be enregistered.
// The rest of the args must go on the stack.
constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
// enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxIntLikeRegisterArguments = 8u;

static const XRegister kXArgumentRegisters[] = {
  X0, X1, X2, X3, X4, X5, X6, X7
};

static const WRegister kWArgumentRegisters[] = {
  W0, W1, W2, W3, W4, W5, W6, W7
};

static const DRegister kDArgumentRegisters[] = {
  D0, D1, D2, D3, D4, D5, D6, D7
};

static const SRegister kSArgumentRegisters[] = {
  S0, S1, S2, S3, S4, S5, S6, S7
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    // Note: The native jni function may call to some VM runtime functions which may suspend
    // or trigger GC. And the jni method frame will become the top quick frame in those cases.
    // So we need to satisfy GC by saving LR and callee-save registers, which is similar to the
    // CalleeSaveMethod(RefOnly) frame.
    // Jni function is the native function which the java code wants to call.
    // Jni method is the method that is compiled by the jni compiler.
    // Call chain: managed code(java) --> jni method --> jni function.
    // Thread register(X19) is saved on stack.
    Arm64ManagedRegister::FromXRegister(X19),
    Arm64ManagedRegister::FromXRegister(X20),
    Arm64ManagedRegister::FromXRegister(X21),
    Arm64ManagedRegister::FromXRegister(X22),
    Arm64ManagedRegister::FromXRegister(X23),
    Arm64ManagedRegister::FromXRegister(X24),
    Arm64ManagedRegister::FromXRegister(X25),
    Arm64ManagedRegister::FromXRegister(X26),
    Arm64ManagedRegister::FromXRegister(X27),
    Arm64ManagedRegister::FromXRegister(X28),
    Arm64ManagedRegister::FromXRegister(X29),
    Arm64ManagedRegister::FromXRegister(LR),
    // Hard float registers.
    // Considering the case java_method_1 --> jni method --> jni function --> java_method_2,
    // we may break on java_method_2 and we still need to find out the values of DEX registers
    // in java_method_1. So all callee-saves (in managed code) need to be saved.
    Arm64ManagedRegister::FromDRegister(D8),
    Arm64ManagedRegister::FromDRegister(D9),
    Arm64ManagedRegister::FromDRegister(D10),
    Arm64ManagedRegister::FromDRegister(D11),
    Arm64ManagedRegister::FromDRegister(D12),
    Arm64ManagedRegister::FromDRegister(D13),
    Arm64ManagedRegister::FromDRegister(D14),
    Arm64ManagedRegister::FromDRegister(D15),
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  uint32_t result = 0u;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsXRegister()) {
      result |= (1 << r.AsArm64().AsXRegister());
    }
  }
  return result;
}

static constexpr uint32_t CalculateFpCalleeSpillMask() {
  uint32_t result = 0;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsArm64().IsDRegister()) {
      result |= (1 << r.AsArm64().AsDRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
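// Illustrative only (not in the original source): with the register set above, the masks
// evaluate to fixed values. Core: X19-X29 plus LR (X30) give bits 19..30:
//   kCoreCalleeSpillMask == 0x7FF80000
// FP: D8-D15 give bits 8..15:
//   kFpCalleeSpillMask == 0xFF00
// (Assumes the usual ARM64 register numbering, X19 == 19, ..., LR == 30.)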
// Calling convention

ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers, it is reserved as Marking Register,
  //   and thus does not actually need to be saved/restored; it is
  //   refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame).
  return Arm64ManagedRegister::FromXRegister(X20);
}

ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
  // X20 is safe to use as a scratch register:
  // - with Baker read barriers, it is reserved as Marking Register,
  //   and thus does not actually need to be saved/restored; it is
  //   refreshed on exit (see Arm64JNIMacroAssembler::RemoveFrame);
  // - in other cases, it is saved on entry (in
  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
  //   Arm64JNIMacroAssembler::RemoveFrame).
  return Arm64ManagedRegister::FromXRegister(X20);
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F') {
    return Arm64ManagedRegister::FromSRegister(S0);
  } else if (shorty[0] == 'D') {
    return Arm64ManagedRegister::FromDRegister(D0);
  } else if (shorty[0] == 'J') {
    return Arm64ManagedRegister::FromXRegister(X0);
  } else if (shorty[0] == 'V') {
    return Arm64ManagedRegister::NoRegister();
  } else {
    return Arm64ManagedRegister::FromWRegister(W0);
  }
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
  return Arm64ManagedRegister::FromWRegister(W0);
}

// Managed runtime calling convention

ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
  return Arm64ManagedRegister::FromXRegister(X0);
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +       // displacement
                  kFramePointerSize +                // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}

const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on ARM64 to free them up for scratch use; we then assume
  // all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int gp_reg_index = 1;  // We start from X1/W1; X0 holds ArtMethod*.
    int fp_reg_index = 0;  // D0/S0.

    // We need to choose the correct register (D/S or X/W) since the managed
    // stack uses 32bit stack slots.
    ResetIterator(FrameOffset(0));
    while (HasNext()) {
      if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
        if (fp_reg_index < 8) {
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
          }
          fp_reg_index++;
        } else {  // Just increase the stack offset.
          if (!IsCurrentParamADouble()) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          }
        }
      } else {  // GP regs.
        if (gp_reg_index < 8) {
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg_index]));
          } else {
            entry_spills_.push_back(
                Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
          }
          gp_reg_index++;
        } else {  // Just increase the stack offset.
          if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
          } else {
            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
          }
        }
      }
      Next();
    }
  }
  return entry_spills_;
}
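// Illustrative only (not in the original source): a hand trace of EntrySpills() above for
// an instance method with shorty "DIJF", i.e. double f(int, long, float):
//   this  -> W1 (a reference occupies a 32-bit managed stack slot)
//   int   -> W2
//   long  -> X3
//   float -> S0
// entry_spills_ becomes {W1, W2, X3, S0}; the D0 return register needs no spill.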
// JNI calling convention

Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                     bool is_synchronized,
                                                     bool is_critical_native,
                                                     const char* shorty)
    : JniCallingConvention(is_static,
                           is_synchronized,
                           is_critical_native,
                           shorty,
                           kArm64PointerSize) {
}

uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
  return kCoreCalleeSpillMask;
}

uint32_t Arm64JniCallingConvention::FpSpillMask() const {
  return kFpCalleeSpillMask;
}

ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
  return ManagedRegister::NoRegister();
}

size_t Arm64JniCallingConvention::FrameSize() {
  // Method*, callee save area size, local reference segment state.
  //
  // (Unlike x86_64, do not include the return address, and the segment state is uint32
  // instead of pointer).
  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
  size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;

  size_t frame_data_size = method_ptr_size + callee_save_area_size;
  if (LIKELY(HasLocalReferenceSegmentState())) {
    frame_data_size += sizeof(uint32_t);
  }

  // References plus 2 words for the HandleScope header.
  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());

  size_t total_size = frame_data_size;
  if (LIKELY(HasHandleScope())) {
    // HandleScope is sometimes excluded.
    total_size += handle_scope_size;  // Handle scope size.
  }

  // Plus return value spill area size.
  total_size += SizeOfReturnValue();

  return RoundUp(total_size, kStackAlignment);
}
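// Illustrative only (not in the original source): plugging in the constants above for a
// static native method with no reference arguments and an int return, assuming the usual
// 64-bit ART values (kFramePointerSize == 8, kStackAlignment == 16, and the 20 callee
// saves declared above: 12 core + 8 FP):
//   frame_data_size   = 8 (Method*) + 20 * 8 (callee saves) + 4 (segment state) = 172
//   handle_scope_size = 8 (link_) + 4 (number_of_references_) + 1 * 4 (jclass)  = 16
//   total             = 172 + 16 + 4 (return value) = 192 -> RoundUp(192, 16) = 192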
size_t Arm64JniCallingConvention::OutArgSize() {
  // Same as X86_64.
  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}

ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
  // Same as X86_64.
  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
}

bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
  if (IsCurrentParamAFloatOrDouble()) {
    return (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments);
  } else {
    return ((itr_args_ - itr_float_and_doubles_) < kMaxIntLikeRegisterArguments);
  }
  // TODO: Can we just call CurrentParamRegister to figure this out?
}

bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
  // Is this ever not the same for all the architectures?
  return !IsCurrentParamInRegister();
}

ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
  CHECK(IsCurrentParamInRegister());
  if (IsCurrentParamAFloatOrDouble()) {
    CHECK_LT(itr_float_and_doubles_, kMaxFloatOrDoubleRegisterArguments);
    if (IsCurrentParamADouble()) {
      return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
    } else {
      return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
    }
  } else {
    int gp_reg = itr_args_ - itr_float_and_doubles_;
    CHECK_LT(static_cast<size_t>(gp_reg), kMaxIntLikeRegisterArguments);
    if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
      return Arm64ManagedRegister::FromXRegister(kXArgumentRegisters[gp_reg]);
    } else {
      return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
    }
  }
}

FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  size_t args_on_stack =
      itr_args_
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(itr_float_and_doubles_))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(itr_args_ - itr_float_and_doubles_));
  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
  CHECK_LT(offset, OutArgSize());
  return FrameOffset(offset);
  // TODO: Seems identical to X86_64 code.
}

size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
  // All arguments including JNI args.
  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();

  DCHECK_GE(all_args, NumFloatOrDoubleArgs());

  size_t all_stack_args =
      all_args
      - std::min(kMaxFloatOrDoubleRegisterArguments,
                 static_cast<size_t>(NumFloatOrDoubleArgs()))
      - std::min(kMaxIntLikeRegisterArguments,
                 static_cast<size_t>(all_args - NumFloatOrDoubleArgs()));

  // TODO: Seems similar to X86_64 code except it doesn't count the return pc.

  return all_stack_args;
}

}  // namespace arm64
}  // namespace art
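// Illustrative only (not in the original source): NumberOfOutgoingStackArgs() above for a
// static native method with ten int parameters:
//   all_args   = 10 + 2 (JNIEnv*, jclass) = 12, none of them float/double
//   stack args = 12 - 0 - min(8, 12) = 4
// i.e. the first eight int-like values travel in W/X registers and four spill to the stack.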
// ===== android-platform-art-8.1.0+r23/compiler/jni/quick/arm64/calling_convention_arm64.h =====

/*
 * Copyright (C) 2014 The Android Open Source Project
 * Licensed under the Apache License, Version 2.0 (same notice as above).
 */

#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_

#include "base/enums.h"
#include "jni/quick/calling_convention.h"

namespace art {
namespace arm64 {

constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);

class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
 public:
  Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k64) {}
  ~Arm64ManagedRuntimeCallingConvention() OVERRIDE {}
  // Calling convention
  ManagedRegister ReturnRegister() OVERRIDE;
  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
  // Managed runtime calling convention
  ManagedRegister MethodRegister() OVERRIDE;
  bool IsCurrentParamInRegister() OVERRIDE;
  bool IsCurrentParamOnStack() OVERRIDE;
  ManagedRegister CurrentParamRegister() OVERRIDE;
  FrameOffset CurrentParamStackOffset() OVERRIDE;
  const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;

 private:
  ManagedRegisterEntrySpills entry_spills_;

  DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};

class Arm64JniCallingConvention FINAL : public JniCallingConvention {
 public:
  Arm64JniCallingConvention(bool is_static,
                            bool is_synchronized,
                            bool is_critical_native,
                            const char* shorty);
  ~Arm64JniCallingConvention() OVERRIDE {}
  // Calling convention
  ManagedRegister ReturnRegister() OVERRIDE;
  ManagedRegister IntReturnRegister() OVERRIDE;
  ManagedRegister InterproceduralScratchRegister() OVERRIDE;
  // JNI calling convention
  size_t FrameSize() OVERRIDE;
  size_t OutArgSize() OVERRIDE;
  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const OVERRIDE;
  ManagedRegister ReturnScratchRegister() const OVERRIDE;
  uint32_t CoreSpillMask() const OVERRIDE;
  uint32_t FpSpillMask() const OVERRIDE;
  bool IsCurrentParamInRegister() OVERRIDE;
  bool IsCurrentParamOnStack() OVERRIDE;
  ManagedRegister CurrentParamRegister() OVERRIDE;
  FrameOffset CurrentParamStackOffset() OVERRIDE;

  // The aarch64 calling convention leaves upper bits undefined.
  bool RequiresSmallResultTypeExtension() const OVERRIDE {
    return true;
  }

 protected:
  size_t NumberOfOutgoingStackArgs() OVERRIDE;

 private:
  DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
};

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
*/ #include "calling_convention.h" #include "base/logging.h" #ifdef ART_ENABLE_CODEGEN_arm #include "jni/quick/arm/calling_convention_arm.h" #endif #ifdef ART_ENABLE_CODEGEN_arm64 #include "jni/quick/arm64/calling_convention_arm64.h" #endif #ifdef ART_ENABLE_CODEGEN_mips #include "jni/quick/mips/calling_convention_mips.h" #endif #ifdef ART_ENABLE_CODEGEN_mips64 #include "jni/quick/mips64/calling_convention_mips64.h" #endif #ifdef ART_ENABLE_CODEGEN_x86 #include "jni/quick/x86/calling_convention_x86.h" #endif #ifdef ART_ENABLE_CODEGEN_x86_64 #include "jni/quick/x86_64/calling_convention_x86_64.h" #endif namespace art { // Managed runtime calling convention std::unique_ptr ManagedRuntimeCallingConvention::Create( ArenaAllocator* arena, bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) { switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: return std::unique_ptr( new (arena) arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_arm64 case kArm64: return std::unique_ptr( new (arena) arm64::Arm64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips case kMips: return std::unique_ptr( new (arena) mips::MipsManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_mips64 case kMips64: return std::unique_ptr( new (arena) mips64::Mips64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: return std::unique_ptr( new (arena) x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty)); #endif #ifdef ART_ENABLE_CODEGEN_x86_64 case kX86_64: return std::unique_ptr( new (arena) x86_64::X86_64ManagedRuntimeCallingConvention( is_static, is_synchronized, shorty)); #endif default: LOG(FATAL) << "Unknown InstructionSet: " << instruction_set; UNREACHABLE(); } } bool ManagedRuntimeCallingConvention::HasNext() { return itr_args_ < NumArgs(); } void ManagedRuntimeCallingConvention::Next() { CHECK(HasNext()); if (IsCurrentArgExplicit() && // don't query parameter type of implicit args IsParamALongOrDouble(itr_args_)) { itr_longs_and_doubles_++; itr_slots_++; } if (IsParamAFloatOrDouble(itr_args_)) { itr_float_and_doubles_++; } if (IsCurrentParamAReference()) { itr_refs_++; } itr_args_++; itr_slots_++; } bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() { // Static methods have no implicit arguments, others implicitly pass this return IsStatic() || (itr_args_ != 0); } bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() { return IsCurrentArgExplicit(); // any user parameter may be null } size_t ManagedRuntimeCallingConvention::CurrentParamSize() { return ParamSize(itr_args_); } bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() { return IsParamAReference(itr_args_); } bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() { return IsParamAFloatOrDouble(itr_args_); } bool ManagedRuntimeCallingConvention::IsCurrentParamADouble() { return IsParamADouble(itr_args_); } bool ManagedRuntimeCallingConvention::IsCurrentParamALong() { return IsParamALong(itr_args_); } // JNI calling convention std::unique_ptr JniCallingConvention::Create(ArenaAllocator* arena, bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty, InstructionSet instruction_set) { switch (instruction_set) { #ifdef ART_ENABLE_CODEGEN_arm case kArm: case kThumb2: return 
size_t JniCallingConvention::ReferenceCount() const {
  return NumReferenceArgs() + (IsStatic() ? 1 : 0);
}

FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
  size_t references_size = handle_scope_pointer_size_ * ReferenceCount();  // Size excluding header.
  return FrameOffset(HandleReferencesOffset().Int32Value() + references_size);
}

FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
  if (LIKELY(HasHandleScope())) {
    // Initial offset already includes the displacement.
    // -- Remove the additional local reference cookie offset if we don't have a handle scope.
    const size_t saved_local_reference_cookie_offset =
        SavedLocalReferenceCookieOffset().Int32Value();
    // Segment state is 4 bytes long.
    const size_t segment_state_size = 4;
    return FrameOffset(saved_local_reference_cookie_offset + segment_state_size);
  } else {
    // Include only the initial Method* as part of the offset.
    CHECK_LT(displacement_.SizeValue(),
             static_cast<size_t>(std::numeric_limits<int32_t>::max()));
    return FrameOffset(displacement_.Int32Value() + static_cast<size_t>(frame_pointer_size_));
  }
}

bool JniCallingConvention::HasNext() {
  if (IsCurrentArgExtraForJni()) {
    return true;
  } else {
    unsigned int arg_pos = GetIteratorPositionWithinShorty();
    return arg_pos < NumArgs();
  }
}

void JniCallingConvention::Next() {
  CHECK(HasNext());
  if (IsCurrentParamALong() || IsCurrentParamADouble()) {
    itr_longs_and_doubles_++;
    itr_slots_++;
  }
  if (IsCurrentParamAFloatOrDouble()) {
    itr_float_and_doubles_++;
  }
  if (IsCurrentParamAReference()) {
    itr_refs_++;
  }
  // This default/fallthrough case also covers the extra JNIEnv* argument,
  // as well as any other single-slot primitives.
  itr_args_++;
  itr_slots_++;
}
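// Illustrative only (not in the original source): for a convention with two handle scope
// references, handle_scope_pointer_size_ is sizeof(StackReference<mirror::Object>) == 4, so
//   SavedLocalReferenceCookieOffset() = HandleReferencesOffset() + 2 * 4
//   ReturnValueSaveLocation()         = cookie offset + 4 (segment state)
// when a handle scope is present.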
bool JniCallingConvention::IsCurrentParamAReference() {
  bool return_value;
  if (SwitchExtraJniArguments(itr_args_,
                              false,  // JNIEnv*
                              true,   // jobject or jclass
                              /* out parameters */
                              &return_value)) {
    return return_value;
  } else {
    int arg_pos = GetIteratorPositionWithinShorty();
    return IsParamAReference(arg_pos);
  }
}

bool JniCallingConvention::IsCurrentParamJniEnv() {
  if (UNLIKELY(!HasJniEnv())) {
    return false;
  }
  return (itr_args_ == kJniEnv);
}

bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
  bool return_value;
  if (SwitchExtraJniArguments(itr_args_,
                              false,  // jnienv*
                              false,  // jobject or jclass
                              /* out parameters */
                              &return_value)) {
    return return_value;
  } else {
    int arg_pos = GetIteratorPositionWithinShorty();
    return IsParamAFloatOrDouble(arg_pos);
  }
}

bool JniCallingConvention::IsCurrentParamADouble() {
  bool return_value;
  if (SwitchExtraJniArguments(itr_args_,
                              false,  // jnienv*
                              false,  // jobject or jclass
                              /* out parameters */
                              &return_value)) {
    return return_value;
  } else {
    int arg_pos = GetIteratorPositionWithinShorty();
    return IsParamADouble(arg_pos);
  }
}

bool JniCallingConvention::IsCurrentParamALong() {
  bool return_value;
  if (SwitchExtraJniArguments(itr_args_,
                              false,  // jnienv*
                              false,  // jobject or jclass
                              /* out parameters */
                              &return_value)) {
    return return_value;
  } else {
    int arg_pos = GetIteratorPositionWithinShorty();
    return IsParamALong(arg_pos);
  }
}

// Return position of handle scope entry holding reference at the current iterator
// position.
FrameOffset JniCallingConvention::CurrentParamHandleScopeEntryOffset() {
  CHECK(IsCurrentParamAReference());
  CHECK_LT(HandleScopeLinkOffset(), HandleScopeNumRefsOffset());
  int result = HandleReferencesOffset().Int32Value() + itr_refs_ * handle_scope_pointer_size_;
  CHECK_GT(result, HandleScopeNumRefsOffset().Int32Value());
  return FrameOffset(result);
}

size_t JniCallingConvention::CurrentParamSize() const {
  if (IsCurrentArgExtraForJni()) {
    return static_cast<size_t>(frame_pointer_size_);  // JNIEnv or jobject/jclass
  } else {
    int arg_pos = GetIteratorPositionWithinShorty();
    return ParamSize(arg_pos);
  }
}

size_t JniCallingConvention::NumberOfExtraArgumentsForJni() const {
  if (LIKELY(HasExtraArgumentsForJni())) {
    // The first argument is the JNIEnv*.
    // Static methods have an extra argument which is the jclass.
    return IsStatic() ? 2 : 1;
  } else {
    // Critical natives exclude the JNIEnv and the jclass/this parameters.
    return 0;
  }
}

bool JniCallingConvention::HasHandleScope() const {
  // Exclude HandleScope for @CriticalNative methods for optimization speed.
  return is_critical_native_ == false;
}

bool JniCallingConvention::HasLocalReferenceSegmentState() const {
  // Exclude local reference segment states for @CriticalNative methods for optimization speed.
  return is_critical_native_ == false;
}

bool JniCallingConvention::HasJniEnv() const {
  // Exclude the "JNIEnv*" parameter for @CriticalNative methods.
  return HasExtraArgumentsForJni();
}

bool JniCallingConvention::HasSelfClass() const {
  if (!IsStatic()) {
    // Virtual functions: There is never an implicit jclass parameter.
    return false;
  } else {
    // Static functions: There is an implicit jclass parameter unless it's @CriticalNative.
    return HasExtraArgumentsForJni();
  }
}

bool JniCallingConvention::HasExtraArgumentsForJni() const {
  // @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters.
  return is_critical_native_ == false;
}
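// Illustrative only (not in the original source): NumberOfExtraArgumentsForJni() above
// therefore yields
//   static, normal JNI   -> 2 (JNIEnv*, jclass)
//   instance, normal JNI -> 1 (JNIEnv*; 'this' is counted as a regular reference argument)
//   @CriticalNative      -> 0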
unsigned int JniCallingConvention::GetIteratorPositionWithinShorty() const {
  // We need to subtract out the extra JNI arguments if we want to use this iterator position
  // with the inherited CallingConvention member functions, which rely on scanning the shorty.
  // Note that our shorty does *not* include the JNIEnv, jclass/jobject parameters.
  DCHECK_GE(itr_args_, NumberOfExtraArgumentsForJni());
  return itr_args_ - NumberOfExtraArgumentsForJni();
}

bool JniCallingConvention::IsCurrentArgExtraForJni() const {
  if (UNLIKELY(!HasExtraArgumentsForJni())) {
    return false;  // If there are no extra args, we can never be an extra.
  }
  // Only parameters kJniEnv and kObjectOrClass are considered extra.
  return itr_args_ <= kObjectOrClass;
}

bool JniCallingConvention::SwitchExtraJniArguments(size_t switch_value,
                                                   bool case_jni_env,
                                                   bool case_object_or_class,
                                                   /* out parameters */
                                                   bool* return_value) const {
  DCHECK(return_value != nullptr);
  if (UNLIKELY(!HasExtraArgumentsForJni())) {
    return false;
  }
  switch (switch_value) {
    case kJniEnv:
      *return_value = case_jni_env;
      return true;
    case kObjectOrClass:
      *return_value = case_object_or_class;
      return true;
    default:
      return false;
  }
}

}  // namespace art
// ===== android-platform-art-8.1.0+r23/compiler/jni/quick/calling_convention.h =====

/*
 * Copyright (C) 2011 The Android Open Source Project
 * Licensed under the Apache License, Version 2.0 (same notice as above).
 */

#ifndef ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_

#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
#include "utils/managed_register.h"

namespace art {

// Top-level abstraction for different calling conventions.
class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConvention> {
 public:
  bool IsReturnAReference() const { return shorty_[0] == 'L'; }

  Primitive::Type GetReturnType() const { return Primitive::GetType(shorty_[0]); }

  size_t SizeOfReturnValue() const {
    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0]));
    if (result >= 1 && result < 4) {
      result = 4;
    }
    return result;
  }

  // Register that holds result of this method invocation.
  virtual ManagedRegister ReturnRegister() = 0;
  // Register reserved for scratch usage during procedure calls.
  virtual ManagedRegister InterproceduralScratchRegister() = 0;

  // Offset of Method within the frame.
  FrameOffset MethodStackOffset() {
    return displacement_;
  }

  // Iterator interface

  // Place iterator at start of arguments. The displacement is applied to
  // frame offset methods to account for frames which may be on the stack
  // below the one being iterated over.
  void ResetIterator(FrameOffset displacement) {
    displacement_ = displacement;
    itr_slots_ = 0;
    itr_args_ = 0;
    itr_refs_ = 0;
    itr_longs_and_doubles_ = 0;
    itr_float_and_doubles_ = 0;
  }

  virtual ~CallingConvention() {}

 protected:
  CallingConvention(bool is_static,
                    bool is_synchronized,
                    const char* shorty,
                    PointerSize frame_pointer_size)
      : itr_slots_(0),
        itr_refs_(0),
        itr_args_(0),
        itr_longs_and_doubles_(0),
        itr_float_and_doubles_(0),
        displacement_(0),
        frame_pointer_size_(frame_pointer_size),
        handle_scope_pointer_size_(sizeof(StackReference<mirror::Object>)),
        is_static_(is_static),
        is_synchronized_(is_synchronized),
        shorty_(shorty) {
    num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
    num_ref_args_ = is_static ? 0 : 1;  // The implicit this pointer.
    num_float_or_double_args_ = 0;
    num_long_or_double_args_ = 0;
    for (size_t i = 1; i < strlen(shorty); i++) {
      char ch = shorty_[i];
      switch (ch) {
        case 'L':
          num_ref_args_++;
          break;
        case 'J':
          num_long_or_double_args_++;
          break;
        case 'D':
          num_long_or_double_args_++;
          num_float_or_double_args_++;
          break;
        case 'F':
          num_float_or_double_args_++;
          break;
      }
    }
  }

  bool IsStatic() const {
    return is_static_;
  }
  bool IsSynchronized() const {
    return is_synchronized_;
  }
  bool IsParamALongOrDouble(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return false;  // This argument.
    }
    char ch = shorty_[param];
    return (ch == 'J' || ch == 'D');
  }
  bool IsParamAFloatOrDouble(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return false;  // This argument.
    }
    char ch = shorty_[param];
    return (ch == 'F' || ch == 'D');
  }
  bool IsParamADouble(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return false;  // This argument.
    }
    return shorty_[param] == 'D';
  }
  bool IsParamALong(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return false;  // This argument.
    }
    return shorty_[param] == 'J';
  }
  bool IsParamAReference(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return true;  // This argument.
    }
    return shorty_[param] == 'L';
  }
  size_t NumArgs() const {
    return num_args_;
  }
  // Implicit argument count: 1 for instance functions, 0 for static functions.
  // (The implicit argument is only relevant to the shorty, i.e.
  // the 0th arg is not in the shorty if it's implicit).
  size_t NumImplicitArgs() const {
    return IsStatic() ? 0 : 1;
  }
  size_t NumLongOrDoubleArgs() const {
    return num_long_or_double_args_;
  }
  size_t NumFloatOrDoubleArgs() const {
    return num_float_or_double_args_;
  }
  size_t NumReferenceArgs() const {
    return num_ref_args_;
  }
  size_t ParamSize(unsigned int param) const {
    DCHECK_LT(param, NumArgs());
    if (IsStatic()) {
      param++;  // 0th argument must skip return value at start of the shorty.
    } else if (param == 0) {
      return sizeof(mirror::HeapReference<mirror::Object>);  // This argument.
    }
    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
    if (result >= 1 && result < 4) {
      result = 4;
    }
    return result;
  }
  const char* GetShorty() const {
    return shorty_.c_str();
  }

  // The slot number for current calling_convention argument.
  // Note that each slot is 32-bit. When the current argument is bigger
  // than 32 bits, return the first slot number for this argument.
  unsigned int itr_slots_;
  // The number of references iterated past.
  unsigned int itr_refs_;
  // The argument number along argument list for current argument.
  unsigned int itr_args_;
  // Number of longs and doubles seen along argument list.
  unsigned int itr_longs_and_doubles_;
  // Number of float and doubles seen along argument list.
  unsigned int itr_float_and_doubles_;
  // Space for frames below this on the stack.
  FrameOffset displacement_;
  // The size of a pointer.
  const PointerSize frame_pointer_size_;
  // The size of a reference entry within the handle scope.
  const size_t handle_scope_pointer_size_;

 private:
  const bool is_static_;
  const bool is_synchronized_;
  std::string shorty_;
  size_t num_args_;
  size_t num_ref_args_;
  size_t num_float_or_double_args_;
  size_t num_long_or_double_args_;
};
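// Illustrative only (not in the original source): the constructor bookkeeping above for an
// instance method with shorty "JIL", i.e. long f(int, Object):
//   num_args_                = 1 (implicit this) + strlen("JIL") - 1 = 3
//   num_ref_args_            = 1 (this) + 1 ('L') = 2
//   num_long_or_double_args_ = 0, num_float_or_double_args_ = 0
// The leading 'J' is the return type and is skipped by the loop, which starts at i = 1.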
// Abstraction for managed code's calling conventions
// | { Incoming stack args } |
// | { Prior Method* }       | <-- Prior SP
// | { Return address }      |
// | { Callee saves }        |
// | { Spills ... }          |
// | { Outgoing stack args } |
// | { Method* }             | <-- SP
class ManagedRuntimeCallingConvention : public CallingConvention {
 public:
  static std::unique_ptr<ManagedRuntimeCallingConvention> Create(ArenaAllocator* arena,
                                                                 bool is_static,
                                                                 bool is_synchronized,
                                                                 const char* shorty,
                                                                 InstructionSet instruction_set);

  // Register that holds the incoming method argument.
  virtual ManagedRegister MethodRegister() = 0;

  // Iterator interface
  bool HasNext();
  void Next();
  bool IsCurrentParamAReference();
  bool IsCurrentParamAFloatOrDouble();
  bool IsCurrentParamADouble();
  bool IsCurrentParamALong();
  bool IsCurrentArgExplicit();  // i.e. a non-implicit argument such as this
  bool IsCurrentArgPossiblyNull();
  size_t CurrentParamSize();
  virtual bool IsCurrentParamInRegister() = 0;
  virtual bool IsCurrentParamOnStack() = 0;
  virtual ManagedRegister CurrentParamRegister() = 0;
  virtual FrameOffset CurrentParamStackOffset() = 0;

  virtual ~ManagedRuntimeCallingConvention() {}

  // Registers to spill to caller's out registers on entry.
  virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;

 protected:
  ManagedRuntimeCallingConvention(bool is_static,
                                  bool is_synchronized,
                                  const char* shorty,
                                  PointerSize frame_pointer_size)
      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size) {}
};

// Abstraction for JNI calling conventions
// | { Incoming stack args }         | <-- Prior SP
// | { Return address }              |
// | { Callee saves }                |     ([1])
// | { Return value spill }          |     (live on return slow paths)
// | { Local Ref. Table State }      |
// | { Stack Indirect Ref. Table     |
// |   num. refs./link }             |     (here to prior SP is frame size)
// | { Method* }                     | <-- Anchor SP written to thread
// | { Outgoing stack args }         | <-- SP at point of call
// | Native frame                    |
//
// [1] We must save all callee saves here to enable any exception throws to restore
//     callee saves for frames above this one.
class JniCallingConvention : public CallingConvention {
 public:
  static std::unique_ptr<JniCallingConvention> Create(ArenaAllocator* arena,
                                                      bool is_static,
                                                      bool is_synchronized,
                                                      bool is_critical_native,
                                                      const char* shorty,
                                                      InstructionSet instruction_set);

  // Size of frame excluding space for outgoing args (it's assumed Method* is
  // always at the bottom of a frame, but this doesn't work for outgoing
  // native args). Includes alignment.
  virtual size_t FrameSize() = 0;
  // Size of outgoing arguments (stack portion), including alignment.
  // -- Arguments that are passed via registers are excluded from this size.
  virtual size_t OutArgSize() = 0;

  // Number of references in stack indirect reference table.
  size_t ReferenceCount() const;

  // Location where the segment state of the local indirect reference table is saved.
  FrameOffset SavedLocalReferenceCookieOffset() const;

  // Location where the return value of a call can be squirreled if another
  // call is made following the native call.
  FrameOffset ReturnValueSaveLocation() const;

  // Register that holds result if it is integer.
  virtual ManagedRegister IntReturnRegister() = 0;

  // Whether the compiler needs to ensure zero-/sign-extension of a small result type.
  virtual bool RequiresSmallResultTypeExtension() const = 0;

  // Callee save registers to spill prior to native code (which may clobber).
  virtual ArrayRef<const ManagedRegister> CalleeSaveRegisters() const = 0;

  // Spill mask values.
  virtual uint32_t CoreSpillMask() const = 0;
  virtual uint32_t FpSpillMask() const = 0;

  // An extra scratch register live after the call.
  virtual ManagedRegister ReturnScratchRegister() const = 0;

  // Iterator interface
  bool HasNext();
  virtual void Next();
  bool IsCurrentParamAReference();
  bool IsCurrentParamAFloatOrDouble();
  bool IsCurrentParamADouble();
  bool IsCurrentParamALong();
  bool IsCurrentParamALongOrDouble() {
    return IsCurrentParamALong() || IsCurrentParamADouble();
  }
  bool IsCurrentParamJniEnv();
  size_t CurrentParamSize() const;
  virtual bool IsCurrentParamInRegister() = 0;
  virtual bool IsCurrentParamOnStack() = 0;
  virtual ManagedRegister CurrentParamRegister() = 0;
  virtual FrameOffset CurrentParamStackOffset() = 0;

  // Iterator interface extension for JNI.
  FrameOffset CurrentParamHandleScopeEntryOffset();

  // Position of handle scope and interior fields.
  FrameOffset HandleScopeOffset() const {
    return FrameOffset(this->displacement_.Int32Value() +
                       static_cast<size_t>(frame_pointer_size_));  // Above Method reference.
  }

  FrameOffset HandleScopeLinkOffset() const {
    return FrameOffset(HandleScopeOffset().Int32Value() +
                       HandleScope::LinkOffset(frame_pointer_size_));
  }

  FrameOffset HandleScopeNumRefsOffset() const {
    return FrameOffset(HandleScopeOffset().Int32Value() +
                       HandleScope::NumberOfReferencesOffset(frame_pointer_size_));
  }

  FrameOffset HandleReferencesOffset() const {
    return FrameOffset(HandleScopeOffset().Int32Value() +
                       HandleScope::ReferencesOffset(frame_pointer_size_));
  }

  virtual ~JniCallingConvention() {}
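  // Illustrative only (not in the original source): with a 32-bit frame pointer and
  // displacement_ == 0, the getters above resolve to
  //   HandleScopeOffset()        = 4  (just above the Method*)
  //   HandleScopeLinkOffset()    = 4 + 0
  //   HandleScopeNumRefsOffset() = 4 + 4
  //   HandleReferencesOffset()   = 4 + 8
  // assuming the HandleScope header layout is link_ followed by number_of_references_.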
 protected:
  // Named iterator positions.
  enum IteratorPos {
    kJniEnv = 0,
    kObjectOrClass = 1
  };

  JniCallingConvention(bool is_static,
                       bool is_synchronized,
                       bool is_critical_native,
                       const char* shorty,
                       PointerSize frame_pointer_size)
      : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size),
        is_critical_native_(is_critical_native) {}

  // Number of stack slots for outgoing arguments, above which the handle scope is
  // located.
  virtual size_t NumberOfOutgoingStackArgs() = 0;

 protected:
  size_t NumberOfExtraArgumentsForJni() const;

  // Does the transition have a StackHandleScope?
  bool HasHandleScope() const;
  // Does the transition have a local reference segment state?
  bool HasLocalReferenceSegmentState() const;
  // Has a JNIEnv* parameter implicitly?
  bool HasJniEnv() const;
  // Has a 'jclass' parameter implicitly?
  bool HasSelfClass() const;

  // Are there extra JNI arguments (JNIEnv* and maybe jclass)?
  bool HasExtraArgumentsForJni() const;

  // Returns the position of itr_args_, fixed up by removing the offset of extra JNI arguments.
  unsigned int GetIteratorPositionWithinShorty() const;

  // Is the current argument (at the iterator) an extra argument for JNI?
  bool IsCurrentArgExtraForJni() const;

  const bool is_critical_native_;

 private:
  // Shorthand for switching on the switch value but only IF there are extra JNI arguments.
  //
  // Puts the case value into return_value.
  // * (switch_value == kJniEnv) => case_jni_env
  // * (switch_value == kObjectOrClass) => case_object_or_class
  //
  // Returns false otherwise (or if there are no extra JNI arguments).
  bool SwitchExtraJniArguments(size_t switch_value,
                               bool case_jni_env,
                               bool case_object_or_class,
                               /* out parameters */
                               bool* return_value) const;
};

}  // namespace art

#endif  // ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_

// ===== android-platform-art-8.1.0+r23/compiler/jni/quick/jni_compiler.cc =====

/*
 * Copyright (C) 2011 The Android Open Source Project
 * Licensed under the Apache License, Version 2.0 (same notice as above).
 */

#include "jni_compiler.h"

#include <...>
#include <...>
#include <...>
#include <...>
#include <...>

#include "art_method.h"
#include "base/arena_allocator.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "memory_region.h"
#include "calling_convention.h"
#include "class_linker.h"
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_env_ext.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"
#include "utils/managed_register.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
#include "utils.h"
#include "thread.h"

#define __ jni_asm->

namespace art {

using JniOptimizationFlags = Compiler::JniOptimizationFlags;

template <PointerSize kPointerSize>
static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                          ManagedRuntimeCallingConvention* mr_conv,
                          JniCallingConvention* jni_conv,
                          size_t frame_size,
                          size_t out_arg_size);
template <PointerSize kPointerSize>
static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                               JniCallingConvention* jni_conv,
                               ManagedRegister in_reg);

template <PointerSize kPointerSize>
static std::unique_ptr<JNIMacroAssembler<kPointerSize>> GetMacroAssembler(
    ArenaAllocator* arena, InstructionSet isa, const InstructionSetFeatures* features) {
  return JNIMacroAssembler<kPointerSize>::Create(arena, isa, features);
}

enum class JniEntrypoint {
  kStart,
  kEnd
};

template <PointerSize kPointerSize>
static ThreadOffset<kPointerSize> GetJniEntrypointThreadOffset(JniEntrypoint which,
                                                               bool reference_return,
                                                               bool is_synchronized,
                                                               bool is_fast_native) {
  if (which == JniEntrypoint::kStart) {  // JniMethodStart
    ThreadOffset<kPointerSize> jni_start =
        is_synchronized
            ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStartSynchronized)
            : (is_fast_native
                   ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastStart)
                   : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart));
    return jni_start;
  } else {  // JniMethodEnd
    ThreadOffset<kPointerSize> jni_end(-1);
    if (reference_return) {
      // Pass result.
      jni_end =
          is_synchronized
              ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReferenceSynchronized)
              : (is_fast_native
                     ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEndWithReference)
                     : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReference));
    } else {
      jni_end =
          is_synchronized
              ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndSynchronized)
              : (is_fast_native
                     ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEnd)
                     : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEnd));
    }
    return jni_end;
  }
}
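// Illustrative only (not in the original source): the selection above maps, e.g.,
//   (kStart, synchronized)                 -> pJniMethodStartSynchronized
//   (kStart, @FastNative)                  -> pJniMethodFastStart
//   (kEnd, reference return, synchronized) -> pJniMethodEndWithReferenceSynchronized
//   (kEnd, no reference, plain)            -> pJniMethodEnd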
QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastStart) : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodStart)); return jni_start; } else { // JniMethodEnd ThreadOffset jni_end(-1); if (reference_return) { // Pass result. jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReferenceSynchronized) : (is_fast_native ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEndWithReference) : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndWithReference)); } else { jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEndSynchronized) : (is_fast_native ? QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodFastEnd) : QUICK_ENTRYPOINT_OFFSET(kPointerSize, pJniMethodEnd)); } return jni_end; } } // Generate the JNI bridge for the given method, general contract: // - Arguments are in the managed runtime format, either on stack or in // registers, a reference to the method object is supplied as part of this // convention. // template static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file, JniOptimizationFlags optimization_flags) { const bool is_native = (access_flags & kAccNative) != 0; CHECK(is_native); const bool is_static = (access_flags & kAccStatic) != 0; const bool is_synchronized = (access_flags & kAccSynchronized) != 0; const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx)); InstructionSet instruction_set = driver->GetInstructionSet(); const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures(); // i.e. if the method was annotated with @FastNative const bool is_fast_native = (optimization_flags == Compiler::kFastNative); // i.e. if the method was annotated with @CriticalNative bool is_critical_native = (optimization_flags == Compiler::kCriticalNative); VLOG(jni) << "JniCompile: Method :: " << dex_file.PrettyMethod(method_idx, /* with signature */ true) << " :: access_flags = " << std::hex << access_flags << std::dec; if (UNLIKELY(is_fast_native)) { VLOG(jni) << "JniCompile: Fast native method detected :: " << dex_file.PrettyMethod(method_idx, /* with signature */ true); } if (UNLIKELY(is_critical_native)) { VLOG(jni) << "JniCompile: Critical native method detected :: " << dex_file.PrettyMethod(method_idx, /* with signature */ true); } if (kIsDebugBuild) { // Don't allow both @FastNative and @CriticalNative. They are mutually exclusive. 
if (UNLIKELY(is_fast_native && is_critical_native)) { LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative" << dex_file.PrettyMethod(method_idx, /* with_signature */ true); } // @CriticalNative - extra checks: // -- Don't allow virtual criticals // -- Don't allow synchronized criticals // -- Don't allow any objects as parameter or return value if (UNLIKELY(is_critical_native)) { CHECK(is_static) << "@CriticalNative functions cannot be virtual since that would" << "require passing a reference parameter (this), which is illegal " << dex_file.PrettyMethod(method_idx, /* with_signature */ true); CHECK(!is_synchronized) << "@CriticalNative functions cannot be synchronized since that would" << "require passing a (class and/or this) reference parameter, which is illegal " << dex_file.PrettyMethod(method_idx, /* with_signature */ true); for (size_t i = 0; i < strlen(shorty); ++i) { CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i])) << "@CriticalNative methods' shorty types must not have illegal references " << dex_file.PrettyMethod(method_idx, /* with_signature */ true); } } } ArenaPool pool; ArenaAllocator arena(&pool); // Calling conventions used to iterate over parameters to method std::unique_ptr main_jni_conv = JniCallingConvention::Create(&arena, is_static, is_synchronized, is_critical_native, shorty, instruction_set); bool reference_return = main_jni_conv->IsReturnAReference(); std::unique_ptr mr_conv( ManagedRuntimeCallingConvention::Create( &arena, is_static, is_synchronized, shorty, instruction_set)); // Calling conventions to call into JNI method "end" possibly passing a returned reference, the // method and the current thread. const char* jni_end_shorty; if (reference_return && is_synchronized) { jni_end_shorty = "ILL"; } else if (reference_return) { jni_end_shorty = "IL"; } else if (is_synchronized) { jni_end_shorty = "VL"; } else { jni_end_shorty = "V"; } std::unique_ptr end_jni_conv( JniCallingConvention::Create(&arena, is_static, is_synchronized, is_critical_native, jni_end_shorty, instruction_set)); // Assembler that holds generated instructions std::unique_ptr> jni_asm = GetMacroAssembler(&arena, instruction_set, instruction_set_features); jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GenerateAnyDebugInfo()); // Offsets into data structures // TODO: if cross compiling these offsets are for the host not the target const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions)); const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter)); const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit)); // 1. Build the frame saving all callee saves, Method*, and PC return address. const size_t frame_size(main_jni_conv->FrameSize()); // Excludes outgoing args. ArrayRef callee_save_regs = main_jni_conv->CalleeSaveRegisters(); __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills()); DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast(frame_size)); if (LIKELY(!is_critical_native)) { // NOTE: @CriticalNative methods don't have a HandleScope // because they can't have any reference parameters or return values. // 2. 
Set up the HandleScope mr_conv->ResetIterator(FrameOffset(frame_size)); main_jni_conv->ResetIterator(FrameOffset(0)); __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(), main_jni_conv->ReferenceCount(), mr_conv->InterproceduralScratchRegister()); __ CopyRawPtrFromThread(main_jni_conv->HandleScopeLinkOffset(), Thread::TopHandleScopeOffset(), mr_conv->InterproceduralScratchRegister()); __ StoreStackOffsetToThread(Thread::TopHandleScopeOffset(), main_jni_conv->HandleScopeOffset(), mr_conv->InterproceduralScratchRegister()); // 3. Place incoming reference arguments into handle scope main_jni_conv->Next(); // Skip JNIEnv* // 3.5. Create Class argument for static methods out of passed method if (is_static) { FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); // Check handle scope offset is within frame CHECK_LT(handle_scope_offset.Uint32Value(), frame_size); // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod. // Note this LoadRef() does not include read barrier. It will be handled below. // // scratchRegister = *method[DeclaringClassOffset()]; __ LoadRef(main_jni_conv->InterproceduralScratchRegister(), mr_conv->MethodRegister(), ArtMethod::DeclaringClassOffset(), false); __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false); // *handleScopeOffset = scratchRegister __ StoreRef(handle_scope_offset, main_jni_conv->InterproceduralScratchRegister()); main_jni_conv->Next(); // in handle scope so move to next argument } // Place every reference into the handle scope (ignore other parameters). while (mr_conv->HasNext()) { CHECK(main_jni_conv->HasNext()); bool ref_param = main_jni_conv->IsCurrentParamAReference(); CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); // References need placing in handle scope and the entry value passing if (ref_param) { // Compute handle scope entry, note null is placed in the handle scope but its boxed value // must be null. FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); // Check handle scope offset is within frame and doesn't run into the saved segment state. CHECK_LT(handle_scope_offset.Uint32Value(), frame_size); CHECK_NE(handle_scope_offset.Uint32Value(), main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value()); bool input_in_reg = mr_conv->IsCurrentParamInRegister(); bool input_on_stack = mr_conv->IsCurrentParamOnStack(); CHECK(input_in_reg || input_on_stack); if (input_in_reg) { ManagedRegister in_reg = mr_conv->CurrentParamRegister(); __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull()); __ StoreRef(handle_scope_offset, in_reg); } else if (input_on_stack) { FrameOffset in_off = mr_conv->CurrentParamStackOffset(); __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull()); __ CopyRef(handle_scope_offset, in_off, mr_conv->InterproceduralScratchRegister()); } } mr_conv->Next(); main_jni_conv->Next(); } // 4. Write out the end of the quick frames. __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset()); // NOTE: @CriticalNative does not need to store the stack pointer to the thread // because garbage collections are disabled within the execution of a // @CriticalNative method. // (TODO: We could probably disable it for @FastNative too). } // if (!is_critical_native) // 5. Move frame down to allow space for out going args. 
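 // A rough picture of the stack at this point (a sketch for the
 // non-@CriticalNative case; higher addresses at the top, stack grows down):
 //
 //   | return address, callee saves      | \
 //   | handle scope, cookie, arg spills  |  } frame_size
 //   | ArtMethod* (callee)               | /  <- SP before this step
 //   | outgoing JNI stack arguments      |  } main_out_arg_size
 //   +-----------------------------------+    <- SP after IncreaseFrameSize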
const size_t main_out_arg_size = main_jni_conv->OutArgSize(); size_t current_out_arg_size = main_out_arg_size; __ IncreaseFrameSize(main_out_arg_size); // Call the read barrier for the declaring class loaded from the method for a static call. // Skip this for @CriticalNative because we didn't build a HandleScope to begin with. // Note that we always have outgoing param space available for at least two params. if (kUseReadBarrier && is_static && !is_critical_native) { const bool kReadBarrierFastPath = (instruction_set != kMips) && (instruction_set != kMips64); std::unique_ptr skip_cold_path_label; if (kReadBarrierFastPath) { skip_cold_path_label = __ CreateLabel(); // Fast path for supported targets. // // Check if gc_is_marking is set -- if it's not, we don't need // a read barrier so skip it. __ LoadFromThread(main_jni_conv->InterproceduralScratchRegister(), Thread::IsGcMarkingOffset(), Thread::IsGcMarkingSize()); // Jump over the slow path if gc is marking is false. __ Jump(skip_cold_path_label.get(), JNIMacroUnaryCondition::kZero, main_jni_conv->InterproceduralScratchRegister()); } // Construct slow path for read barrier: // // Call into the runtime's ReadBarrierJni and have it fix up // the object address if it was moved. ThreadOffset read_barrier = QUICK_ENTRYPOINT_OFFSET(kPointerSize, pReadBarrierJni); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); main_jni_conv->Next(); // Skip JNIEnv. FrameOffset class_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Pass the handle for the class as the first argument. if (main_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); __ CreateHandleScopeEntry(out_off, class_handle_scope_offset, mr_conv->InterproceduralScratchRegister(), false); } else { ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); __ CreateHandleScopeEntry(out_reg, class_handle_scope_offset, ManagedRegister::NoRegister(), false); } main_jni_conv->Next(); // Pass the current thread as the second argument and call. if (main_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); __ Call(main_jni_conv->CurrentParamRegister(), Offset(read_barrier), main_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), main_jni_conv->InterproceduralScratchRegister()); __ CallFromThread(read_barrier, main_jni_conv->InterproceduralScratchRegister()); } main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Reset. if (kReadBarrierFastPath) { __ Bind(skip_cold_path_label.get()); } } // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. FrameOffset locked_object_handle_scope_offset(0xBEEFDEAD); if (LIKELY(!is_critical_native)) { // Skip this for @CriticalNative methods. They do not call JniMethodStart. ThreadOffset jni_start( GetJniEntrypointThreadOffset(JniEntrypoint::kStart, reference_return, is_synchronized, is_fast_native).SizeValue()); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); locked_object_handle_scope_offset = FrameOffset(0); if (is_synchronized) { // Pass object for locking. main_jni_conv->Next(); // Skip JNIEnv. 
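 // Runtime-side shape of the call being set up here (a sketch; the actual
 // entrypoints live in the runtime's quick entrypoint table):
 //
 //   uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self);
 //   uint32_t JniMethodStart(Thread* self);
 //
 // The lock object is therefore passed as a handle-scope entry (a jobject),
 // never as a raw reference, and the returned cookie is the saved local
 // reference state stored just below.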
locked_object_handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); if (main_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset, mr_conv->InterproceduralScratchRegister(), false); } else { ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset, ManagedRegister::NoRegister(), false); } main_jni_conv->Next(); } if (main_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(main_jni_conv->CurrentParamRegister()); __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start), main_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(), main_jni_conv->InterproceduralScratchRegister()); __ CallFromThread(jni_start, main_jni_conv->InterproceduralScratchRegister()); } if (is_synchronized) { // Check for exceptions from monitor enter. __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size); } } // Store into stack_frame[saved_cookie_offset] the return value of JniMethodStart. FrameOffset saved_cookie_offset( FrameOffset(0xDEADBEEFu)); // @CriticalNative - use obviously bad value for debugging if (LIKELY(!is_critical_native)) { saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset(); __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4 /* sizeof cookie */); } // 7. Iterate over arguments placing values from managed calling convention in // to the convention required for a native call (shuffling). For references // place an index/pointer to the reference after checking whether it is // null (which must be encoded as null). // Note: we do this prior to materializing the JNIEnv* and static's jclass to // give as many free registers for the shuffle as possible. mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size)); uint32_t args_count = 0; while (mr_conv->HasNext()) { args_count++; mr_conv->Next(); } // Do a backward pass over arguments, so that the generated code will be "mov // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3." // TODO: A reverse iterator to improve readability. for (uint32_t i = 0; i < args_count; ++i) { mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size)); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); // Skip the extra JNI parameters for now. if (LIKELY(!is_critical_native)) { main_jni_conv->Next(); // Skip JNIEnv*. if (is_static) { main_jni_conv->Next(); // Skip Class for now. } } // Skip to the argument we're interested in. 
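 // Worked example (sketch): suppose managed args arg0, arg1 sit in R1, R2 and
 // the JNI convention needs them in R2, R3 (shifted right by the inserted
 // JNIEnv*). Copying arg0 first would clobber arg1's source:
 //   R2 <- R1   // arg0, but R2 still held arg1!
 // The backward pass emits the copies last-to-first instead:
 //   R3 <- R2   // arg1
 //   R2 <- R1   // arg0
 // The inner loop below merely advances both iterators to the argument being
 // copied on this pass.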
for (uint32_t j = 0; j < args_count - i - 1; ++j) { mr_conv->Next(); main_jni_conv->Next(); } CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size, main_out_arg_size); } if (is_static && !is_critical_native) { // Create argument for Class mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size)); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); main_jni_conv->Next(); // Skip JNIEnv* FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset(); if (main_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = main_jni_conv->CurrentParamStackOffset(); __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(), false); } else { ManagedRegister out_reg = main_jni_conv->CurrentParamRegister(); __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), false); } } // Set the iterator back to the incoming Method*. main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); if (LIKELY(!is_critical_native)) { // 8. Create 1st argument, the JNI environment ptr. // Register that will hold local indirect reference table if (main_jni_conv->IsCurrentParamInRegister()) { ManagedRegister jni_env = main_jni_conv->CurrentParamRegister(); DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister())); __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset()); } else { FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset(); __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(), main_jni_conv->InterproceduralScratchRegister()); } } // 9. Plant call to native code associated with method. MemberOffset jni_entrypoint_offset = ArtMethod::EntryPointFromJniOffset(InstructionSetPointerSize(instruction_set)); // FIXME: Not sure if MethodStackOffset will work here. What does it even do? __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset, // XX: Why not the jni conv scratch register? mr_conv->InterproceduralScratchRegister()); // 10. Fix differences in result widths. if (main_jni_conv->RequiresSmallResultTypeExtension()) { if (main_jni_conv->GetReturnType() == Primitive::kPrimByte || main_jni_conv->GetReturnType() == Primitive::kPrimShort) { __ SignExtend(main_jni_conv->ReturnRegister(), Primitive::ComponentSize(main_jni_conv->GetReturnType())); } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean || main_jni_conv->GetReturnType() == Primitive::kPrimChar) { __ ZeroExtend(main_jni_conv->ReturnRegister(), Primitive::ComponentSize(main_jni_conv->GetReturnType())); } } // 11. Process return value FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation(); if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) { if (LIKELY(!is_critical_native)) { // For normal JNI, store the return value on the stack because the call to // JniMethodEnd will clobber the return value. It will be restored in (13). if ((instruction_set == kMips || instruction_set == kMips64) && main_jni_conv->GetReturnType() == Primitive::kPrimDouble && return_save_location.Uint32Value() % 8 != 0) { // Ensure doubles are 8-byte aligned for MIPS return_save_location = FrameOffset(return_save_location.Uint32Value() + static_cast(kMipsPointerSize)); // TODO: refactor this into the JniCallingConvention code // as a return value alignment requirement. 
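 // e.g. an offset of 20 (20 % 8 == 4) is bumped by 4 to 24, so the 8-byte
 // double store below is naturally aligned, as o32 requires.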
} CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size); __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue()); } else { // For @CriticalNative only, // move the JNI return register into the managed return register (if they don't match). ManagedRegister jni_return_reg = main_jni_conv->ReturnRegister(); ManagedRegister mr_return_reg = mr_conv->ReturnRegister(); // Check if the JNI return register matches the managed return register. // If they differ, only then do we have to do anything about it. // Otherwise the return value is already in the right place when we return. if (!jni_return_reg.Equals(mr_return_reg)) { // This is typically only necessary on ARM32 due to native being softfloat // while managed is hardfloat. // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0. __ Move(mr_return_reg, jni_return_reg, main_jni_conv->SizeOfReturnValue()); } else if (jni_return_reg.IsNoRegister() && mr_return_reg.IsNoRegister()) { // Sanity check: If the return value is passed on the stack for some reason, // then make sure the size matches. CHECK_EQ(main_jni_conv->SizeOfReturnValue(), mr_conv->SizeOfReturnValue()); } } } // Increase frame size for out args if needed by the end_jni_conv. const size_t end_out_arg_size = end_jni_conv->OutArgSize(); if (end_out_arg_size > current_out_arg_size) { size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size; current_out_arg_size = end_out_arg_size; // TODO: This is redundant for @CriticalNative but we need to // conditionally do __DecreaseFrameSize below. __ IncreaseFrameSize(out_arg_size_diff); saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff); locked_object_handle_scope_offset = FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff); return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff); } // thread. end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size)); if (LIKELY(!is_critical_native)) { // 12. Call JniMethodEnd ThreadOffset jni_end( GetJniEntrypointThreadOffset(JniEntrypoint::kEnd, reference_return, is_synchronized, is_fast_native).SizeValue()); if (reference_return) { // Pass result. SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); end_jni_conv->Next(); } // Pass saved local reference state. if (end_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = end_jni_conv->CurrentParamStackOffset(); __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4); } else { ManagedRegister out_reg = end_jni_conv->CurrentParamRegister(); __ Load(out_reg, saved_cookie_offset, 4); } end_jni_conv->Next(); if (is_synchronized) { // Pass object for unlocking. 
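 // Runtime-side shape of the end call (a sketch, mirroring JniMethodStart):
 //
 //   void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self);
 //   void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie,
 //                                 jobject locked, Thread* self);
 //   mirror::Object* JniMethodEndWithReference(jobject result,
 //                                             uint32_t saved_local_ref_cookie,
 //                                             Thread* self);
 //
 // The 4-byte cookie loaded above is exactly the value JniMethodStart returned
 // in step 6; handing it back pops the local references created by the native
 // code.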
if (end_jni_conv->IsCurrentParamOnStack()) { FrameOffset out_off = end_jni_conv->CurrentParamStackOffset(); __ CreateHandleScopeEntry(out_off, locked_object_handle_scope_offset, end_jni_conv->InterproceduralScratchRegister(), false); } else { ManagedRegister out_reg = end_jni_conv->CurrentParamRegister(); __ CreateHandleScopeEntry(out_reg, locked_object_handle_scope_offset, ManagedRegister::NoRegister(), false); } end_jni_conv->Next(); } if (end_jni_conv->IsCurrentParamInRegister()) { __ GetCurrentThread(end_jni_conv->CurrentParamRegister()); __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end), end_jni_conv->InterproceduralScratchRegister()); } else { __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(), end_jni_conv->InterproceduralScratchRegister()); __ CallFromThread(jni_end, end_jni_conv->InterproceduralScratchRegister()); } // 13. Reload return value if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) { __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue()); // NIT: If it's @CriticalNative then we actually only need to do this IF // the calling convention's native return register doesn't match the managed convention's // return register. } } // if (!is_critical_native) // 14. Move frame up now we're done with the out arg space. __ DecreaseFrameSize(current_out_arg_size); // 15. Process pending exceptions from JNI call or monitor exit. __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust */); // 16. Remove activation - need to restore callee save registers since the GC may have changed // them. DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast(frame_size)); __ RemoveFrame(frame_size, callee_save_regs); DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast(frame_size)); // 17. Finalize code generation __ FinalizeCode(); size_t cs = __ CodeSize(); std::vector managed_code(cs); MemoryRegion code(&managed_code[0], managed_code.size()); __ FinalizeInstructions(code); return CompiledMethod::SwapAllocCompiledMethod(driver, instruction_set, ArrayRef(managed_code), frame_size, main_jni_conv->CoreSpillMask(), main_jni_conv->FpSpillMask(), /* method_info */ ArrayRef(), /* vmap_table */ ArrayRef(), ArrayRef(*jni_asm->cfi().data()), ArrayRef()); } // Copy a single parameter from the managed to the JNI calling convention. template static void CopyParameter(JNIMacroAssembler* jni_asm, ManagedRuntimeCallingConvention* mr_conv, JniCallingConvention* jni_conv, size_t frame_size, size_t out_arg_size) { bool input_in_reg = mr_conv->IsCurrentParamInRegister(); bool output_in_reg = jni_conv->IsCurrentParamInRegister(); FrameOffset handle_scope_offset(0); bool null_allowed = false; bool ref_param = jni_conv->IsCurrentParamAReference(); CHECK(!ref_param || mr_conv->IsCurrentParamAReference()); // input may be in register, on stack or both - but not none! CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack()); if (output_in_reg) { // output shouldn't straddle registers and stack CHECK(!jni_conv->IsCurrentParamOnStack()); } else { CHECK(jni_conv->IsCurrentParamOnStack()); } // References need placing in handle scope and the entry address passing. if (ref_param) { null_allowed = mr_conv->IsCurrentArgPossiblyNull(); // Compute handle scope offset. Note null is placed in the handle scope but the jobject // passed to the native code must be null (not a pointer into the handle scope // as with regular references). 
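 // Concretely (sketch): for a reference parameter the native callee receives
 //   jobject arg = is_null ? nullptr : &handle_scope_entry;
 // and CreateHandleScopeEntry() below emits that null test at runtime
 // whenever null_allowed is set.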
handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset(); // Check handle scope offset is within frame. CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size)); } if (input_in_reg && output_in_reg) { ManagedRegister in_reg = mr_conv->CurrentParamRegister(); ManagedRegister out_reg = jni_conv->CurrentParamRegister(); if (ref_param) { __ CreateHandleScopeEntry(out_reg, handle_scope_offset, in_reg, null_allowed); } else { if (!mr_conv->IsCurrentParamOnStack()) { // regular non-straddling move __ Move(out_reg, in_reg, mr_conv->CurrentParamSize()); } else { UNIMPLEMENTED(FATAL); // we currently don't expect to see this case } } } else if (!input_in_reg && !output_in_reg) { FrameOffset out_off = jni_conv->CurrentParamStackOffset(); if (ref_param) { __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(), null_allowed); } else { FrameOffset in_off = mr_conv->CurrentParamStackOffset(); size_t param_size = mr_conv->CurrentParamSize(); CHECK_EQ(param_size, jni_conv->CurrentParamSize()); __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size); } } else if (!input_in_reg && output_in_reg) { FrameOffset in_off = mr_conv->CurrentParamStackOffset(); ManagedRegister out_reg = jni_conv->CurrentParamRegister(); // Check that incoming stack arguments are above the current stack frame. CHECK_GT(in_off.Uint32Value(), frame_size); if (ref_param) { __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed); } else { size_t param_size = mr_conv->CurrentParamSize(); CHECK_EQ(param_size, jni_conv->CurrentParamSize()); __ Load(out_reg, in_off, param_size); } } else { CHECK(input_in_reg && !output_in_reg); ManagedRegister in_reg = mr_conv->CurrentParamRegister(); FrameOffset out_off = jni_conv->CurrentParamStackOffset(); // Check outgoing argument is within frame CHECK_LT(out_off.Uint32Value(), frame_size); if (ref_param) { // TODO: recycle value in in_reg rather than reload from handle scope __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(), null_allowed); } else { size_t param_size = mr_conv->CurrentParamSize(); CHECK_EQ(param_size, jni_conv->CurrentParamSize()); if (!mr_conv->IsCurrentParamOnStack()) { // regular non-straddling store __ Store(out_off, in_reg, param_size); } else { // store where input straddles registers and stack CHECK_EQ(param_size, 8u); FrameOffset in_off = mr_conv->CurrentParamStackOffset(); __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister()); } } } } template static void SetNativeParameter(JNIMacroAssembler* jni_asm, JniCallingConvention* jni_conv, ManagedRegister in_reg) { if (jni_conv->IsCurrentParamOnStack()) { FrameOffset dest = jni_conv->CurrentParamStackOffset(); __ StoreRawPtr(dest, in_reg); } else { if (!jni_conv->CurrentParamRegister().Equals(in_reg)) { __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize()); } } } CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file, Compiler::JniOptimizationFlags optimization_flags) { if (Is64BitInstructionSet(compiler->GetInstructionSet())) { return ArtJniCompileMethodInternal( compiler, access_flags, method_idx, dex_file, optimization_flags); } else { return ArtJniCompileMethodInternal( compiler, access_flags, method_idx, dex_file, optimization_flags); } } } // namespace art 
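// Shape of the stub generated above, for the common non-@CriticalNative case
// (a sketch; the numbers refer to the numbered steps in
// ArtJniCompileMethodInternal):
//
//   BuildFrame()                              // 1. callee saves, Method*, PC
//   <set up HandleScope, box jclass/refs>     // 2.-3.5
//   <store SP to Thread>                      // 4.
//   IncreaseFrameSize(main_out_arg_size)      // 5.
//   cookie = JniMethodStart*(...)             // 6. transition out of Runnable
//   <shuffle args into the JNI convention>    // 7.
//   <materialize JNIEnv* (and jclass)>        // 8.
//   result = method->jni_entrypoint(...)      // 9.
//   <sign/zero-extend + spill result>         // 10.-11.
//   JniMethodEnd*(cookie, ...)                // 12. back to Runnable
//   <reload result>                           // 13.
//   DecreaseFrameSize(); ExceptionPoll()      // 14.-15.
//   RemoveFrame()                             // 16. restore callee saves
//   FinalizeCode()                            // 17.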
android-platform-art-8.1.0+r23/compiler/jni/quick/jni_compiler.h000066400000000000000000000023371336577252300245230ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ #define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ #include "compiler.h" #include "dex_file.h" namespace art { class CompilerDriver; class CompiledMethod; CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags, uint32_t method_idx, const DexFile& dex_file, Compiler::JniOptimizationFlags optimization_flags); } // namespace art #endif // ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_ android-platform-art-8.1.0+r23/compiler/jni/quick/mips/000077500000000000000000000000001336577252300226435ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jni/quick/mips/calling_convention_mips.cc000066400000000000000000000437111336577252300300630ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "calling_convention_mips.h" #include "base/logging.h" #include "handle_scope-inl.h" #include "utils/mips/managed_register_mips.h" namespace art { namespace mips { // // JNI calling convention constants. // // Up to how many float-like (float, double) args can be enregistered in floating-point registers. // The rest of the args must go in integer registers or on the stack. constexpr size_t kMaxFloatOrDoubleRegisterArguments = 2u; // Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be // enregistered. The rest of the args must go on the stack. constexpr size_t kMaxIntLikeRegisterArguments = 4u; static const Register kJniCoreArgumentRegisters[] = { A0, A1, A2, A3 }; static const FRegister kJniFArgumentRegisters[] = { F12, F14 }; static const DRegister kJniDArgumentRegisters[] = { D6, D7 }; // // Managed calling convention constants. // static const Register kManagedCoreArgumentRegisters[] = { A0, A1, A2, A3, T0, T1 }; static const FRegister kManagedFArgumentRegisters[] = { F8, F10, F12, F14, F16, F18 }; static const DRegister kManagedDArgumentRegisters[] = { D4, D5, D6, D7, D8, D9 }; static constexpr ManagedRegister kCalleeSaveRegisters[] = { // Core registers. 
MipsManagedRegister::FromCoreRegister(S2), MipsManagedRegister::FromCoreRegister(S3), MipsManagedRegister::FromCoreRegister(S4), MipsManagedRegister::FromCoreRegister(S5), MipsManagedRegister::FromCoreRegister(S6), MipsManagedRegister::FromCoreRegister(S7), MipsManagedRegister::FromCoreRegister(FP), // No hard float callee saves. }; static constexpr uint32_t CalculateCoreCalleeSpillMask() { // RA is a special callee save which is not reported by CalleeSaveRegisters(). uint32_t result = 1 << RA; for (auto&& r : kCalleeSaveRegisters) { if (r.AsMips().IsCoreRegister()) { result |= (1 << r.AsMips().AsCoreRegister()); } } return result; } static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(); static constexpr uint32_t kFpCalleeSpillMask = 0u; // Calling convention ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() { return MipsManagedRegister::FromCoreRegister(T9); } ManagedRegister MipsJniCallingConvention::InterproceduralScratchRegister() { return MipsManagedRegister::FromCoreRegister(T9); } static ManagedRegister ReturnRegisterForShorty(const char* shorty) { if (shorty[0] == 'F') { return MipsManagedRegister::FromFRegister(F0); } else if (shorty[0] == 'D') { return MipsManagedRegister::FromDRegister(D0); } else if (shorty[0] == 'J') { return MipsManagedRegister::FromRegisterPair(V0_V1); } else if (shorty[0] == 'V') { return MipsManagedRegister::NoRegister(); } else { return MipsManagedRegister::FromCoreRegister(V0); } } ManagedRegister MipsManagedRuntimeCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty()); } ManagedRegister MipsJniCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty()); } ManagedRegister MipsJniCallingConvention::IntReturnRegister() { return MipsManagedRegister::FromCoreRegister(V0); } // Managed runtime calling convention ManagedRegister MipsManagedRuntimeCallingConvention::MethodRegister() { return MipsManagedRegister::FromCoreRegister(A0); } bool MipsManagedRuntimeCallingConvention::IsCurrentParamInRegister() { return false; // Everything moved to stack on entry. } bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() { return true; } ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() { LOG(FATAL) << "Should not reach here"; return ManagedRegister::NoRegister(); } FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); FrameOffset result = FrameOffset(displacement_.Int32Value() + // displacement kFramePointerSize + // Method* (itr_slots_ * kFramePointerSize)); // offset into in args return result; } const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() { // We spill the argument registers on MIPS to free them up for scratch use, we then assume // all arguments are on the stack. if ((entry_spills_.size() == 0) && (NumArgs() > 0)) { uint32_t gpr_index = 1; // Skip A0, it is used for ArtMethod*. 
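    // Example (sketch): for a static method taking (int, long, int) the
    // resulting spill list is
    //   A1    <- int  arg0
    //   A2/A3 <- long arg1
    //   T0    <- int  arg2
    // where the code below realigns long pairs so they never straddle A1-A2
    // or A3-T0.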
uint32_t fpr_index = 0; for (ResetIterator(FrameOffset(0)); HasNext(); Next()) { if (IsCurrentParamAFloatOrDouble()) { if (IsCurrentParamADouble()) { if (fpr_index < arraysize(kManagedDArgumentRegisters)) { entry_spills_.push_back( MipsManagedRegister::FromDRegister(kManagedDArgumentRegisters[fpr_index++])); } else { entry_spills_.push_back(ManagedRegister::NoRegister(), 8); } } else { if (fpr_index < arraysize(kManagedFArgumentRegisters)) { entry_spills_.push_back( MipsManagedRegister::FromFRegister(kManagedFArgumentRegisters[fpr_index++])); } else { entry_spills_.push_back(ManagedRegister::NoRegister(), 4); } } } else { if (IsCurrentParamALong() && !IsCurrentParamAReference()) { if (gpr_index == 1 || gpr_index == 3) { // Don't use A1-A2(A3-T0) as a register pair, move to A2-A3(T0-T1) instead. gpr_index++; } if (gpr_index < arraysize(kManagedCoreArgumentRegisters) - 1) { entry_spills_.push_back( MipsManagedRegister::FromCoreRegister(kManagedCoreArgumentRegisters[gpr_index++])); } else if (gpr_index == arraysize(kManagedCoreArgumentRegisters) - 1) { gpr_index++; entry_spills_.push_back(ManagedRegister::NoRegister(), 4); } else { entry_spills_.push_back(ManagedRegister::NoRegister(), 4); } } if (gpr_index < arraysize(kManagedCoreArgumentRegisters)) { entry_spills_.push_back( MipsManagedRegister::FromCoreRegister(kManagedCoreArgumentRegisters[gpr_index++])); } else { entry_spills_.push_back(ManagedRegister::NoRegister(), 4); } } } } return entry_spills_; } // JNI calling convention MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty) : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kMipsPointerSize) { // SYSTEM V - Application Binary Interface (MIPS RISC Processor): // Data Representation - Fundamental Types (3-4) specifies fundamental alignments for each type. // "Each member is assigned to the lowest available offset with the appropriate alignment. This // may require internal padding, depending on the previous member." // // All of our stack arguments are usually 4-byte aligned, however longs and doubles must be 8 // bytes aligned. Add padding to maintain 8-byte alignment invariant. // // Compute padding to ensure longs and doubles are not split in o32. size_t padding = 0; size_t cur_arg, cur_reg; if (LIKELY(HasExtraArgumentsForJni())) { // Ignore the 'this' jobject or jclass for static methods and the JNIEnv. // We start at the aligned register A2. // // Ignore the first 2 parameters because they are guaranteed to be aligned. cur_arg = NumImplicitArgs(); // Skip the "this" argument. cur_reg = 2; // Skip {A0=JNIEnv, A1=jobject} / {A0=JNIEnv, A1=jclass} parameters (start at A2). } else { // Check every parameter. cur_arg = 0; cur_reg = 0; } // Shift across a logical register mapping that looks like: // // | A0 | A1 | A2 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 | // // or some of variants with floating-point registers (F12 and F14), for example // // | F12 | F14 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 | // // (where SP is the stack pointer at the start of called function). // // Any time there would normally be a long/double in an odd logical register, // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment. // // This works for both physical register pairs {A0, A1}, {A2, A3}, // floating-point registers F12, F14 and for when the value is on the stack. 
// // For example: // (a) long would normally go into A1, but we shift it into A2 // | INT | (PAD) | LONG | // | A0 | A1 | A2 | A3 | // // (b) long would normally go into A3, but we shift it into SP // | INT | INT | INT | (PAD) | LONG | // | A0 | A1 | A2 | A3 | SP+16 SP+20 | // // where INT is any <=4 byte arg, and LONG is any 8-byte arg. for (; cur_arg < NumArgs(); cur_arg++) { if (IsParamALongOrDouble(cur_arg)) { if ((cur_reg & 1) != 0) { padding += 4; cur_reg++; // Additional bump to ensure alignment. } cur_reg += 2; // Bump the iterator twice for every long argument. } else { cur_reg++; // Bump the iterator for every argument. } } if (cur_reg < kMaxIntLikeRegisterArguments) { // As a special case when, as a result of shifting (or not) there are no arguments on the stack, // we actually have 0 stack padding. // // For example with @CriticalNative and: // (int, long) -> shifts the long but doesn't need to pad the stack // // shift // \/ // | INT | (PAD) | LONG | (EMPTY) ... // | r0 | r1 | r2 | r3 | SP ... // /\ // no stack padding padding_ = 0; } else { padding_ = padding; } // Argument Passing (3-17): // "When the first argument is integral, the remaining arguments are passed in the integer // registers." // // "The rules that determine which arguments go into registers and which ones must be passed on // the stack are most easily explained by considering the list of arguments as a structure, // aligned according to normal structure rules. Mapping of this structure into the combination of // stack and registers is as follows: up to two leading floating-point arguments can be passed in // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset. // Holes left in the structure for alignment are unused, whether in registers or in the stack." // // For example with @CriticalNative and: // (a) first argument is not floating-point, so all go into integer registers // | INT | FLOAT | DOUBLE | // | A0 | A1 | A2 | A3 | // (b) first argument is floating-point, but 2nd is integer // | FLOAT | INT | DOUBLE | // | F12 | A1 | A2 | A3 | // (c) first two arguments are floating-point (float, double) // | FLOAT | (PAD) | DOUBLE | INT | // | F12 | | F14 | SP+16 | // (d) first two arguments are floating-point (double, float) // | DOUBLE | FLOAT | INT | // | F12 | F14 | A3 | // (e) first three arguments are floating-point, but just first two will go into fp registers // | DOUBLE | FLOAT | FLOAT | // | F12 | F14 | A3 | // // Find out if the first argument is a floating-point. In that case, floating-point registers will // be used for up to two leading floating-point arguments. Otherwise, all arguments will be passed // using integer registers. use_fp_arg_registers_ = false; if (is_critical_native) { if (NumArgs() > 0) { if (IsParamAFloatOrDouble(0)) { use_fp_arg_registers_ = true; } } } } uint32_t MipsJniCallingConvention::CoreSpillMask() const { return kCoreCalleeSpillMask; } uint32_t MipsJniCallingConvention::FpSpillMask() const { return kFpCalleeSpillMask; } ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const { return MipsManagedRegister::FromCoreRegister(AT); } size_t MipsJniCallingConvention::FrameSize() { // ArtMethod*, RA and callee save area size, local reference segment state. 
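  // Worked example (sketch, assuming the usual 16-byte kStackAlignment): a
  // static native method with one reference argument and an int return needs
  //   4 (ArtMethod*) + 4 (RA) + 7 * 4 (S2-S7, FP)        = 36 bytes
  //   + 4 (local reference segment state / cookie)       = 40
  //   + 8 + 2 * 4 (handle scope: header, jclass, arg)    = 56
  //   + 4 (return value spill)                           = 60 -> rounded to 64.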
const size_t method_ptr_size = static_cast(kMipsPointerSize); const size_t ra_return_addr_size = kFramePointerSize; const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; size_t frame_data_size = method_ptr_size + ra_return_addr_size + callee_save_area_size; if (LIKELY(HasLocalReferenceSegmentState())) { // Local reference segment state. frame_data_size += kFramePointerSize; } // References plus 2 words for HandleScope header. const size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount()); size_t total_size = frame_data_size; if (LIKELY(HasHandleScope())) { // HandleScope is sometimes excluded. total_size += handle_scope_size; // Handle scope size. } // Plus return value spill area size. total_size += SizeOfReturnValue(); return RoundUp(total_size, kStackAlignment); } size_t MipsJniCallingConvention::OutArgSize() { // Argument Passing (3-17): // "Despite the fact that some or all of the arguments to a function are passed in registers, // always allocate space on the stack for all arguments. This stack space should be a structure // large enough to contain all the arguments, aligned according to normal structure rules (after // promotion and structure return pointer insertion). The locations within the stack frame used // for arguments are called the home locations." // // Allocate 16 bytes for home locations + space needed for stack arguments. return RoundUp( (kMaxIntLikeRegisterArguments + NumberOfOutgoingStackArgs()) * kFramePointerSize + padding_, kStackAlignment); } ArrayRef MipsJniCallingConvention::CalleeSaveRegisters() const { return ArrayRef(kCalleeSaveRegisters); } // JniCallingConvention ABI follows o32 where longs and doubles must occur // in even register numbers and stack slots. void MipsJniCallingConvention::Next() { JniCallingConvention::Next(); if (LIKELY(HasNext())) { // Avoid CHECK failure for IsCurrentParam // Ensure slot is 8-byte aligned for longs/doubles (o32). if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) { // itr_slots_ needs to be an even number, according to o32. itr_slots_++; } } } bool MipsJniCallingConvention::IsCurrentParamInRegister() { // Argument Passing (3-17): // "The rules that determine which arguments go into registers and which ones must be passed on // the stack are most easily explained by considering the list of arguments as a structure, // aligned according to normal structure rules. Mapping of this structure into the combination of // stack and registers is as follows: up to two leading floating-point arguments can be passed in // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset. // Holes left in the structure for alignment are unused, whether in registers or in the stack." // // Even when floating-point registers are used, there can be up to 4 arguments passed in // registers. return itr_slots_ < kMaxIntLikeRegisterArguments; } bool MipsJniCallingConvention::IsCurrentParamOnStack() { return !IsCurrentParamInRegister(); } ManagedRegister MipsJniCallingConvention::CurrentParamRegister() { CHECK_LT(itr_slots_, kMaxIntLikeRegisterArguments); // Up to two leading floating-point arguments can be passed in floating-point registers. 
if (use_fp_arg_registers_ && (itr_args_ < kMaxFloatOrDoubleRegisterArguments)) { if (IsCurrentParamAFloatOrDouble()) { if (IsCurrentParamADouble()) { return MipsManagedRegister::FromDRegister(kJniDArgumentRegisters[itr_args_]); } else { return MipsManagedRegister::FromFRegister(kJniFArgumentRegisters[itr_args_]); } } } // All other arguments (including other floating-point arguments) will be passed in integer // registers. if (IsCurrentParamALongOrDouble()) { if (itr_slots_ == 0u) { return MipsManagedRegister::FromRegisterPair(A0_A1); } else { CHECK_EQ(itr_slots_, 2u); return MipsManagedRegister::FromRegisterPair(A2_A3); } } else { return MipsManagedRegister::FromCoreRegister(kJniCoreArgumentRegisters[itr_slots_]); } } FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() { CHECK_GE(itr_slots_, kMaxIntLikeRegisterArguments); size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize); CHECK_LT(offset, OutArgSize()); return FrameOffset(offset); } size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() { size_t static_args = HasSelfClass() ? 1 : 0; // Count jclass. // Regular argument parameters and this. size_t param_args = NumArgs() + NumLongOrDoubleArgs(); // Twice count 8-byte args. // Count JNIEnv* less arguments in registers. size_t internal_args = (HasJniEnv() ? 1 : 0); size_t total_args = static_args + param_args + internal_args; return total_args - std::min(kMaxIntLikeRegisterArguments, static_cast(total_args)); } } // namespace mips } // namespace art android-platform-art-8.1.0+r23/compiler/jni/quick/mips/calling_convention_mips.h000066400000000000000000000070111336577252300277160ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_ #define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_ #include "base/enums.h" #include "jni/quick/calling_convention.h" namespace art { namespace mips { constexpr size_t kFramePointerSize = 4; static_assert(kFramePointerSize == static_cast(PointerSize::k32), "Invalid frame pointer size"); class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k32) {} ~MipsManagedRuntimeCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // Managed runtime calling convention ManagedRegister MethodRegister() OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE; private: ManagedRegisterEntrySpills entry_spills_; DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention); }; class MipsJniCallingConvention FINAL : public JniCallingConvention { public: MipsJniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty); ~MipsJniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister IntReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // JNI calling convention void Next() OVERRIDE; // Override default behavior for o32. size_t FrameSize() OVERRIDE; size_t OutArgSize() OVERRIDE; ArrayRef CalleeSaveRegisters() const OVERRIDE; ManagedRegister ReturnScratchRegister() const OVERRIDE; uint32_t CoreSpillMask() const OVERRIDE; uint32_t FpSpillMask() const OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; // Mips does not need to extend small return types. bool RequiresSmallResultTypeExtension() const OVERRIDE { return false; } protected: size_t NumberOfOutgoingStackArgs() OVERRIDE; private: // Padding to ensure longs and doubles are not split in o32. size_t padding_; size_t use_fp_arg_registers_; DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention); }; } // namespace mips } // namespace art #endif // ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_ android-platform-art-8.1.0+r23/compiler/jni/quick/mips64/000077500000000000000000000000001336577252300230155ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jni/quick/mips64/calling_convention_mips64.cc000066400000000000000000000207621336577252300304100ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

#include "calling_convention_mips64.h"

#include "base/logging.h"
#include "handle_scope-inl.h"
#include "utils/mips64/managed_register_mips64.h"

namespace art {
namespace mips64 {

// Up to how many args can be enregistered. The rest of the args must go on the stack.
constexpr size_t kMaxRegisterArguments = 8u;

static const GpuRegister kGpuArgumentRegisters[] = {
  A0, A1, A2, A3, A4, A5, A6, A7
};

static const FpuRegister kFpuArgumentRegisters[] = {
  F12, F13, F14, F15, F16, F17, F18, F19
};

static constexpr ManagedRegister kCalleeSaveRegisters[] = {
    // Core registers.
    Mips64ManagedRegister::FromGpuRegister(S2),
    Mips64ManagedRegister::FromGpuRegister(S3),
    Mips64ManagedRegister::FromGpuRegister(S4),
    Mips64ManagedRegister::FromGpuRegister(S5),
    Mips64ManagedRegister::FromGpuRegister(S6),
    Mips64ManagedRegister::FromGpuRegister(S7),
    Mips64ManagedRegister::FromGpuRegister(GP),
    Mips64ManagedRegister::FromGpuRegister(S8),
    // No hard float callee saves.
};

static constexpr uint32_t CalculateCoreCalleeSpillMask() {
  // RA is a special callee save which is not reported by CalleeSaveRegisters().
  uint32_t result = 1 << RA;
  for (auto&& r : kCalleeSaveRegisters) {
    if (r.AsMips64().IsGpuRegister()) {
      result |= (1 << r.AsMips64().AsGpuRegister());
    }
  }
  return result;
}

static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
static constexpr uint32_t kFpCalleeSpillMask = 0u;

// Calling convention
ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
  return Mips64ManagedRegister::FromGpuRegister(T9);
}

static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return Mips64ManagedRegister::FromFpuRegister(F0);
  } else if (shorty[0] == 'V') {
    return Mips64ManagedRegister::NoRegister();
  } else {
    return Mips64ManagedRegister::FromGpuRegister(V0);
  }
}

ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
  return ReturnRegisterForShorty(GetShorty());
}

ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
  return Mips64ManagedRegister::FromGpuRegister(V0);
}

// Managed runtime calling convention

ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
  return Mips64ManagedRegister::FromGpuRegister(A0);
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
  return false;  // Everything moved to stack on entry.
}

bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
  return true;
}

ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
  LOG(FATAL) << "Should not reach here";
  return ManagedRegister::NoRegister();
}

FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
  CHECK(IsCurrentParamOnStack());
  FrameOffset result =
      FrameOffset(displacement_.Int32Value() +  // displacement
                  kFramePointerSize +  // Method ref
                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
  return result;
}

const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
  // We spill the argument registers on MIPS64 to free them up for scratch use,
  // we then assume all arguments are on the stack.
  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
    int reg_index = 1;  // we start from A1, A0 holds ArtMethod*.
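    // Example (sketch): reg_index is shared between GPRs and FPRs, so a
    // static method taking (long, Object, float) spills
    //   A1  <- long   (8 bytes)
    //   A2  <- Object (4 bytes, compressed reference)
    //   F15 <- float  (4 bytes)
    // which is the "e.g. A1, A2, F3, A4, F5, F6, A7" pattern noted below.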
// We need to choose the correct register size since the managed // stack uses 32bit stack slots. ResetIterator(FrameOffset(0)); while (HasNext()) { if (reg_index < 8) { if (IsCurrentParamAFloatOrDouble()) { // FP regs. FpuRegister arg = kFpuArgumentRegisters[reg_index]; Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg); entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4); } else { // GP regs. GpuRegister arg = kGpuArgumentRegisters[reg_index]; Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg); entry_spills_.push_back(reg, (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4); } // e.g. A1, A2, F3, A4, F5, F6, A7 reg_index++; } Next(); } } return entry_spills_; } // JNI calling convention Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty) : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kMips64PointerSize) { } uint32_t Mips64JniCallingConvention::CoreSpillMask() const { return kCoreCalleeSpillMask; } uint32_t Mips64JniCallingConvention::FpSpillMask() const { return kFpCalleeSpillMask; } ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const { return Mips64ManagedRegister::FromGpuRegister(AT); } size_t Mips64JniCallingConvention::FrameSize() { // ArtMethod*, RA and callee save area size, local reference segment state. size_t method_ptr_size = static_cast(kFramePointerSize); size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize; size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size; if (LIKELY(HasLocalReferenceSegmentState())) { // Local ref. segment state. // Local reference segment state is sometimes excluded. frame_data_size += sizeof(uint32_t); } // References plus 2 words for HandleScope header. size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount()); size_t total_size = frame_data_size; if (LIKELY(HasHandleScope())) { // HandleScope is sometimes excluded. total_size += handle_scope_size; // Handle scope size. } // Plus return value spill area size. 
total_size += SizeOfReturnValue(); return RoundUp(total_size, kStackAlignment); } size_t Mips64JniCallingConvention::OutArgSize() { return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment); } ArrayRef Mips64JniCallingConvention::CalleeSaveRegisters() const { return ArrayRef(kCalleeSaveRegisters); } bool Mips64JniCallingConvention::IsCurrentParamInRegister() { return itr_args_ < kMaxRegisterArguments; } bool Mips64JniCallingConvention::IsCurrentParamOnStack() { return !IsCurrentParamInRegister(); } ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() { CHECK(IsCurrentParamInRegister()); if (IsCurrentParamAFloatOrDouble()) { return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]); } else { return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]); } } FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); size_t args_on_stack = itr_args_ - kMaxRegisterArguments; size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize); CHECK_LT(offset, OutArgSize()); return FrameOffset(offset); } size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() { // all arguments including JNI args size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni(); // Nothing on the stack unless there are more than 8 arguments return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0; } } // namespace mips64 } // namespace art android-platform-art-8.1.0+r23/compiler/jni/quick/mips64/calling_convention_mips64.h000066400000000000000000000065771336577252300302620ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_ #define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_ #include "base/enums.h" #include "jni/quick/calling_convention.h" namespace art { namespace mips64 { constexpr size_t kFramePointerSize = 8; static_assert(kFramePointerSize == static_cast(PointerSize::k64), "Invalid frame pointer size"); class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k64) {} ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // Managed runtime calling convention ManagedRegister MethodRegister() OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE; private: ManagedRegisterEntrySpills entry_spills_; DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention); }; class Mips64JniCallingConvention FINAL : public JniCallingConvention { public: Mips64JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty); ~Mips64JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister IntReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // JNI calling convention size_t FrameSize() OVERRIDE; size_t OutArgSize() OVERRIDE; ArrayRef CalleeSaveRegisters() const OVERRIDE; ManagedRegister ReturnScratchRegister() const OVERRIDE; uint32_t CoreSpillMask() const OVERRIDE; uint32_t FpSpillMask() const OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; // Mips64 does not need to extend small return types. bool RequiresSmallResultTypeExtension() const OVERRIDE { return false; } protected: size_t NumberOfOutgoingStackArgs() OVERRIDE; private: DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention); }; } // namespace mips64 } // namespace art #endif // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_ android-platform-art-8.1.0+r23/compiler/jni/quick/x86/000077500000000000000000000000001336577252300223205ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jni/quick/x86/calling_convention_x86.cc000066400000000000000000000232201336577252300272060ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "calling_convention_x86.h" #include "base/logging.h" #include "handle_scope-inl.h" #include "utils/x86/managed_register_x86.h" namespace art { namespace x86 { static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size"); static_assert(kStackAlignment >= 16u, "IA-32 cdecl requires at least 16 byte stack alignment"); static constexpr ManagedRegister kCalleeSaveRegisters[] = { // Core registers. X86ManagedRegister::FromCpuRegister(EBP), X86ManagedRegister::FromCpuRegister(ESI), X86ManagedRegister::FromCpuRegister(EDI), // No hard float callee saves. }; static constexpr uint32_t CalculateCoreCalleeSpillMask() { // The spilled PC gets a special marker. uint32_t result = 1 << kNumberOfCpuRegisters; for (auto&& r : kCalleeSaveRegisters) { if (r.AsX86().IsCpuRegister()) { result |= (1 << r.AsX86().AsCpuRegister()); } } return result; } static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(); static constexpr uint32_t kFpCalleeSpillMask = 0u; // Calling convention ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() { return X86ManagedRegister::FromCpuRegister(ECX); } ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() { return X86ManagedRegister::FromCpuRegister(ECX); } ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const { return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop } static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) { if (shorty[0] == 'F' || shorty[0] == 'D') { if (jni) { return X86ManagedRegister::FromX87Register(ST0); } else { return X86ManagedRegister::FromXmmRegister(XMM0); } } else if (shorty[0] == 'J') { return X86ManagedRegister::FromRegisterPair(EAX_EDX); } else if (shorty[0] == 'V') { return ManagedRegister::NoRegister(); } else { return X86ManagedRegister::FromCpuRegister(EAX); } } ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty(), false); } ManagedRegister X86JniCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty(), true); } ManagedRegister X86JniCallingConvention::IntReturnRegister() { return X86ManagedRegister::FromCpuRegister(EAX); } // Managed runtime calling convention ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() { return X86ManagedRegister::FromCpuRegister(EAX); } bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { return false; // Everything is passed by stack } bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { // We assume all parameters are on stack, args coming via registers are spilled as entry_spills. return true; } ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() { ManagedRegister res = ManagedRegister::NoRegister(); if (!IsCurrentParamAFloatOrDouble()) { switch (gpr_arg_count_) { case 0: res = X86ManagedRegister::FromCpuRegister(ECX); break; case 1: res = X86ManagedRegister::FromCpuRegister(EDX); break; case 2: // Don't split a long between the last register and the stack. 
if (IsCurrentParamALong()) { return ManagedRegister::NoRegister(); } res = X86ManagedRegister::FromCpuRegister(EBX); break; } } else if (itr_float_and_doubles_ < 4) { // First four float parameters are passed via XMM0..XMM3 res = X86ManagedRegister::FromXmmRegister( static_cast(XMM0 + itr_float_and_doubles_)); } return res; } ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() { ManagedRegister res = ManagedRegister::NoRegister(); DCHECK(IsCurrentParamALong()); switch (gpr_arg_count_) { case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break; case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break; } return res; } FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() { return FrameOffset(displacement_.Int32Value() + // displacement kFramePointerSize + // Method* (itr_slots_ * kFramePointerSize)); // offset into in args } const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() { // We spill the argument registers on X86 to free them up for scratch use, we then assume // all arguments are on the stack. if (entry_spills_.size() == 0) { ResetIterator(FrameOffset(0)); while (HasNext()) { ManagedRegister in_reg = CurrentParamRegister(); bool is_long = IsCurrentParamALong(); if (!in_reg.IsNoRegister()) { int32_t size = IsParamADouble(itr_args_) ? 8 : 4; int32_t spill_offset = CurrentParamStackOffset().Uint32Value(); ManagedRegisterSpill spill(in_reg, size, spill_offset); entry_spills_.push_back(spill); if (is_long) { // special case, as we need a second register here. in_reg = CurrentParamHighLongRegister(); DCHECK(!in_reg.IsNoRegister()); // We have to spill the second half of the long. ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4); entry_spills_.push_back(spill2); } // Keep track of the number of GPRs allocated. if (!IsCurrentParamAFloatOrDouble()) { if (is_long) { // Long was allocated in 2 registers. gpr_arg_count_ += 2; } else { gpr_arg_count_++; } } } else if (is_long) { // We need to skip the unused last register, which is empty. // If we are already out of registers, this is harmless. gpr_arg_count_ += 2; } Next(); } } return entry_spills_; } // JNI calling convention X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty) : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kX86PointerSize) { } uint32_t X86JniCallingConvention::CoreSpillMask() const { return kCoreCalleeSpillMask; } uint32_t X86JniCallingConvention::FpSpillMask() const { return kFpCalleeSpillMask; } size_t X86JniCallingConvention::FrameSize() { // Method*, PC return address and callee save area size, local reference segment state const size_t method_ptr_size = static_cast(kX86PointerSize); const size_t pc_return_addr_size = kFramePointerSize; const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size; if (LIKELY(HasLocalReferenceSegmentState())) { // local ref. segment state // Local reference segment state is sometimes excluded. frame_data_size += kFramePointerSize; } // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header const size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount()); size_t total_size = frame_data_size; if (LIKELY(HasHandleScope())) { // HandleScope is sometimes excluded. 
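// Illustrative layout (a sketch, not normative): with kFramePointerSize == 4 and the three
// callee saves above (EBP, ESI, EDI), frame_data_size is 4 (Method*) + 4 (return PC) +
// 12 (callee saves) + 4 (local reference segment state) = 24 bytes; the HandleScope and the
// return value spill area are then added below and the total is rounded up to kStackAlignment.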
total_size += handle_scope_size; // handle scope size } // Plus return value spill area size total_size += SizeOfReturnValue(); return RoundUp(total_size, kStackAlignment); // TODO: Same thing as x64 except using different pointer size. Refactor? } size_t X86JniCallingConvention::OutArgSize() { return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment); } ArrayRef X86JniCallingConvention::CalleeSaveRegisters() const { return ArrayRef(kCalleeSaveRegisters); } bool X86JniCallingConvention::IsCurrentParamInRegister() { return false; // Everything is passed by stack. } bool X86JniCallingConvention::IsCurrentParamOnStack() { return true; // Everything is passed by stack. } ManagedRegister X86JniCallingConvention::CurrentParamRegister() { LOG(FATAL) << "Should not reach here"; return ManagedRegister::NoRegister(); } FrameOffset X86JniCallingConvention::CurrentParamStackOffset() { return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize)); } size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() { size_t static_args = HasSelfClass() ? 1 : 0; // count jclass // regular argument parameters and this size_t param_args = NumArgs() + NumLongOrDoubleArgs(); // count JNIEnv* and return pc (pushed after Method*) size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */); // No register args. size_t total_args = static_args + param_args + internal_args; return total_args; } } // namespace x86 } // namespace art android-platform-art-8.1.0+r23/compiler/jni/quick/x86/calling_convention_x86.h000066400000000000000000000065711336577252300270620ustar00rootroot00000000000000/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ #define ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ #include "base/enums.h" #include "jni/quick/calling_convention.h" namespace art { namespace x86 { constexpr size_t kFramePointerSize = static_cast(PointerSize::k32); class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k32), gpr_arg_count_(0) {} ~X86ManagedRuntimeCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // Managed runtime calling convention ManagedRegister MethodRegister() OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE; private: int gpr_arg_count_; ManagedRegister CurrentParamHighLongRegister(); ManagedRegisterEntrySpills entry_spills_; DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention); }; // Implements the x86 cdecl calling convention. class X86JniCallingConvention FINAL : public JniCallingConvention { public: X86JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty); ~X86JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister IntReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // JNI calling convention size_t FrameSize() OVERRIDE; size_t OutArgSize() OVERRIDE; ArrayRef CalleeSaveRegisters() const OVERRIDE; ManagedRegister ReturnScratchRegister() const OVERRIDE; uint32_t CoreSpillMask() const OVERRIDE; uint32_t FpSpillMask() const OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; // x86 needs to extend small return types. bool RequiresSmallResultTypeExtension() const OVERRIDE { return true; } protected: size_t NumberOfOutgoingStackArgs() OVERRIDE; private: DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention); }; } // namespace x86 } // namespace art #endif // ART_COMPILER_JNI_QUICK_X86_CALLING_CONVENTION_X86_H_ android-platform-art-8.1.0+r23/compiler/jni/quick/x86_64/000077500000000000000000000000001336577252300226315ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/jni/quick/x86_64/calling_convention_x86_64.cc000066400000000000000000000267261336577252300300460ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "calling_convention_x86_64.h" #include "base/bit_utils.h" #include "base/logging.h" #include "handle_scope-inl.h" #include "utils/x86_64/managed_register_x86_64.h" namespace art { namespace x86_64 { constexpr size_t kFramePointerSize = static_cast(PointerSize::k64); static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size"); static_assert(kStackAlignment >= 16u, "System V AMD64 ABI requires at least 16 byte stack alignment"); // XMM0..XMM7 can be used to pass the first 8 floating args. The rest must go on the stack. // -- Managed and JNI calling conventions. constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u; // Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be // enregistered. The rest of the args must go on the stack. // -- JNI calling convention only (Managed excludes RDI, so it's actually 5). constexpr size_t kMaxIntLikeRegisterArguments = 6u; static constexpr ManagedRegister kCalleeSaveRegisters[] = { // Core registers. X86_64ManagedRegister::FromCpuRegister(RBX), X86_64ManagedRegister::FromCpuRegister(RBP), X86_64ManagedRegister::FromCpuRegister(R12), X86_64ManagedRegister::FromCpuRegister(R13), X86_64ManagedRegister::FromCpuRegister(R14), X86_64ManagedRegister::FromCpuRegister(R15), // Hard float registers. X86_64ManagedRegister::FromXmmRegister(XMM12), X86_64ManagedRegister::FromXmmRegister(XMM13), X86_64ManagedRegister::FromXmmRegister(XMM14), X86_64ManagedRegister::FromXmmRegister(XMM15), }; static constexpr uint32_t CalculateCoreCalleeSpillMask() { // The spilled PC gets a special marker. uint32_t result = 1 << kNumberOfCpuRegisters; for (auto&& r : kCalleeSaveRegisters) { if (r.AsX86_64().IsCpuRegister()) { result |= (1 << r.AsX86_64().AsCpuRegister().AsRegister()); } } return result; } static constexpr uint32_t CalculateFpCalleeSpillMask() { uint32_t result = 0; for (auto&& r : kCalleeSaveRegisters) { if (r.AsX86_64().IsXmmRegister()) { result |= (1 << r.AsX86_64().AsXmmRegister().AsFloatRegister()); } } return result; } static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(); static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(); // Calling convention ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() { return X86_64ManagedRegister::FromCpuRegister(RAX); } ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() { return X86_64ManagedRegister::FromCpuRegister(RAX); } ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const { return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop } static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) { if (shorty[0] == 'F' || shorty[0] == 'D') { return X86_64ManagedRegister::FromXmmRegister(XMM0); } else if (shorty[0] == 'J') { return X86_64ManagedRegister::FromCpuRegister(RAX); } else if (shorty[0] == 'V') { return ManagedRegister::NoRegister(); } else { return X86_64ManagedRegister::FromCpuRegister(RAX); } } ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty(), false); } ManagedRegister X86_64JniCallingConvention::ReturnRegister() { return ReturnRegisterForShorty(GetShorty(), true); } ManagedRegister X86_64JniCallingConvention::IntReturnRegister() { return X86_64ManagedRegister::FromCpuRegister(RAX); } // Managed runtime calling convention ManagedRegister 
X86_64ManagedRuntimeCallingConvention::MethodRegister() { return X86_64ManagedRegister::FromCpuRegister(RDI); } bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() { return !IsCurrentParamOnStack(); } bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() { // We assume all parameters are on stack, args coming via registers are spilled as entry_spills return true; } ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() { ManagedRegister res = ManagedRegister::NoRegister(); if (!IsCurrentParamAFloatOrDouble()) { switch (itr_args_ - itr_float_and_doubles_) { case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break; case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break; case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break; case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break; case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break; } } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) { // First eight float parameters are passed via XMM0..XMM7 res = X86_64ManagedRegister::FromXmmRegister( static_cast(XMM0 + itr_float_and_doubles_)); } return res; } FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() { return FrameOffset(displacement_.Int32Value() + // displacement static_cast(kX86_64PointerSize) + // Method ref itr_slots_ * sizeof(uint32_t)); // offset into in args } const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() { // We spill the argument registers on X86 to free them up for scratch use, we then assume // all arguments are on the stack. if (entry_spills_.size() == 0) { ResetIterator(FrameOffset(0)); while (HasNext()) { ManagedRegister in_reg = CurrentParamRegister(); if (!in_reg.IsNoRegister()) { int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4; int32_t spill_offset = CurrentParamStackOffset().Uint32Value(); ManagedRegisterSpill spill(in_reg, size, spill_offset); entry_spills_.push_back(spill); } Next(); } } return entry_spills_; } // JNI calling convention X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty) : JniCallingConvention(is_static, is_synchronized, is_critical_native, shorty, kX86_64PointerSize) { } uint32_t X86_64JniCallingConvention::CoreSpillMask() const { return kCoreCalleeSpillMask; } uint32_t X86_64JniCallingConvention::FpSpillMask() const { return kFpCalleeSpillMask; } size_t X86_64JniCallingConvention::FrameSize() { // Method*, PC return address and callee save area size, local reference segment state const size_t method_ptr_size = static_cast(kX86_64PointerSize); const size_t pc_return_addr_size = kFramePointerSize; const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize; size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size; if (LIKELY(HasLocalReferenceSegmentState())) { // local ref. segment state // Local reference segment state is sometimes excluded. frame_data_size += kFramePointerSize; } // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header const size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount()); size_t total_size = frame_data_size; if (LIKELY(HasHandleScope())) { // HandleScope is sometimes excluded. 
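// Illustrative layout (a sketch, not normative): with kFramePointerSize == 8 and the ten
// callee saves above (RBX, RBP, R12-R15, XMM12-XMM15), frame_data_size is 8 (Method*) +
// 8 (return PC) + 80 (callee saves) + 8 (local reference segment state) = 104 bytes; the
// HandleScope and the return value spill area are then added below and the total is rounded
// up to kStackAlignment.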
total_size += handle_scope_size; // handle scope size } // Plus return value spill area size total_size += SizeOfReturnValue(); return RoundUp(total_size, kStackAlignment); } size_t X86_64JniCallingConvention::OutArgSize() { return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment); } ArrayRef X86_64JniCallingConvention::CalleeSaveRegisters() const { return ArrayRef(kCalleeSaveRegisters); } bool X86_64JniCallingConvention::IsCurrentParamInRegister() { return !IsCurrentParamOnStack(); } bool X86_64JniCallingConvention::IsCurrentParamOnStack() { return CurrentParamRegister().IsNoRegister(); } ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() { ManagedRegister res = ManagedRegister::NoRegister(); if (!IsCurrentParamAFloatOrDouble()) { switch (itr_args_ - itr_float_and_doubles_) { case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break; case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break; case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break; case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break; case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break; case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break; static_assert(5u == kMaxIntLikeRegisterArguments - 1, "Missing case statement(s)"); } } else if (itr_float_and_doubles_ < kMaxFloatOrDoubleRegisterArguments) { // First eight float parameters are passed via XMM0..XMM7 res = X86_64ManagedRegister::FromXmmRegister( static_cast(XMM0 + itr_float_and_doubles_)); } return res; } FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() { CHECK(IsCurrentParamOnStack()); size_t args_on_stack = itr_args_ - std::min(kMaxFloatOrDoubleRegisterArguments, static_cast(itr_float_and_doubles_)) // Float arguments passed through Xmm0..Xmm7 - std::min(kMaxIntLikeRegisterArguments, static_cast(itr_args_ - itr_float_and_doubles_)); // Integer arguments passed through GPR size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize); CHECK_LT(offset, OutArgSize()); return FrameOffset(offset); } // TODO: Calling this "NumberArgs" is misleading. // It's really more like NumberSlots (like itr_slots_) // because doubles/longs get counted twice. size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() { size_t static_args = HasSelfClass() ? 1 : 0; // count jclass // regular argument parameters and this size_t param_args = NumArgs() + NumLongOrDoubleArgs(); // count JNIEnv* and return pc (pushed after Method*) size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */); size_t total_args = static_args + param_args + internal_args; // Float arguments passed through Xmm0..Xmm7 // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9) size_t total_stack_args = total_args - std::min(kMaxFloatOrDoubleRegisterArguments, static_cast(NumFloatOrDoubleArgs())) - std::min(kMaxIntLikeRegisterArguments, static_cast(NumArgs() - NumFloatOrDoubleArgs())); return total_stack_args; } } // namespace x86_64 } // namespace art android-platform-art-8.1.0+r23/compiler/jni/quick/x86_64/calling_convention_x86_64.h000066400000000000000000000063241336577252300277000ustar00rootroot00000000000000/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ #define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ #include "base/enums.h" #include "jni/quick/calling_convention.h" namespace art { namespace x86_64 { class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention { public: X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, PointerSize::k64) {} ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // Managed runtime calling convention ManagedRegister MethodRegister() OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE; private: ManagedRegisterEntrySpills entry_spills_; DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention); }; class X86_64JniCallingConvention FINAL : public JniCallingConvention { public: X86_64JniCallingConvention(bool is_static, bool is_synchronized, bool is_critical_native, const char* shorty); ~X86_64JniCallingConvention() OVERRIDE {} // Calling convention ManagedRegister ReturnRegister() OVERRIDE; ManagedRegister IntReturnRegister() OVERRIDE; ManagedRegister InterproceduralScratchRegister() OVERRIDE; // JNI calling convention size_t FrameSize() OVERRIDE; size_t OutArgSize() OVERRIDE; ArrayRef CalleeSaveRegisters() const OVERRIDE; ManagedRegister ReturnScratchRegister() const OVERRIDE; uint32_t CoreSpillMask() const OVERRIDE; uint32_t FpSpillMask() const OVERRIDE; bool IsCurrentParamInRegister() OVERRIDE; bool IsCurrentParamOnStack() OVERRIDE; ManagedRegister CurrentParamRegister() OVERRIDE; FrameOffset CurrentParamStackOffset() OVERRIDE; // x86-64 needs to extend small return types. bool RequiresSmallResultTypeExtension() const OVERRIDE { return true; } protected: size_t NumberOfOutgoingStackArgs() OVERRIDE; private: DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention); }; } // namespace x86_64 } // namespace art #endif // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_ android-platform-art-8.1.0+r23/compiler/linker/000077500000000000000000000000001336577252300212635ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/linker/arm/000077500000000000000000000000001336577252300220425ustar00rootroot00000000000000android-platform-art-8.1.0+r23/compiler/linker/arm/relative_patcher_arm_base.cc000066400000000000000000000455531336577252300275370ustar00rootroot00000000000000/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "base/stl_util.h"
#include "compiled_method.h"
#include "linker/output_stream.h"
#include "oat.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset)
      : code_(code),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  size_t CodeSize() const {
    return code_.size();
  }

  ArrayRef<const uint8_t> GetCode() const {
    return ArrayRef<const uint8_t>(code_);
  }

  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

 private:
  std::vector<uint8_t> code_;      // The code of the thunk.
  std::vector<uint32_t> offsets_;  // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;       // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;        // The index of the next offset to write.
};

class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
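    // For example, pending offsets {0x3000, 0x1000, 0x2000} heapify with the thunk at
    // 0x1000 at front(), so WriteThunks() emits thunks in ascending offset order.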
return lhs->GetPendingOffset() > rhs->GetPendingOffset(); } }; uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method, MethodReference method_ref) { return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u); } uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) { // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file. // Since we do not know here whether this is the last file or whether the next opportunity // to place thunk will be soon enough, we need to reserve all needed thunks now. Code for // subsequent oat files can still call back to them. if (!unprocessed_method_call_patches_.empty()) { ResolveMethodCalls(offset, MethodReference(nullptr, DexFile::kDexNoIndex)); } for (ThunkData* data : unreserved_thunks_) { uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_); offset = data->ReserveOffset(thunk_offset); } unreserved_thunks_.clear(); // We also need to delay initiating the pending_thunks_ until the call to WriteThunks(). // Check that the `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place. DCHECK_EQ(pending_thunks_.capacity(), 0u); return offset; } uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) { if (pending_thunks_.capacity() == 0u) { if (thunks_.empty()) { return offset; } // First call to WriteThunks(), prepare the thunks for writing. pending_thunks_.reserve(thunks_.size()); for (auto& entry : thunks_) { ThunkData* data = &entry.second; if (data->HasPendingOffset()) { pending_thunks_.push_back(data); } } std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator()); } uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_); while (!pending_thunks_.empty() && pending_thunks_.front()->GetPendingOffset() == aligned_offset) { // Write alignment bytes and code. uint32_t aligned_code_delta = aligned_offset - offset; if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) { return 0u; } if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) { return 0u; } offset = aligned_offset + pending_thunks_.front()->CodeSize(); // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap. std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator()); pending_thunks_.back()->MarkPendingOffsetAsWritten(); if (pending_thunks_.back()->HasPendingOffset()) { std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator()); } else { pending_thunks_.pop_back(); } aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_); } DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset); return offset; } ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider, InstructionSet instruction_set) : provider_(provider), instruction_set_(instruction_set), thunks_(), unprocessed_method_call_patches_(), method_call_thunk_(nullptr), pending_thunks_() { } ArmBaseRelativePatcher::~ArmBaseRelativePatcher() { // All work done by member destructors. } uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset, const CompiledMethod* compiled_method, MethodReference method_ref, uint32_t max_extra_space) { // Adjust code size for extra space required by the subclass. 
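// A sketch of the loop below: each iteration either confirms that every pending thunk can
// still be placed after this method's code (its MaxNextOffset() is not before
// `next_aligned_offset`), or pops the front thunk and reserves it at the current `offset`.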
uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space; uint32_t code_offset; uint32_t next_aligned_offset; while (true) { code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader)); next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size); if (unreserved_thunks_.empty() || unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) { break; } ThunkData* thunk = unreserved_thunks_.front(); if (thunk == method_call_thunk_) { ResolveMethodCalls(code_offset, method_ref); // This may have changed `method_call_thunk_` data, so re-check if we need to reserve. if (unreserved_thunks_.empty() || unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) { break; } // We need to process the new `front()` whether it's still the `method_call_thunk_` or not. thunk = unreserved_thunks_.front(); } unreserved_thunks_.pop_front(); uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_); offset = thunk->ReserveOffset(thunk_offset); if (thunk == method_call_thunk_) { // All remaining method call patches will be handled by this thunk. DCHECK(!unprocessed_method_call_patches_.empty()); DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(), MaxPositiveDisplacement(GetMethodCallKey())); unprocessed_method_call_patches_.clear(); } } // Process patches and check that adding thunks for the current method did not push any // thunks (previously existing or newly added) before `next_aligned_offset`. This is // essentially a check that we never compile a method that's too big. The calls or branches // from the method should be able to reach beyond the end of the method and over any pending // thunks. (The number of different thunks should be relatively low and their code short.) ProcessPatches(compiled_method, code_offset); CHECK(unreserved_thunks_.empty() || unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset); return offset; } uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset, uint32_t target_offset) { DCHECK(method_call_thunk_ != nullptr); // Unsigned arithmetic with its well-defined overflow behavior is just fine here. uint32_t displacement = target_offset - patch_offset; uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey()); uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey()); // NOTE: With unsigned arithmetic we do mean to use && rather than || below. if (displacement > max_positive_displacement && displacement < -max_negative_displacement) { // Unwritten thunks have higher offsets, check if it's within range. DCHECK(!method_call_thunk_->HasPendingOffset() || method_call_thunk_->GetPendingOffset() > patch_offset); if (method_call_thunk_->HasPendingOffset() && method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) { displacement = method_call_thunk_->GetPendingOffset() - patch_offset; } else { // We must have a previous thunk then. 
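// Worked example of the unsigned wraparound: patch_offset == 0x01000000 with the last
// written thunk at 0x00f00000 gives displacement == 0xfff00000, i.e. -0x00100000, which
// satisfies the DCHECK below provided 0x00100000 <= max_negative_displacement.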
DCHECK(method_call_thunk_->HasWrittenOffset()); DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset); displacement = method_call_thunk_->LastWrittenOffset() - patch_offset; DCHECK_GE(displacement, -max_negative_displacement); } } return displacement; } uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) { auto it = thunks_.find(key); CHECK(it != thunks_.end()); const ThunkData& data = it->second; if (data.HasWrittenOffset()) { uint32_t offset = data.LastWrittenOffset(); DCHECK_LT(offset, patch_offset); if (patch_offset - offset <= MaxNegativeDisplacement(key)) { return offset; } } DCHECK(data.HasPendingOffset()); uint32_t offset = data.GetPendingOffset(); DCHECK_GT(offset, patch_offset); DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key)); return offset; } ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetMethodCallKey() { return ThunkKey(ThunkType::kMethodCall); } ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey( const LinkerPatch& patch) { DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch); return ThunkKey(ThunkType::kBakerReadBarrier, patch.GetBakerCustomValue1(), patch.GetBakerCustomValue2()); } void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method, uint32_t code_offset) { for (const LinkerPatch& patch : compiled_method->GetPatches()) { uint32_t patch_offset = code_offset + patch.LiteralOffset(); ThunkKey key(static_cast(-1)); ThunkData* old_data = nullptr; if (patch.GetType() == LinkerPatch::Type::kCallRelative) { key = GetMethodCallKey(); unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod()); if (method_call_thunk_ == nullptr) { uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key); auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset)); method_call_thunk_ = &it->second; AddUnreservedThunk(method_call_thunk_); } else { old_data = method_call_thunk_; } } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) { key = GetBakerThunkKey(patch); auto lb = thunks_.lower_bound(key); if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) { uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key); auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset)); AddUnreservedThunk(&it->second); } else { old_data = &lb->second; } } if (old_data != nullptr) { // Shared path where an old thunk may need an update. DCHECK(key.GetType() != static_cast(-1)); DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset); if (old_data->NeedsNextThunk()) { // Patches for a method are ordered by literal offset, so if we still need to place // this thunk for a previous patch, that thunk shall be in range for this patch. 
DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key)); } else { if (!old_data->HasReservedOffset() || patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key)) { old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key)); AddUnreservedThunk(old_data); } } } } } void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) { DCHECK(data->NeedsNextThunk()); size_t index = unreserved_thunks_.size(); while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) { --index; } unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data); // We may need to update the max next offset(s) if the thunk code would not fit. size_t alignment = GetInstructionSetAlignment(instruction_set_); if (index + 1u != unreserved_thunks_.size()) { // Note: Ignore the return value as we need to process previous thunks regardless. data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment); } // Make space for previous thunks. Once we find a pending thunk that does // not need an adjustment, we can stop. while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) { --index; data = unreserved_thunks_[index]; } } void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset, MethodReference method_ref) { DCHECK(!unreserved_thunks_.empty()); DCHECK(!unprocessed_method_call_patches_.empty()); DCHECK(method_call_thunk_ != nullptr); uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey()); uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey()); // Process as many patches as possible, stop only on unresolved targets or calls too far back. while (!unprocessed_method_call_patches_.empty()) { MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod(); uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset(); DCHECK(!method_call_thunk_->HasReservedOffset() || method_call_thunk_->LastReservedOffset() <= patch_offset); if (!method_call_thunk_->HasReservedOffset() || patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) { // No previous thunk in range, check if we can reach the target directly. if (target_method.dex_file == method_ref.dex_file && target_method.dex_method_index == method_ref.dex_method_index) { DCHECK_GT(quick_code_offset, patch_offset); if (quick_code_offset - patch_offset > max_positive_displacement) { break; } } else { auto result = provider_->FindMethodOffset(target_method); if (!result.first) { break; } uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_); if (target_offset >= patch_offset) { DCHECK_LE(target_offset - patch_offset, max_positive_displacement); } else if (patch_offset - target_offset > max_negative_displacement) { break; } } } unprocessed_method_call_patches_.pop_front(); } if (!unprocessed_method_call_patches_.empty()) { // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively only if // the thunk shall be at the end of the `unreserved_thunks_` to avoid dealing with overlaps. 
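    // (A patch at offset P can reach a thunk at most P + max_positive_displacement bytes
    // ahead, so that is the farthest the deadline can move; the check below applies it only
    // when it lands beyond all other pending thunks.)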
    uint32_t new_max_next_offset = unprocessed_method_call_patches_.front().GetPatchOffset() +
                                   max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls; we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}

inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
                                                               const ThunkKey& key) {
  return RoundDown(patch_offset + MaxPositiveDisplacement(key),
                   GetInstructionSetAlignment(instruction_set_));
}

}  // namespace linker
}  // namespace art
android-platform-art-8.1.0+r23/compiler/linker/arm/relative_patcher_arm_base.h000066400000000000000000000116361336577252300273720ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_
#define ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_

#include <deque>
#include <vector>

#include "linker/relative_patcher.h"
#include "method_reference.h"
#include "safe_map.h"

namespace art {
namespace linker {

class ArmBaseRelativePatcher : public RelativePatcher {
 public:
  uint32_t ReserveSpace(uint32_t offset,
                        const CompiledMethod* compiled_method,
                        MethodReference method_ref) OVERRIDE;
  uint32_t ReserveSpaceEnd(uint32_t offset) OVERRIDE;
  uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE;

 protected:
  ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                         InstructionSet instruction_set);
  ~ArmBaseRelativePatcher();

  enum class ThunkType {
    kMethodCall,        // Method call thunk.
    kBakerReadBarrier,  // Baker read barrier.
  };

  class ThunkKey {
   public:
    explicit ThunkKey(ThunkType type, uint32_t custom_value1 = 0u, uint32_t custom_value2 = 0u)
        : type_(type), custom_value1_(custom_value1), custom_value2_(custom_value2) { }

    ThunkType GetType() const {
      return type_;
    }

    uint32_t GetCustomValue1() const {
      return custom_value1_;
    }

    uint32_t GetCustomValue2() const {
      return custom_value2_;
    }

   private:
    ThunkType type_;
    uint32_t custom_value1_;
    uint32_t custom_value2_;
  };

  class ThunkKeyCompare {
   public:
    bool operator()(const ThunkKey& lhs, const ThunkKey& rhs) const {
      if (lhs.GetType() != rhs.GetType()) {
        return lhs.GetType() < rhs.GetType();
      }
      if (lhs.GetCustomValue1() != rhs.GetCustomValue1()) {
        return lhs.GetCustomValue1() < rhs.GetCustomValue1();
      }
      return lhs.GetCustomValue2() < rhs.GetCustomValue2();
    }
  };

  static ThunkKey GetMethodCallKey();
  static ThunkKey GetBakerThunkKey(const LinkerPatch& patch);

  uint32_t ReserveSpaceInternal(uint32_t offset,
                                const CompiledMethod* compiled_method,
                                MethodReference method_ref,
                                uint32_t max_extra_space);
  uint32_t GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset);

  uint32_t CalculateMethodCallDisplacement(uint32_t patch_offset, uint32_t target_offset);

  virtual std::vector<uint8_t> CompileThunk(const ThunkKey& key) = 0;
  virtual uint32_t MaxPositiveDisplacement(const ThunkKey& key) = 0;
  virtual uint32_t MaxNegativeDisplacement(const ThunkKey& key) = 0;

 private:
  class ThunkData;

  void ProcessPatches(const CompiledMethod* compiled_method, uint32_t code_offset);
  void AddUnreservedThunk(ThunkData* data);

  void ResolveMethodCalls(uint32_t quick_code_offset, MethodReference method_ref);

  uint32_t CalculateMaxNextOffset(uint32_t patch_offset, const ThunkKey& key);

  RelativePatcherTargetProvider* const provider_;
  const InstructionSet instruction_set_;

  // The data for all thunks.
  // SafeMap<> nodes don't move after being inserted, so we can use direct pointers to the data.
  using ThunkMap = SafeMap<ThunkKey, ThunkData, ThunkKeyCompare>;
  ThunkMap thunks_;

  // ReserveSpace() tracks unprocessed method call patches. These may be resolved later.
  class UnprocessedMethodCallPatch {
   public:
    UnprocessedMethodCallPatch(uint32_t patch_offset, MethodReference target_method)
        : patch_offset_(patch_offset), target_method_(target_method) { }

    uint32_t GetPatchOffset() const {
      return patch_offset_;
    }

    MethodReference GetTargetMethod() const {
      return target_method_;
    }

   private:
    uint32_t patch_offset_;
    MethodReference target_method_;
  };

  std::deque<UnprocessedMethodCallPatch> unprocessed_method_call_patches_;

  // Once we have compiled a method call thunk, cache pointer to the data.
  ThunkData* method_call_thunk_;

  // Thunks that do not have a reserved offset yet.
  std::deque<ThunkData*> unreserved_thunks_;

  class PendingThunkComparator;
  std::vector<ThunkData*> pending_thunks_;  // Heap with the PendingThunkComparator.

  friend class Arm64RelativePatcherTest;
  friend class Thumb2RelativePatcherTest;

  DISALLOW_COPY_AND_ASSIGN(ArmBaseRelativePatcher);
};

}  // namespace linker
}  // namespace art

#endif  // ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_ARM_BASE_H_
android-platform-art-8.1.0+r23/compiler/linker/arm/relative_patcher_thumb2.cc000066400000000000000000000533371336577252300271660ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "linker/arm/relative_patcher_thumb2.h" #include "arch/arm/asm_support_arm.h" #include "art_method.h" #include "base/bit_utils.h" #include "compiled_method.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "lock_word.h" #include "mirror/object.h" #include "mirror/array-inl.h" #include "read_barrier.h" #include "utils/arm/assembler_arm_vixl.h" namespace art { namespace linker { // PC displacement from patch location; Thumb2 PC is always at instruction address + 4. static constexpr int32_t kPcDisplacement = 4; // Maximum positive and negative displacement for method call measured from the patch location. // (Signed 25 bit displacement with the last bit 0 has range [-2^24, 2^24-2] measured from // the Thumb2 PC pointing right after the BL, i.e. 4 bytes later than the patch location.) constexpr uint32_t kMaxMethodCallPositiveDisplacement = (1u << 24) - 2 + kPcDisplacement; constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 24) - kPcDisplacement; // Maximum positive and negative displacement for a conditional branch measured from the patch // location. (Signed 21 bit displacement with the last bit 0 has range [-2^20, 2^20-2] measured // from the Thumb2 PC pointing right after the B.cond, i.e. 4 bytes later than the patch location.) constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 2u + kPcDisplacement; constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20) - kPcDisplacement; Thumb2RelativePatcher::Thumb2RelativePatcher(RelativePatcherTargetProvider* provider) : ArmBaseRelativePatcher(provider, kThumb2) { } void Thumb2RelativePatcher::PatchCall(std::vector* code, uint32_t literal_offset, uint32_t patch_offset, uint32_t target_offset) { DCHECK_LE(literal_offset + 4u, code->size()); DCHECK_EQ(literal_offset & 1u, 0u); DCHECK_EQ(patch_offset & 1u, 0u); DCHECK_EQ(target_offset & 1u, 1u); // Thumb2 mode bit. uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u); displacement -= kPcDisplacement; // The base PC is at the end of the 4-byte patch. DCHECK_EQ(displacement & 1u, 0u); DCHECK((displacement >> 24) == 0u || (displacement >> 24) == 255u); // 25-bit signed. uint32_t signbit = (displacement >> 31) & 0x1; uint32_t i1 = (displacement >> 23) & 0x1; uint32_t i2 = (displacement >> 22) & 0x1; uint32_t imm10 = (displacement >> 12) & 0x03ff; uint32_t imm11 = (displacement >> 1) & 0x07ff; uint32_t j1 = i1 ^ (signbit ^ 1); uint32_t j2 = i2 ^ (signbit ^ 1); uint32_t value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) | imm11; value |= 0xf000d000; // BL // Check that we're just overwriting an existing BL. DCHECK_EQ(GetInsn32(code, literal_offset) & 0xf800d000, 0xf000d000); // Write the new BL. 
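// Encoding sanity example: displacement 0 (a branch to the patch location + 4) has
// S == i1 == i2 == 0, hence j1 == j2 == 1, yielding value == 0xf000f800, the canonical
// Thumb2 BL with a zero immediate.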
SetInsn32(code, literal_offset, value); } void Thumb2RelativePatcher::PatchPcRelativeReference(std::vector* code, const LinkerPatch& patch, uint32_t patch_offset, uint32_t target_offset) { uint32_t literal_offset = patch.LiteralOffset(); uint32_t pc_literal_offset = patch.PcInsnOffset(); uint32_t pc_base = patch_offset + (pc_literal_offset - literal_offset) + 4u /* PC adjustment */; uint32_t diff = target_offset - pc_base; uint32_t insn = GetInsn32(code, literal_offset); DCHECK_EQ(insn & 0xff7ff0ffu, 0xf2400000u); // MOVW/MOVT, unpatched (imm16 == 0). uint32_t diff16 = ((insn & 0x00800000u) != 0u) ? (diff >> 16) : (diff & 0xffffu); uint32_t imm4 = (diff16 >> 12) & 0xfu; uint32_t imm = (diff16 >> 11) & 0x1u; uint32_t imm3 = (diff16 >> 8) & 0x7u; uint32_t imm8 = diff16 & 0xffu; insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8; SetInsn32(code, literal_offset, insn); } void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector* code, const LinkerPatch& patch, uint32_t patch_offset) { DCHECK_ALIGNED(patch_offset, 2u); uint32_t literal_offset = patch.LiteralOffset(); DCHECK_ALIGNED(literal_offset, 2u); DCHECK_LT(literal_offset, code->size()); uint32_t insn = GetInsn32(code, literal_offset); DCHECK_EQ(insn, 0xf0408000); // BNE +0 (unpatched) ThunkKey key = GetBakerThunkKey(patch); if (kIsDebugBuild) { const uint32_t encoded_data = key.GetCustomValue1(); BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); // Check that the next instruction matches the expected LDR. switch (kind) { case BakerReadBarrierKind::kField: { BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); if (width == BakerReadBarrierWidth::kWide) { DCHECK_GE(code->size() - literal_offset, 8u); uint32_t next_insn = GetInsn32(code, literal_offset + 4u); // LDR (immediate), encoding T3, with correct base_reg. CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); CHECK_EQ(next_insn & 0xffff0000u, 0xf8d00000u | (base_reg << 16)); } else { DCHECK_GE(code->size() - literal_offset, 6u); uint32_t next_insn = GetInsn16(code, literal_offset + 4u); // LDR (immediate), encoding T1, with correct base_reg. CheckValidReg(next_insn & 0x7u); // Check destination register. const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); CHECK_EQ(next_insn & 0xf838u, 0x6800u | (base_reg << 3)); } break; } case BakerReadBarrierKind::kArray: { DCHECK_GE(code->size() - literal_offset, 8u); uint32_t next_insn = GetInsn32(code, literal_offset + 4u); // LDR (register) with correct base_reg, S=1 and option=011 (LDR Wt, [Xn, Xm, LSL #2]). CheckValidReg((next_insn >> 12) & 0xfu); // Check destination register. const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); CHECK_EQ(next_insn & 0xffff0ff0u, 0xf8500020u | (base_reg << 16)); CheckValidReg(next_insn & 0xf); // Check index register break; } case BakerReadBarrierKind::kGcRoot: { BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data); if (width == BakerReadBarrierWidth::kWide) { DCHECK_GE(literal_offset, 4u); uint32_t prev_insn = GetInsn32(code, literal_offset - 4u); // LDR (immediate), encoding T3, with correct root_reg. 
const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); CHECK_EQ(prev_insn & 0xfff0f000u, 0xf8d00000u | (root_reg << 12)); } else { DCHECK_GE(literal_offset, 2u); uint32_t prev_insn = GetInsn16(code, literal_offset - 2u); // LDR (immediate), encoding T1, with correct root_reg. const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data); CHECK_EQ(prev_insn & 0xf807u, 0x6800u | root_reg); } break; } default: LOG(FATAL) << "Unexpected type: " << static_cast(key.GetType()); UNREACHABLE(); } } uint32_t target_offset = GetThunkTargetOffset(key, patch_offset); DCHECK_ALIGNED(target_offset, 4u); uint32_t disp = target_offset - (patch_offset + kPcDisplacement); DCHECK((disp >> 20) == 0u || (disp >> 20) == 0xfffu); // 21-bit signed. insn |= ((disp << (26 - 20)) & 0x04000000u) | // Shift bit 20 to 26, "S". ((disp >> (19 - 11)) & 0x00000800u) | // Shift bit 19 to 13, "J1". ((disp >> (18 - 13)) & 0x00002000u) | // Shift bit 18 to 11, "J2". ((disp << (16 - 12)) & 0x003f0000u) | // Shift bits 12-17 to 16-25, "imm6". ((disp >> (1 - 0)) & 0x000007ffu); // Shift bits 1-12 to 0-11, "imm11". SetInsn32(code, literal_offset, insn); } #define __ assembler.GetVIXLAssembler()-> static void EmitGrayCheckAndFastPath(arm::ArmVIXLAssembler& assembler, vixl::aarch32::Register base_reg, vixl::aarch32::MemOperand& lock_word, vixl::aarch32::Label* slow_path, int32_t raw_ldr_offset) { using namespace vixl::aarch32; // NOLINT(build/namespaces) // Load the lock word containing the rb_state. __ Ldr(ip, lock_word); // Given the numeric representation, it's enough to check the low bit of the rb_state. static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted)); __ B(ne, slow_path, /* is_far_target */ false); __ Add(lr, lr, raw_ldr_offset); // Introduce a dependency on the lock_word including rb_state, // to prevent load-load reordering, and without using // a memory barrier (which would be more expensive). __ Add(base_reg, base_reg, Operand(ip, LSR, 32)); __ Bx(lr); // And return back to the function. // Note: The fake dependency is unnecessary for the slow path. } // Load the read barrier introspection entrypoint in register `entrypoint` static void LoadReadBarrierMarkIntrospectionEntrypoint(arm::ArmVIXLAssembler& assembler, vixl::aarch32::Register entrypoint) { using vixl::aarch32::MemOperand; using vixl::aarch32::ip; // Thread Register. const vixl::aarch32::Register tr = vixl::aarch32::r9; // The register where the read barrier introspection entrypoint is loaded // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4). DCHECK_EQ(entrypoint.GetCode(), Thumb2RelativePatcher::kBakerCcEntrypointRegister); // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection. DCHECK_EQ(ip.GetCode(), 12u); const int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset(ip.GetCode()); __ Ldr(entrypoint, MemOperand(tr, entry_point_offset)); } void Thumb2RelativePatcher::CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler, uint32_t encoded_data) { using namespace vixl::aarch32; // NOLINT(build/namespaces) BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data); switch (kind) { case BakerReadBarrierKind::kField: { // Check if the holder is gray and, if not, add fake dependency to the base register // and return to the LDR instruction to load the reference. 
#define __ assembler.GetVIXLAssembler()->

static void EmitGrayCheckAndFastPath(arm::ArmVIXLAssembler& assembler,
                                     vixl::aarch32::Register base_reg,
                                     vixl::aarch32::MemOperand& lock_word,
                                     vixl::aarch32::Label* slow_path,
                                     int32_t raw_ldr_offset) {
  using namespace vixl::aarch32;  // NOLINT(build/namespaces)
  // Load the lock word containing the rb_state.
  __ Ldr(ip, lock_word);
  // Given the numeric representation, it's enough to check the low bit of the rb_state.
  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
  __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
  __ B(ne, slow_path, /* is_far_target */ false);
  __ Add(lr, lr, raw_ldr_offset);
  // Introduce a dependency on the lock_word including rb_state,
  // to prevent load-load reordering, and without using
  // a memory barrier (which would be more expensive).
  __ Add(base_reg, base_reg, Operand(ip, LSR, 32));
  __ Bx(lr);  // And return back to the function.
  // Note: The fake dependency is unnecessary for the slow path.
}

// Load the read barrier introspection entrypoint in register `entrypoint`.
static void LoadReadBarrierMarkIntrospectionEntrypoint(arm::ArmVIXLAssembler& assembler,
                                                       vixl::aarch32::Register entrypoint) {
  using vixl::aarch32::MemOperand;
  using vixl::aarch32::ip;
  // Thread Register.
  const vixl::aarch32::Register tr = vixl::aarch32::r9;
  // The register where the read barrier introspection entrypoint is loaded
  // is fixed: `Thumb2RelativePatcher::kBakerCcEntrypointRegister` (R4).
  DCHECK_EQ(entrypoint.GetCode(), Thumb2RelativePatcher::kBakerCcEntrypointRegister);
  // entrypoint = Thread::Current()->pReadBarrierMarkReg12, i.e. pReadBarrierMarkIntrospection.
  DCHECK_EQ(ip.GetCode(), 12u);
  const int32_t entry_point_offset =
      Thread::ReadBarrierMarkEntryPointsOffset<kArmPointerSize>(ip.GetCode());
  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
}
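// Illustrative sketch, not from the original sources: the field thunk below
// re-reads the LDR that follows the patched BNE to recover the field offset.
// For the narrow LDR (encoding T1) the offset is imm5 scaled by 4; for the
// wide LDR (encoding T3) it is the imm12 in the low half-word. Constexpr
// restatements of those extractions; helper names are invented here.
static constexpr uint32_t NarrowLdrOffsetSketch(uint32_t insn16) {
  return ((insn16 >> 6) & 0x1fu) << 2;  // imm5 * 4, cf. the UBFX + LSL #2 below.
}
static constexpr uint32_t WideLdrOffsetSketch(uint32_t low_half_word) {
  return low_half_word & 0xfffu;  // imm12, cf. the 12-bit UBFX below.
}
static_assert(NarrowLdrOffsetSketch(0x6800u | (7u << 6)) == 28u, "LDR rt, [rn, #28]");
static_assert(WideLdrOffsetSketch(0x0008u) == 8u, "LDR.W rt, [rn, #8]");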
void Thumb2RelativePatcher::CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler,
                                                         uint32_t encoded_data) {
  using namespace vixl::aarch32;  // NOLINT(build/namespaces)
  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
  switch (kind) {
    case BakerReadBarrierKind::kField: {
      // Check if the holder is gray and, if not, add fake dependency to the base register
      // and return to the LDR instruction to load the reference. Otherwise, use introspection
      // to load the reference and call the entrypoint (in kBakerCcEntrypointRegister)
      // that performs further checks on the reference and marks it if needed.
      Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      Register holder_reg(BakerReadBarrierSecondRegField::Decode(encoded_data));
      CheckValidReg(holder_reg.GetCode());
      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      // If base_reg differs from holder_reg, the offset was too large and we must have
      // emitted an explicit null check before the load. Otherwise, we need to null-check
      // the holder as we do not necessarily do that check before going to the thunk.
      vixl::aarch32::Label throw_npe;
      if (holder_reg.Is(base_reg)) {
        __ CompareAndBranchIfZero(holder_reg, &throw_npe, /* is_far_target */ false);
      }
      vixl::aarch32::Label slow_path;
      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
      const int32_t raw_ldr_offset = (width == BakerReadBarrierWidth::kWide)
          ? BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET
          : BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET;
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
      __ Bind(&slow_path);
      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
                                 raw_ldr_offset;
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      if (width == BakerReadBarrierWidth::kWide) {
        MemOperand ldr_half_address(lr, ldr_offset + 2);
        __ Ldrh(ip, ldr_half_address);         // Load the LDR immediate half-word with "Rt | imm12".
        __ Ubfx(ip, ip, 0, 12);                // Extract the offset imm12.
        __ Ldr(ip, MemOperand(base_reg, ip));  // Load the reference.
      } else {
        MemOperand ldr_address(lr, ldr_offset);
        __ Ldrh(ip, ldr_address);  // Load the LDR immediate, encoding T1.
        __ Add(ep_reg,             // Adjust the entrypoint address to the entrypoint
               ep_reg,             // for narrow LDR.
               Operand(BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_ENTRYPOINT_OFFSET));
        __ Ubfx(ip, ip, 6, 5);     // Extract the imm5, i.e. offset / 4.
        __ Ldr(ip, MemOperand(base_reg, ip, LSL, 2));  // Load the reference.
      }
      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
      __ Bx(ep_reg);  // Jump to the entrypoint.
      if (holder_reg.Is(base_reg)) {
        // Add null check slow path. The stack map is at the address pointed to by LR.
        __ Bind(&throw_npe);
        int32_t offset = GetThreadOffset<kArmPointerSize>(kQuickThrowNullPointer).Int32Value();
        __ Ldr(ip, MemOperand(/* Thread* */ vixl::aarch32::r9, offset));
        __ Bx(ip);
      }
      break;
    }
    case BakerReadBarrierKind::kArray: {
      Register base_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      DCHECK(BakerReadBarrierWidth::kWide == BakerReadBarrierWidthField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      vixl::aarch32::Label slow_path;
      int32_t data_offset =
          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
      DCHECK_LT(lock_word.GetOffsetImmediate(), 0);
      const int32_t raw_ldr_offset = BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET;
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path, raw_ldr_offset);
      __ Bind(&slow_path);
      const int32_t ldr_offset = /* Thumb state adjustment (LR contains Thumb state). */ -1 +
                                 raw_ldr_offset;
      MemOperand ldr_address(lr, ldr_offset + 2);
      __ Ldrb(ip, ldr_address);  // Load the LDR (register) byte with "00 | imm2 | Rm",
                                 // i.e. Rm+32 because the scale in imm2 is 2.
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      __ Bfi(ep_reg, ip, 3, 6);  // Insert ip to the entrypoint address to create
                                 // a switch case target based on the index register.
      __ Mov(ip, base_reg);      // Move the base register to ip.
      __ Bx(ep_reg);             // Jump to the entrypoint's array switch case.
      break;
    }
    case BakerReadBarrierKind::kGcRoot: {
      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
      // and it does not have a forwarding address), call the correct introspection entrypoint;
      // otherwise return the reference (or the extracted forwarding address).
      // There is no gray bit check for GC roots.
      Register root_reg(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(root_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      BakerReadBarrierWidth width = BakerReadBarrierWidthField::Decode(encoded_data);
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip);
      vixl::aarch32::Label return_label, not_marked, forwarding_address;
      __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
      MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
      __ Ldr(ip, lock_word);
      __ Tst(ip, LockWord::kMarkBitStateMaskShifted);
      __ B(eq, &not_marked);
      __ Bind(&return_label);
      __ Bx(lr);
      __ Bind(&not_marked);
      static_assert(LockWord::kStateShift == 30 && LockWord::kStateForwardingAddress == 3,
                    "To use 'CMP ip, #modified-immediate; BHS', we need the lock word state in "
                    " the highest bits and the 'forwarding address' state to have all bits set");
      __ Cmp(ip, Operand(0xc0000000));
      __ B(hs, &forwarding_address);
      Register ep_reg(kBakerCcEntrypointRegister);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ep_reg);
      // Adjust the art_quick_read_barrier_mark_introspection address in kBakerCcEntrypointRegister
      // to art_quick_read_barrier_mark_introspection_gc_roots.
      int32_t entrypoint_offset = (width == BakerReadBarrierWidth::kWide)
          ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_ENTRYPOINT_OFFSET
          : BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_ENTRYPOINT_OFFSET;
      __ Add(ep_reg, ep_reg, Operand(entrypoint_offset));
      __ Mov(ip, root_reg);
      __ Bx(ep_reg);
      __ Bind(&forwarding_address);
      __ Lsl(root_reg, ip, LockWord::kForwardingAddressShift);
      __ Bx(lr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
      UNREACHABLE();
  }
}
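// Illustrative note, not from the original sources: the 0xc0000000 compared
// against in the GC root case above is the lock word with the two state bits
// (at kStateShift == 30) set to kStateForwardingAddress == 3, exactly as the
// static_assert in that case requires. A compile-time restatement:
static_assert((3u << 30) == 0xc0000000u,
              "CMP ip, #0xc0000000 tests for the 'forwarding address' lock word state");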
std::vector<uint8_t> Thumb2RelativePatcher::CompileThunk(const ThunkKey& key) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  arm::ArmVIXLAssembler assembler(&arena);

  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      // The thunk just uses the entry point in the ArtMethod. This works even for calls
      // to the generic JNI and interpreter trampolines.
      assembler.LoadFromOffset(
          arm::kLoadWord,
          vixl::aarch32::pc,
          vixl::aarch32::r0,
          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
      __ Bkpt(0);
      break;
    case ThunkType::kBakerReadBarrier:
      CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
      break;
  }

  assembler.FinalizeCode();
  std::vector<uint8_t> thunk_code(assembler.CodeSize());
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler.FinalizeInstructions(code);
  return thunk_code;
}

#undef __

uint32_t Thumb2RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallPositiveDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondPositiveDisplacement;
  }
}

uint32_t Thumb2RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallNegativeDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondNegativeDisplacement;
  }
}

void Thumb2RelativePatcher::SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
  DCHECK_LE(offset + 4u, code->size());
  DCHECK_ALIGNED(offset, 2u);
  uint8_t* addr = &(*code)[offset];
  addr[0] = (value >> 16) & 0xff;
  addr[1] = (value >> 24) & 0xff;
  addr[2] = (value >> 0) & 0xff;
  addr[3] = (value >> 8) & 0xff;
}

uint32_t Thumb2RelativePatcher::GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset) {
  DCHECK_LE(offset + 4u, code.size());
  DCHECK_ALIGNED(offset, 2u);
  const uint8_t* addr = &code[offset];
  return
      (static_cast<uint32_t>(addr[0]) << 16) +
      (static_cast<uint32_t>(addr[1]) << 24) +
      (static_cast<uint32_t>(addr[2]) << 0) +
      (static_cast<uint32_t>(addr[3]) << 8);
}

template <typename Vector>
uint32_t Thumb2RelativePatcher::GetInsn32(Vector* code, uint32_t offset) {
  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
  return GetInsn32(ArrayRef<const uint8_t>(*code), offset);
}

uint32_t Thumb2RelativePatcher::GetInsn16(ArrayRef<const uint8_t> code, uint32_t offset) {
  DCHECK_LE(offset + 2u, code.size());
  DCHECK_ALIGNED(offset, 2u);
  const uint8_t* addr = &code[offset];
  return (static_cast<uint32_t>(addr[0]) << 0) + (static_cast<uint32_t>(addr[1]) << 8);
}

template <typename Vector>
uint32_t Thumb2RelativePatcher::GetInsn16(Vector* code, uint32_t offset) {
  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
  return GetInsn16(ArrayRef<const uint8_t>(*code), offset);
}
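// Illustrative sketch, not from the original sources: SetInsn32/GetInsn32
// store a 32-bit Thumb2 instruction as two little-endian half-words, high
// half-word first. A constexpr mirror of GetInsn32's byte combination,
// checked against the bytes of an unpatched MOVW r0, #0 ({0x40, 0xf2, 0x00,
// 0x00}, as used by the tests); the helper name is invented here.
static constexpr uint32_t CombineThumb2BytesSketch(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3) {
  return (static_cast<uint32_t>(b0) << 16) |
         (static_cast<uint32_t>(b1) << 24) |
         (static_cast<uint32_t>(b2) << 0) |
         (static_cast<uint32_t>(b3) << 8);
}
static_assert(CombineThumb2BytesSketch(0x40, 0xf2, 0x00, 0x00) == 0xf2400000u,
              "Half-word little-endian layout of a Thumb2 32-bit instruction");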
}  // namespace linker
}  // namespace art
android-platform-art-8.1.0+r23/compiler/linker/arm/relative_patcher_thumb2.h000066400000000000000000000135351336577252300270240ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
#define ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_

#include "arch/arm/registers_arm.h"
#include "base/array_ref.h"
#include "base/bit_field.h"
#include "base/bit_utils.h"
#include "linker/arm/relative_patcher_arm_base.h"

namespace art {

namespace arm {
class ArmVIXLAssembler;
}  // namespace arm

namespace linker {

class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
 public:
  static constexpr uint32_t kBakerCcEntrypointRegister = 4u;

  static uint32_t EncodeBakerReadBarrierFieldData(uint32_t base_reg,
                                                  uint32_t holder_reg,
                                                  bool narrow) {
    CheckValidReg(base_reg);
    CheckValidReg(holder_reg);
    DCHECK(!narrow || base_reg < 8u) << base_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kField) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(holder_reg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  static uint32_t EncodeBakerReadBarrierArrayData(uint32_t base_reg) {
    CheckValidReg(base_reg);
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kArray) |
           BakerReadBarrierFirstRegField::Encode(base_reg) |
           BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(BakerReadBarrierWidth::kWide);
  }

  static uint32_t EncodeBakerReadBarrierGcRootData(uint32_t root_reg, bool narrow) {
    CheckValidReg(root_reg);
    DCHECK(!narrow || root_reg < 8u) << root_reg;
    BakerReadBarrierWidth width =
        narrow ? BakerReadBarrierWidth::kNarrow : BakerReadBarrierWidth::kWide;
    return BakerReadBarrierKindField::Encode(BakerReadBarrierKind::kGcRoot) |
           BakerReadBarrierFirstRegField::Encode(root_reg) |
           BakerReadBarrierSecondRegField::Encode(kInvalidEncodedReg) |
           BakerReadBarrierWidthField::Encode(width);
  }

  explicit Thumb2RelativePatcher(RelativePatcherTargetProvider* provider);

  void PatchCall(std::vector<uint8_t>* code,
                 uint32_t literal_offset,
                 uint32_t patch_offset,
                 uint32_t target_offset) OVERRIDE;
  void PatchPcRelativeReference(std::vector<uint8_t>* code,
                                const LinkerPatch& patch,
                                uint32_t patch_offset,
                                uint32_t target_offset) OVERRIDE;
  void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                   const LinkerPatch& patch,
                                   uint32_t patch_offset) OVERRIDE;

 protected:
  std::vector<uint8_t> CompileThunk(const ThunkKey& key) OVERRIDE;
  uint32_t MaxPositiveDisplacement(const ThunkKey& key) OVERRIDE;
  uint32_t MaxNegativeDisplacement(const ThunkKey& key) OVERRIDE;

 private:
  static constexpr uint32_t kInvalidEncodedReg = /* pc is invalid */ 15u;

  enum class BakerReadBarrierKind : uint8_t {
    kField,   // Field get or array get with constant offset (i.e. constant index).
    kArray,   // Array get with index in register.
    kGcRoot,  // GC root load.
    kLast = kGcRoot
  };

  enum class BakerReadBarrierWidth : uint8_t {
    kWide,    // 32-bit LDR (and 32-bit NEG if heap poisoning is enabled).
    kNarrow,  // 16-bit LDR (and 16-bit NEG if heap poisoning is enabled).
    kLast = kNarrow
  };

  static constexpr size_t kBitsForBakerReadBarrierKind =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierKind::kLast));
  static constexpr size_t kBitsForRegister = 4u;
  using BakerReadBarrierKindField =
      BitField<BakerReadBarrierKind, 0, kBitsForBakerReadBarrierKind>;
  using BakerReadBarrierFirstRegField =
      BitField<uint32_t, kBitsForBakerReadBarrierKind, kBitsForRegister>;
  using BakerReadBarrierSecondRegField =
      BitField<uint32_t, kBitsForBakerReadBarrierKind + kBitsForRegister, kBitsForRegister>;
  static constexpr size_t kBitsForBakerReadBarrierWidth =
      MinimumBitsToStore(static_cast<size_t>(BakerReadBarrierWidth::kLast));
  using BakerReadBarrierWidthField =
      BitField<BakerReadBarrierWidth,
               kBitsForBakerReadBarrierKind + 2 * kBitsForRegister,
               kBitsForBakerReadBarrierWidth>;

  static void CheckValidReg(uint32_t reg) {
    DCHECK(reg < 12u && reg != kBakerCcEntrypointRegister) << reg;
  }

  void CompileBakerReadBarrierThunk(arm::ArmVIXLAssembler& assembler, uint32_t encoded_data);

  void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
  static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset);

  template <typename Vector>
  static uint32_t GetInsn32(Vector* code, uint32_t offset);

  static uint32_t GetInsn16(ArrayRef<const uint8_t> code, uint32_t offset);

  template <typename Vector>
  static uint32_t GetInsn16(Vector* code, uint32_t offset);

  friend class Thumb2RelativePatcherTest;

  DISALLOW_COPY_AND_ASSIGN(Thumb2RelativePatcher);
};
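// Illustrative sketch, not from the original sources: with the BitField layout
// above (kind in bits 0-1, first register in bits 2-5, second register in
// bits 6-9, width in bit 10), the encoded Baker read barrier data is plain
// bit arithmetic. A hypothetical constexpr mirror with a worked example
// (kField == 0, kWide == 0); the helper name is invented here.
static constexpr uint32_t EncodeFieldDataSketch(uint32_t base_reg,
                                                uint32_t holder_reg,
                                                uint32_t width) {
  return /* kind = kField */ 0u | (base_reg << 2) | (holder_reg << 6) | (width << 10);
}
static_assert(EncodeFieldDataSketch(1u, 2u, /* kWide */ 0u) == 0x84u,
              "base_reg r1, holder_reg r2, wide LDR");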
}  // namespace linker
}  // namespace art

#endif  // ART_COMPILER_LINKER_ARM_RELATIVE_PATCHER_THUMB2_H_
android-platform-art-8.1.0+r23/compiler/linker/arm/relative_patcher_thumb2_test.cc000066400000000000000000001673301336577252300302240ustar00rootroot00000000000000/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/casts.h"
#include "linker/relative_patcher_test.h"
#include "linker/arm/relative_patcher_thumb2.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

class Thumb2RelativePatcherTest : public RelativePatcherTest {
 public:
  Thumb2RelativePatcherTest() : RelativePatcherTest(kThumb2, "default") { }

 protected:
  static const uint8_t kCallRawCode[];
  static const ArrayRef<const uint8_t> kCallCode;
  static const uint8_t kNopRawCode[];
  static const ArrayRef<const uint8_t> kNopCode;
  static const uint8_t kUnpatchedPcRelativeRawCode[];
  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
  static const uint32_t kPcInsnOffset;

  // The PC in Thumb mode is 4 bytes after the instruction location.
  static constexpr uint32_t kPcAdjustment = 4u;

  // Branches within range [-256, 256) can be created from these by adding the low 8 bits.
  static constexpr uint32_t kBlPlus0 = 0xf000f800u;
  static constexpr uint32_t kBlMinus256 = 0xf7ffff00u;

  // Special BL values.
  static constexpr uint32_t kBlPlusMax = 0xf3ffd7ffu;
  static constexpr uint32_t kBlMinusMax = 0xf400d000u;

  // BNE +0, 32-bit, encoding T3. Bits 0-10, 11, 13, 16-21, 26 are placeholder for target offset.
  static constexpr uint32_t kBneWPlus0 = 0xf0408000u;

  // LDR immediate, 16-bit, encoding T1. Bits 6-10 are imm5, 0-2 are Rt, 3-5 are Rn.
  static constexpr uint32_t kLdrInsn = 0x6800u;

  // LDR immediate, 32-bit, encoding T3. Bits 0-11 are offset, 12-15 are Rt, 16-20 are Rn.
static constexpr uint32_t kLdrWInsn = 0xf8d00000u; // LDR immediate, negative offset, encoding T4. Bits 0-7 are the offset to subtract. static constexpr uint32_t kLdrNegativeOffset = 0xf8500c00u; // LDR register, lsl #2. Bits 4-5 are the imm2, i.e. the lsl shift. static constexpr uint32_t kLdrRegLsl2 = 0xf8500020u; // NOP instructions. static constexpr uint32_t kNopInsn = 0xbf00u; static constexpr uint32_t kNopWInsn = 0xf3af8000u; void InsertInsn(std::vector* code, size_t pos, uint32_t insn) { CHECK_LE(pos, code->size()); if (IsUint<16>(insn)) { const uint8_t insn_code[] = { static_cast(insn), static_cast(insn >> 8), }; static_assert(sizeof(insn_code) == 2u, "Invalid sizeof(insn_code)."); code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code)); } else { const uint8_t insn_code[] = { static_cast(insn >> 16), static_cast(insn >> 24), static_cast(insn), static_cast(insn >> 8), }; static_assert(sizeof(insn_code) == 4u, "Invalid sizeof(insn_code)."); code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code)); } } void PushBackInsn(std::vector* code, uint32_t insn) { InsertInsn(code, code->size(), insn); } std::vector GenNops(size_t num_nops) { std::vector result; result.reserve(num_nops * 2u); for (size_t i = 0; i != num_nops; ++i) { PushBackInsn(&result, kNopInsn); } return result; } std::vector RawCode(std::initializer_list insns) { std::vector raw_code; size_t number_of_16_bit_insns = std::count_if(insns.begin(), insns.end(), [](uint32_t x) { return IsUint<16>(x); }); raw_code.reserve(insns.size() * 4u - number_of_16_bit_insns * 2u); for (uint32_t insn : insns) { PushBackInsn(&raw_code, insn); } return raw_code; } uint32_t BneWWithOffset(uint32_t bne_offset, uint32_t target_offset) { if (!IsAligned<2u>(bne_offset)) { LOG(ERROR) << "Unaligned bne_offset: " << bne_offset; return 0xffffffffu; // Fails code diff later. } if (!IsAligned<2u>(target_offset)) { LOG(ERROR) << "Unaligned target_offset: " << target_offset; return 0xffffffffu; // Fails code diff later. } uint32_t diff = target_offset - bne_offset - kPcAdjustment; DCHECK_ALIGNED(diff, 2u); if ((diff >> 20) != 0 && (diff >> 20) != 0xfffu) { LOG(ERROR) << "Target out of range: " << diff; return 0xffffffffu; // Fails code diff later. } return kBneWPlus0 | ((diff >> 1) & 0x7ffu) // imm11 | (((diff >> 12) & 0x3fu) << 16) // imm6 | (((diff >> 18) & 1) << 13) // J1 | (((diff >> 19) & 1) << 11) // J2 | (((diff >> 20) & 1) << 26); // S } bool Create2MethodsWithGap(const ArrayRef& method1_code, const ArrayRef& method1_patches, const ArrayRef& method3_code, const ArrayRef& method3_patches, uint32_t distance_without_thunks) { CHECK_EQ(distance_without_thunks % kArmAlignment, 0u); uint32_t method1_offset = kTrampolineSize + CodeAlignmentSize(kTrampolineSize) + sizeof(OatQuickMethodHeader); AddCompiledMethod(MethodRef(1u), method1_code, method1_patches); // We want to put the method3 at a very precise offset. const uint32_t method3_offset = method1_offset + distance_without_thunks; CHECK_ALIGNED(method3_offset, kArmAlignment); // Calculate size of method2 so that we put method3 at the correct place. 
const uint32_t method1_end = method1_offset + method1_code.size(); const uint32_t method2_offset = method1_end + CodeAlignmentSize(method1_end) + sizeof(OatQuickMethodHeader); const uint32_t method2_size = (method3_offset - sizeof(OatQuickMethodHeader) - method2_offset); std::vector method2_raw_code(method2_size); ArrayRef method2_code(method2_raw_code); AddCompiledMethod(MethodRef(2u), method2_code); AddCompiledMethod(MethodRef(3u), method3_code, method3_patches); Link(); // Check assumptions. CHECK_EQ(GetMethodOffset(1), method1_offset); CHECK_EQ(GetMethodOffset(2), method2_offset); auto result3 = method_offset_map_.FindMethodOffset(MethodRef(3)); CHECK(result3.first); // There may be a thunk before method2. if (result3.second == method3_offset + 1 /* thumb mode */) { return false; // No thunk. } else { uint32_t thunk_end = CompiledCode::AlignCode(method3_offset - sizeof(OatQuickMethodHeader), kThumb2) + MethodCallThunkSize(); uint32_t header_offset = thunk_end + CodeAlignmentSize(thunk_end); CHECK_EQ(result3.second, header_offset + sizeof(OatQuickMethodHeader) + 1 /* thumb mode */); return true; // Thunk present. } } uint32_t GetMethodOffset(uint32_t method_idx) { auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx)); CHECK(result.first); CHECK_NE(result.second & 1u, 0u); return result.second - 1 /* thumb mode */; } std::vector CompileMethodCallThunk() { ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetMethodCallKey(); return static_cast(patcher_.get())->CompileThunk(key); } uint32_t MethodCallThunkSize() { return CompileMethodCallThunk().size(); } bool CheckThunk(uint32_t thunk_offset) { const std::vector expected_code = CompileMethodCallThunk(); if (output_.size() < thunk_offset + expected_code.size()) { LOG(ERROR) << "output_.size() == " << output_.size() << " < " << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size()); return false; } ArrayRef linked_code(&output_[thunk_offset], expected_code.size()); if (linked_code == ArrayRef(expected_code)) { return true; } // Log failure info. 
DumpDiff(ArrayRef(expected_code), linked_code); return false; } std::vector GenNopsAndBl(size_t num_nops, uint32_t bl) { std::vector result; result.reserve(num_nops * 2u + 4u); for (size_t i = 0; i != num_nops; ++i) { PushBackInsn(&result, kNopInsn); } PushBackInsn(&result, bl); return result; } void TestStringBssEntry(uint32_t bss_begin, uint32_t string_entry_offset); void TestStringReference(uint32_t string_offset); void CheckPcRelativePatch(const ArrayRef& patches, uint32_t target_offset); std::vector CompileBakerOffsetThunk(uint32_t base_reg, uint32_t holder_reg, bool narrow) { const LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData(base_reg, holder_reg, narrow)); ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); return down_cast(patcher_.get())->CompileThunk(key); } std::vector CompileBakerArrayThunk(uint32_t base_reg) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)); ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); return down_cast(patcher_.get())->CompileThunk(key); } std::vector CompileBakerGcRootThunk(uint32_t root_reg, bool narrow) { LinkerPatch patch = LinkerPatch::BakerReadBarrierBranchPatch( 0u, Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, narrow)); ArmBaseRelativePatcher::ThunkKey key = ArmBaseRelativePatcher::GetBakerThunkKey(patch); return down_cast(patcher_.get())->CompileThunk(key); } uint32_t GetOutputInsn32(uint32_t offset) { CHECK_LE(offset, output_.size()); CHECK_GE(output_.size() - offset, 4u); return (static_cast(output_[offset]) << 16) | (static_cast(output_[offset + 1]) << 24) | (static_cast(output_[offset + 2]) << 0) | (static_cast(output_[offset + 3]) << 8); } uint16_t GetOutputInsn16(uint32_t offset) { CHECK_LE(offset, output_.size()); CHECK_GE(output_.size() - offset, 2u); return (static_cast(output_[offset]) << 0) | (static_cast(output_[offset + 1]) << 8); } void TestBakerFieldWide(uint32_t offset, uint32_t ref_reg); void TestBakerFieldNarrow(uint32_t offset, uint32_t ref_reg); }; const uint8_t Thumb2RelativePatcherTest::kCallRawCode[] = { 0x00, 0xf0, 0x00, 0xf8 }; const ArrayRef Thumb2RelativePatcherTest::kCallCode(kCallRawCode); const uint8_t Thumb2RelativePatcherTest::kNopRawCode[] = { 0x00, 0xbf }; const ArrayRef Thumb2RelativePatcherTest::kNopCode(kNopRawCode); const uint8_t Thumb2RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = { 0x40, 0xf2, 0x00, 0x00, // MOVW r0, #0 (placeholder) 0xc0, 0xf2, 0x00, 0x00, // MOVT r0, #0 (placeholder) 0x78, 0x44, // ADD r0, pc }; const ArrayRef Thumb2RelativePatcherTest::kUnpatchedPcRelativeCode( kUnpatchedPcRelativeRawCode); const uint32_t Thumb2RelativePatcherTest::kPcInsnOffset = 8u; void Thumb2RelativePatcherTest::TestStringBssEntry(uint32_t bss_begin, uint32_t string_entry_offset) { constexpr uint32_t kStringIndex = 1u; string_index_to_offset_map_.Put(kStringIndex, string_entry_offset); bss_begin_ = bss_begin; const LinkerPatch patches[] = { LinkerPatch::StringBssEntryPatch(0u, nullptr, kPcInsnOffset, kStringIndex), LinkerPatch::StringBssEntryPatch(4u, nullptr, kPcInsnOffset, kStringIndex), }; CheckPcRelativePatch(ArrayRef(patches), bss_begin_ + string_entry_offset); } void Thumb2RelativePatcherTest::TestStringReference(uint32_t string_offset) { constexpr uint32_t kStringIndex = 1u; string_index_to_offset_map_.Put(kStringIndex, string_offset); const LinkerPatch 
patches[] = { LinkerPatch::RelativeStringPatch(0u, nullptr, kPcInsnOffset, kStringIndex), LinkerPatch::RelativeStringPatch(4u, nullptr, kPcInsnOffset, kStringIndex), }; CheckPcRelativePatch(ArrayRef(patches), string_offset); } void Thumb2RelativePatcherTest::CheckPcRelativePatch(const ArrayRef& patches, uint32_t target_offset) { AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef(patches)); Link(); uint32_t method1_offset = GetMethodOffset(1u); uint32_t pc_base_offset = method1_offset + kPcInsnOffset + 4u /* PC adjustment */; uint32_t diff = target_offset - pc_base_offset; // Distribute the bits of the diff between the MOVW and MOVT: uint32_t diffw = diff & 0xffffu; uint32_t difft = diff >> 16; uint32_t movw = 0xf2400000u | // MOVW r0, #0 (placeholder), ((diffw & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, ((diffw & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, ((diffw & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, ((diffw & 0x00ffu)); // keep imm8 at bits 0-7. uint32_t movt = 0xf2c00000u | // MOVT r0, #0 (placeholder), ((difft & 0xf000u) << (16 - 12)) | // move imm4 from bits 12-15 to bits 16-19, ((difft & 0x0800u) << (26 - 11)) | // move imm from bit 11 to bit 26, ((difft & 0x0700u) << (12 - 8)) | // move imm3 from bits 8-10 to bits 12-14, ((difft & 0x00ffu)); // keep imm8 at bits 0-7. const uint8_t expected_code[] = { static_cast(movw >> 16), static_cast(movw >> 24), static_cast(movw >> 0), static_cast(movw >> 8), static_cast(movt >> 16), static_cast(movt >> 24), static_cast(movt >> 0), static_cast(movt >> 8), 0x78, 0x44, }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef(expected_code))); } TEST_F(Thumb2RelativePatcherTest, CallSelf) { const LinkerPatch patches[] = { LinkerPatch::RelativeCodePatch(0u, nullptr, 1u), }; AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef(patches)); Link(); static const uint8_t expected_code[] = { 0xff, 0xf7, 0xfe, 0xff }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef(expected_code))); } TEST_F(Thumb2RelativePatcherTest, CallOther) { const LinkerPatch method1_patches[] = { LinkerPatch::RelativeCodePatch(0u, nullptr, 2u), }; AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef(method1_patches)); const LinkerPatch method2_patches[] = { LinkerPatch::RelativeCodePatch(0u, nullptr, 1u), }; AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef(method2_patches)); Link(); uint32_t method1_offset = GetMethodOffset(1u); uint32_t method2_offset = GetMethodOffset(2u); uint32_t diff_after = method2_offset - (method1_offset + 4u /* PC adjustment */); ASSERT_EQ(diff_after & 1u, 0u); ASSERT_LT(diff_after >> 1, 1u << 8); // Simple encoding, (diff_after >> 1) fits into 8 bits. static const uint8_t method1_expected_code[] = { 0x00, 0xf0, static_cast(diff_after >> 1), 0xf8 }; EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef(method1_expected_code))); uint32_t diff_before = method1_offset - (method2_offset + 4u /* PC adjustment */); ASSERT_EQ(diff_before & 1u, 0u); ASSERT_GE(diff_before, -1u << 9); // Simple encoding, -256 <= (diff >> 1) < 0. 
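  // Illustrative note, not from the original test: per the class comment on
  // kBlMinus256, a short backward BL is formed by adding the low 8 bits of
  // (diff >> 1). For example, a BL branching 4 bytes back (diff == -4):
  static_assert((0xf7ffff00u | ((static_cast<uint32_t>(-4) >> 1) & 0xffu)) == 0xf7fffffeu,
                "kBlMinus256 | 0xfe encodes BL -4");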
auto method2_expected_code = GenNopsAndBl(0u, kBlMinus256 | ((diff_before >> 1) & 0xffu)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef(method2_expected_code))); } TEST_F(Thumb2RelativePatcherTest, CallTrampoline) { const LinkerPatch patches[] = { LinkerPatch::RelativeCodePatch(0u, nullptr, 2u), }; AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef(patches)); Link(); uint32_t method1_offset = GetMethodOffset(1u); uint32_t diff = kTrampolineOffset - (method1_offset + 4u); ASSERT_EQ(diff & 1u, 0u); ASSERT_GE(diff, -1u << 9); // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned). auto expected_code = GenNopsAndBl(0u, kBlMinus256 | ((diff >> 1) & 0xffu)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef(expected_code))); } TEST_F(Thumb2RelativePatcherTest, CallTrampolineTooFar) { constexpr uint32_t missing_method_index = 1024u; auto method3_raw_code = GenNopsAndBl(3u, kBlPlus0); constexpr uint32_t bl_offset_in_method3 = 3u * 2u; // After NOPs. ArrayRef method3_code(method3_raw_code); ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size()); const LinkerPatch method3_patches[] = { LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, missing_method_index), }; constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */; bool thunk_in_gap = Create2MethodsWithGap(kNopCode, ArrayRef(), method3_code, ArrayRef(method3_patches), just_over_max_negative_disp - bl_offset_in_method3); ASSERT_FALSE(thunk_in_gap); // There should be a thunk but it should be after the method2. ASSERT_FALSE(method_offset_map_.FindMethodOffset(MethodRef(missing_method_index)).first); // Check linked code. uint32_t method3_offset = GetMethodOffset(3u); uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2); uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */); ASSERT_EQ(diff & 1u, 0u); ASSERT_LT(diff >> 1, 1u << 8); // Simple encoding, (diff >> 1) fits into 8 bits. auto expected_code = GenNopsAndBl(3u, kBlPlus0 | ((diff >> 1) & 0xffu)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef(expected_code))); EXPECT_TRUE(CheckThunk(thunk_offset)); } TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarAfter) { auto method1_raw_code = GenNopsAndBl(3u, kBlPlus0); constexpr uint32_t bl_offset_in_method1 = 3u * 2u; // After NOPs. ArrayRef method1_code(method1_raw_code); ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size()); const LinkerPatch method1_patches[] = { LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, 3u), }; constexpr uint32_t max_positive_disp = 16 * MB - 2u + 4u /* PC adjustment */; bool thunk_in_gap = Create2MethodsWithGap(method1_code, ArrayRef(method1_patches), kNopCode, ArrayRef(), bl_offset_in_method1 + max_positive_disp); ASSERT_FALSE(thunk_in_gap); // There should be no thunk. // Check linked code. auto expected_code = GenNopsAndBl(3u, kBlPlusMax); EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef(expected_code))); } TEST_F(Thumb2RelativePatcherTest, CallOtherAlmostTooFarBefore) { auto method3_raw_code = GenNopsAndBl(2u, kBlPlus0); constexpr uint32_t bl_offset_in_method3 = 2u * 2u; // After NOPs. 
  ArrayRef<const uint8_t> method3_code(method3_raw_code);
  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
  const LinkerPatch method3_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, 1u),
  };

  constexpr uint32_t max_negative_disp = 16 * MB - 4u /* PC adjustment */;
  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
                                            ArrayRef<const LinkerPatch>(),
                                            method3_code,
                                            ArrayRef<const LinkerPatch>(method3_patches),
                                            max_negative_disp - bl_offset_in_method3);
  ASSERT_FALSE(thunk_in_gap);  // There should be no thunk.

  // Check linked code.
  auto expected_code = GenNopsAndBl(2u, kBlMinusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(2u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 2u * 2u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, 3u),
  };

  constexpr uint32_t just_over_max_positive_disp = 16 * MB + 4u /* PC adjustment */;
  bool thunk_in_gap = Create2MethodsWithGap(method1_code,
                                            ArrayRef<const LinkerPatch>(method1_patches),
                                            kNopCode,
                                            ArrayRef<const LinkerPatch>(),
                                            bl_offset_in_method1 + just_over_max_positive_disp);
  ASSERT_TRUE(thunk_in_gap);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t method3_offset = GetMethodOffset(3u);
  ASSERT_TRUE(IsAligned<kArmAlignment>(method3_offset));
  uint32_t method3_header_offset = method3_offset - sizeof(OatQuickMethodHeader);
  uint32_t thunk_size = MethodCallThunkSize();
  uint32_t thunk_offset = RoundDown(method3_header_offset - thunk_size,
                                    GetInstructionSetAlignment(kThumb2));
  DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
            method3_header_offset);
  ASSERT_TRUE(IsAligned<kArmAlignment>(thunk_offset));
  uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1 + 4u /* PC adjustment */);
  ASSERT_EQ(diff & 1u, 0u);
  ASSERT_GE(diff, 16 * MB - (1u << 9));  // Simple encoding, unknown bits fit into the low 8 bits.
  auto expected_code = GenNopsAndBl(2u, 0xf3ffd700 | ((diff >> 1) & 0xffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  CheckThunk(thunk_offset);
}

TEST_F(Thumb2RelativePatcherTest, CallOtherJustTooFarBefore) {
  auto method3_raw_code = GenNopsAndBl(3u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method3 = 3u * 2u;  // After NOPs.
  ArrayRef<const uint8_t> method3_code(method3_raw_code);
  ASSERT_EQ(bl_offset_in_method3 + 4u, method3_code.size());
  const LinkerPatch method3_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method3, nullptr, 1u),
  };

  constexpr uint32_t just_over_max_negative_disp = 16 * MB + 2 - 4u /* PC adjustment */;
  bool thunk_in_gap = Create2MethodsWithGap(kNopCode,
                                            ArrayRef<const LinkerPatch>(),
                                            method3_code,
                                            ArrayRef<const LinkerPatch>(method3_patches),
                                            just_over_max_negative_disp - bl_offset_in_method3);
  ASSERT_FALSE(thunk_in_gap);  // There should be a thunk but it should be after the method2.

  // Check linked code.
  uint32_t method3_offset = GetMethodOffset(3u);
  uint32_t thunk_offset = CompiledCode::AlignCode(method3_offset + method3_code.size(), kThumb2);
  uint32_t diff = thunk_offset - (method3_offset + bl_offset_in_method3 + 4u /* PC adjustment */);
  ASSERT_EQ(diff & 1u, 0u);
  ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
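  // Illustrative note, not from the original test: the "simple encoding" used
  // below ORs the low 8 bits of (diff >> 1) into kBlPlus0. For diff == 8 this
  // gives BL +8:
  static_assert((0xf000f800u | ((8u >> 1) & 0xffu)) == 0xf000f804u,
                "kBlPlus0 | 0x04 encodes BL +8");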
auto expected_code = GenNopsAndBl(3u, kBlPlus0 | ((diff >> 1) & 0xffu)); EXPECT_TRUE(CheckLinkedMethod(MethodRef(3u), ArrayRef(expected_code))); EXPECT_TRUE(CheckThunk(thunk_offset)); } TEST_F(Thumb2RelativePatcherTest, StringBssEntry1) { TestStringBssEntry(0x00ff0000u, 0x00fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringBssEntry2) { TestStringBssEntry(0x02ff0000u, 0x05fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringBssEntry3) { TestStringBssEntry(0x08ff0000u, 0x08fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringBssEntry4) { TestStringBssEntry(0xd0ff0000u, 0x60fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringReference1) { TestStringReference(0x00ff00fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringReference2) { TestStringReference(0x02ff05fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringReference3) { TestStringReference(0x08ff08fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } TEST_F(Thumb2RelativePatcherTest, StringReference4) { TestStringReference(0xd0ff60fcu); ASSERT_LT(GetMethodOffset(1u), 0xfcu); } void Thumb2RelativePatcherTest::TestBakerFieldWide(uint32_t offset, uint32_t ref_reg) { uint32_t valid_regs[] = { 0, 1, 2, 3, 5, 6, 7, // R4 is reserved for entrypoint address. 8, 9, 10, 11, // IP, SP, LR and PC are reserved. }; DCHECK_ALIGNED(offset, 4u); DCHECK_LT(offset, 4 * KB); constexpr size_t kMethodCodeSize = 8u; constexpr size_t kLiteralOffset = 0u; uint32_t method_idx = 0u; for (uint32_t base_reg : valid_regs) { for (uint32_t holder_reg : valid_regs) { uint32_t ldr = kLdrWInsn | offset | (base_reg << 16) | (ref_reg << 12); const std::vector raw_code = RawCode({kBneWPlus0, ldr}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef code(raw_code); uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( base_reg, holder_reg, /* narrow */ false); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data), }; ++method_idx; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef(patches)); } } Link(); // All thunks are at the end. uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment); method_idx = 0u; for (uint32_t base_reg : valid_regs) { for (uint32_t holder_reg : valid_regs) { ++method_idx; uint32_t bne = BneWWithOffset(GetMethodOffset(method_idx) + kLiteralOffset, thunk_offset); uint32_t ldr = kLdrWInsn | offset | (base_reg << 16) | (ref_reg << 12); const std::vector expected_code = RawCode({bne, ldr}); ASSERT_EQ(kMethodCodeSize, expected_code.size()) << "bne=0x" << std::hex << bne; ASSERT_TRUE( CheckLinkedMethod(MethodRef(method_idx), ArrayRef(expected_code))); std::vector expected_thunk = CompileBakerOffsetThunk(base_reg, holder_reg, /* narrow */ false); ASSERT_GT(output_.size(), thunk_offset); ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); ArrayRef compiled_thunk(output_.data() + thunk_offset, expected_thunk.size()); if (ArrayRef(expected_thunk) != compiled_thunk) { DumpDiff(ArrayRef(expected_thunk), compiled_thunk); ASSERT_TRUE(false); } size_t gray_check_offset = thunk_offset; if (holder_reg == base_reg) { // Verify that the null-check uses the correct register, i.e. holder_reg. 
if (holder_reg < 8) { ASSERT_GE(output_.size() - gray_check_offset, 2u); ASSERT_EQ(0xb100 | holder_reg, GetOutputInsn16(thunk_offset) & 0xfd07u); gray_check_offset +=2u; } else { ASSERT_GE(output_.size() - gray_check_offset, 6u); ASSERT_EQ(0xf1b00f00u | (holder_reg << 16), GetOutputInsn32(thunk_offset) & 0xfbff8f00u); ASSERT_EQ(0xd000u, GetOutputInsn16(thunk_offset + 4u) & 0xff00u); // BEQ gray_check_offset += 6u; } } // Verify that the lock word for gray bit check is loaded from the holder address. ASSERT_GE(output_.size() - gray_check_offset, 4u * /* 32-bit instructions */ 4u + 2u * /* 16-bit instructions */ 2u); const uint32_t load_lock_word = kLdrWInsn | (holder_reg << 16) | (/* IP */ 12 << 12) | mirror::Object::MonitorOffset().Uint32Value(); ASSERT_EQ(load_lock_word, GetOutputInsn32(gray_check_offset)); // Verify the gray bit check. DCHECK_GE(LockWord::kReadBarrierStateShift, 8u); // ROR modified immediate. uint32_t ror_shift = 7 + (32 - LockWord::kReadBarrierStateShift); const uint32_t tst_gray_bit_without_offset = 0xf0100f00 | (/* IP */ 12 << 16) | (((ror_shift >> 4) & 1) << 26) // i | (((ror_shift >> 1) & 7) << 12) // imm3 | ((ror_shift & 1) << 7); // imm8, ROR('1':imm8<7:0>, ror_shift). EXPECT_EQ(tst_gray_bit_without_offset, GetOutputInsn32(gray_check_offset + 4u)); EXPECT_EQ(0xd100u, GetOutputInsn16(gray_check_offset + 8u) & 0xff00u); // BNE // Verify the fake dependency (skip "ADD LR, LR, #ldr_offset"). const uint32_t fake_dependency = 0xeb000010 | // ADD Rd, Rn, Rm, LSR 32 (type=01, imm3=000, imm2=00) (/* IP */ 12) | // Rm = IP (base_reg << 16) | // Rn = base_reg (base_reg << 8); // Rd = base_reg EXPECT_EQ(fake_dependency, GetOutputInsn32(gray_check_offset + 14u)); // Do not check the rest of the implementation. // The next thunk follows on the next aligned offset. thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment); } } } void Thumb2RelativePatcherTest::TestBakerFieldNarrow(uint32_t offset, uint32_t ref_reg) { uint32_t valid_regs[] = { 0, 1, 2, 3, 5, 6, 7, // R4 is reserved for entrypoint address. 8, 9, 10, 11, // IP, SP, LR and PC are reserved. }; DCHECK_ALIGNED(offset, 4u); DCHECK_LT(offset, 32u); constexpr size_t kMethodCodeSize = 6u; constexpr size_t kLiteralOffset = 0u; uint32_t method_idx = 0u; for (uint32_t base_reg : valid_regs) { if (base_reg >= 8u) { continue; } for (uint32_t holder_reg : valid_regs) { uint32_t ldr = kLdrInsn | (offset << (6 - 2)) | (base_reg << 3) | ref_reg; const std::vector raw_code = RawCode({kBneWPlus0, ldr}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef code(raw_code); uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( base_reg, holder_reg, /* narrow */ true); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset, encoded_data), }; ++method_idx; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef(patches)); } } Link(); // All thunks are at the end. 
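  // Illustrative note, not from the original test, and assuming
  // LockWord::kReadBarrierStateShift == 28 (the value in contemporary ART; it
  // is not stated in this file): the TST gray-bit check verified below uses a
  // Thumb2 modified immediate, ROR('1':imm8<7:0>, ror_shift), where
  // ror_shift == 7 + (32 - 28) == 11 rotates 0x80 onto the state bit:
  static_assert(((0x80u >> 11) | (0x80u << (32 - 11))) == (1u << 28),
                "ROR(0x80, 11) selects bit 28");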
uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment); method_idx = 0u; for (uint32_t base_reg : valid_regs) { if (base_reg >= 8u) { continue; } for (uint32_t holder_reg : valid_regs) { ++method_idx; uint32_t bne = BneWWithOffset(GetMethodOffset(method_idx) + kLiteralOffset, thunk_offset); uint32_t ldr = kLdrInsn | (offset << (6 - 2)) | (base_reg << 3) | ref_reg; const std::vector expected_code = RawCode({bne, ldr}); ASSERT_EQ(kMethodCodeSize, expected_code.size()) << "bne=0x" << std::hex << bne; ASSERT_TRUE( CheckLinkedMethod(MethodRef(method_idx), ArrayRef(expected_code))); std::vector expected_thunk = CompileBakerOffsetThunk(base_reg, holder_reg, /* narrow */ true); ASSERT_GT(output_.size(), thunk_offset); ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); ArrayRef compiled_thunk(output_.data() + thunk_offset, expected_thunk.size()); if (ArrayRef(expected_thunk) != compiled_thunk) { DumpDiff(ArrayRef(expected_thunk), compiled_thunk); ASSERT_TRUE(false); } size_t gray_check_offset = thunk_offset; if (holder_reg == base_reg) { // Verify that the null-check uses the correct register, i.e. holder_reg. if (holder_reg < 8) { ASSERT_GE(output_.size() - gray_check_offset, 2u); ASSERT_EQ(0xb100 | holder_reg, GetOutputInsn16(thunk_offset) & 0xfd07u); gray_check_offset +=2u; } else { ASSERT_GE(output_.size() - gray_check_offset, 6u); ASSERT_EQ(0xf1b00f00u | (holder_reg << 16), GetOutputInsn32(thunk_offset) & 0xfbff8f00u); ASSERT_EQ(0xd000u, GetOutputInsn16(thunk_offset + 4u) & 0xff00u); // BEQ gray_check_offset += 6u; } } // Verify that the lock word for gray bit check is loaded from the holder address. ASSERT_GE(output_.size() - gray_check_offset, 4u * /* 32-bit instructions */ 4u + 2u * /* 16-bit instructions */ 2u); const uint32_t load_lock_word = kLdrWInsn | (holder_reg << 16) | (/* IP */ 12 << 12) | mirror::Object::MonitorOffset().Uint32Value(); ASSERT_EQ(load_lock_word, GetOutputInsn32(gray_check_offset)); // Verify the gray bit check. DCHECK_GE(LockWord::kReadBarrierStateShift, 8u); // ROR modified immediate. uint32_t ror_shift = 7 + (32 - LockWord::kReadBarrierStateShift); const uint32_t tst_gray_bit_without_offset = 0xf0100f00 | (/* IP */ 12 << 16) | (((ror_shift >> 4) & 1) << 26) // i | (((ror_shift >> 1) & 7) << 12) // imm3 | ((ror_shift & 1) << 7); // imm8, ROR('1':imm8<7:0>, ror_shift). EXPECT_EQ(tst_gray_bit_without_offset, GetOutputInsn32(gray_check_offset + 4u)); EXPECT_EQ(0xd100u, GetOutputInsn16(gray_check_offset + 8u) & 0xff00u); // BNE // Verify the fake dependency (skip "ADD LR, LR, #ldr_offset"). const uint32_t fake_dependency = 0xeb000010 | // ADD Rd, Rn, Rm, LSR 32 (type=01, imm3=000, imm2=00) (/* IP */ 12) | // Rm = IP (base_reg << 16) | // Rn = base_reg (base_reg << 8); // Rd = base_reg EXPECT_EQ(fake_dependency, GetOutputInsn32(gray_check_offset + 14u)); // Do not check the rest of the implementation. // The next thunk follows on the next aligned offset. 
thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment); } } } #define TEST_BAKER_FIELD_WIDE(offset, ref_reg) \ TEST_F(Thumb2RelativePatcherTest, \ BakerOffsetWide##offset##_##ref_reg) { \ TestBakerFieldWide(offset, ref_reg); \ } TEST_BAKER_FIELD_WIDE(/* offset */ 0, /* ref_reg */ 0) TEST_BAKER_FIELD_WIDE(/* offset */ 8, /* ref_reg */ 3) TEST_BAKER_FIELD_WIDE(/* offset */ 28, /* ref_reg */ 7) TEST_BAKER_FIELD_WIDE(/* offset */ 0xffc, /* ref_reg */ 11) #define TEST_BAKER_FIELD_NARROW(offset, ref_reg) \ TEST_F(Thumb2RelativePatcherTest, \ BakerOffsetNarrow##offset##_##ref_reg) { \ TestBakerFieldNarrow(offset, ref_reg); \ } TEST_BAKER_FIELD_NARROW(/* offset */ 0, /* ref_reg */ 0) TEST_BAKER_FIELD_NARROW(/* offset */ 8, /* ref_reg */ 3) TEST_BAKER_FIELD_NARROW(/* offset */ 28, /* ref_reg */ 7) TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddle) { // One thunk in the middle with maximum distance branches to it from both sides. // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`. constexpr uint32_t kLiteralOffset1 = 6u; const std::vector raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn}); ArrayRef code1(raw_code1); uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; AddCompiledMethod(MethodRef(1u), code1, ArrayRef(patches1)); constexpr uint32_t expected_thunk_offset = kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement */ ((1 << 20) - 2u); static_assert(IsAligned(expected_thunk_offset), "Target offset must be aligned."); size_t filler1_size = expected_thunk_offset - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment); std::vector raw_filler1_code = GenNops(filler1_size / 2u); ArrayRef filler1_code(raw_filler1_code); AddCompiledMethod(MethodRef(2u), filler1_code); // Enforce thunk reservation with a tiny method. AddCompiledMethod(MethodRef(3u), kNopCode); constexpr uint32_t kLiteralOffset2 = 4; static_assert(IsAligned(kLiteralOffset2 + kPcAdjustment), "PC for BNE must be aligned."); // Allow reaching the thunk from the very beginning of a method almost 1MiB away. Backward branch // reaches the full 1MiB but we need to take PC adjustment into account. Things to subtract: // - thunk size and method 3 pre-header, rounded up (padding in between if needed) // - method 3 code and method 4 pre-header, rounded up (padding in between if needed) // - method 4 header (let there be no padding between method 4 code and method 5 pre-header). 
size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false).size(); size_t filler2_size = 1 * MB - (kLiteralOffset2 + kPcAdjustment) - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmAlignment) - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmAlignment) - sizeof(OatQuickMethodHeader); std::vector raw_filler2_code = GenNops(filler2_size / 2u); ArrayRef filler2_code(raw_filler2_code); AddCompiledMethod(MethodRef(4u), filler2_code); const std::vector raw_code2 = RawCode({kNopWInsn, kBneWPlus0, kLdrWInsn}); ArrayRef code2(raw_code2); const LinkerPatch patches2[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data), }; AddCompiledMethod(MethodRef(5u), code2, ArrayRef(patches2)); Link(); uint32_t first_method_offset = GetMethodOffset(1u); uint32_t last_method_offset = GetMethodOffset(5u); EXPECT_EQ(2 * MB, last_method_offset - first_method_offset); const uint32_t bne_max_forward = kBneWPlus0 | 0x003f2fff; const uint32_t bne_max_backward = kBneWPlus0 | 0x04000000; const std::vector expected_code1 = RawCode({kNopWInsn, kNopInsn, bne_max_forward, kLdrWInsn}); const std::vector expected_code2 = RawCode({kNopWInsn, bne_max_backward, kLdrWInsn}); ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef(expected_code1))); ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef(expected_code2))); } TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkBeforeFiller) { // Based on the first part of BakerOffsetThunkInTheMiddle but the BNE is one instruction // earlier, so the thunk is emitted before the filler. // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`. constexpr uint32_t kLiteralOffset1 = 4u; const std::vector raw_code1 = RawCode({kNopWInsn, kBneWPlus0, kLdrWInsn, kNopInsn}); ArrayRef code1(raw_code1); uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; AddCompiledMethod(MethodRef(1u), code1, ArrayRef(patches1)); constexpr uint32_t expected_thunk_offset = kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement + 2 */ (1u << 20); static_assert(IsAligned(expected_thunk_offset), "Target offset must be aligned."); size_t filler1_size = expected_thunk_offset - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment); std::vector raw_filler1_code = GenNops(filler1_size / 2u); ArrayRef filler1_code(raw_filler1_code); AddCompiledMethod(MethodRef(2u), filler1_code); Link(); const uint32_t bne = BneWWithOffset(kLiteralOffset1, RoundUp(raw_code1.size(), kArmAlignment)); const std::vector expected_code1 = RawCode({kNopWInsn, bne, kLdrWInsn, kNopInsn}); ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef(expected_code1))); } TEST_F(Thumb2RelativePatcherTest, BakerOffsetThunkInTheMiddleUnreachableFromLast) { // Based on the BakerOffsetThunkInTheMiddle but the BNE in the last method is preceded // by NOP and cannot reach the thunk in the middle, so we emit an extra thunk at the end. // Use offset = 0, base_reg = 0, ref_reg = 0, the LDR is simply `kLdrWInsn`. 
constexpr uint32_t kLiteralOffset1 = 6u; const std::vector raw_code1 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn}); ArrayRef code1(raw_code1); uint32_t encoded_data = Thumb2RelativePatcher::EncodeBakerReadBarrierFieldData( /* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false); const LinkerPatch patches1[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset1, encoded_data), }; AddCompiledMethod(MethodRef(1u), code1, ArrayRef(patches1)); constexpr uint32_t expected_thunk_offset = kLiteralOffset1 + kPcAdjustment + /* kMaxBcondPositiveDisplacement */ ((1 << 20) - 2u); static_assert(IsAligned(expected_thunk_offset), "Target offset must be aligned."); size_t filler1_size = expected_thunk_offset - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment); std::vector raw_filler1_code = GenNops(filler1_size / 2u); ArrayRef filler1_code(raw_filler1_code); AddCompiledMethod(MethodRef(2u), filler1_code); // Enforce thunk reservation with a tiny method. AddCompiledMethod(MethodRef(3u), kNopCode); constexpr uint32_t kReachableFromOffset2 = 4; constexpr uint32_t kLiteralOffset2 = kReachableFromOffset2 + 2; static_assert(IsAligned(kReachableFromOffset2 + kPcAdjustment), "PC for BNE must be aligned."); // If not for the extra NOP, this would allow reaching the thunk from the BNE // of a method 1MiB away. Backward branch reaches the full 1MiB but we need to take // PC adjustment into account. Things to subtract: // - thunk size and method 3 pre-header, rounded up (padding in between if needed) // - method 3 code and method 4 pre-header, rounded up (padding in between if needed) // - method 4 header (let there be no padding between method 4 code and method 5 pre-header). size_t thunk_size = CompileBakerOffsetThunk(/* base_reg */ 0, /* holder_reg */ 0, /* narrow */ false).size(); size_t filler2_size = 1 * MB - (kReachableFromOffset2 + kPcAdjustment) - RoundUp(thunk_size + sizeof(OatQuickMethodHeader), kArmAlignment) - RoundUp(kNopCode.size() + sizeof(OatQuickMethodHeader), kArmAlignment) - sizeof(OatQuickMethodHeader); std::vector raw_filler2_code = GenNops(filler2_size / 2u); ArrayRef filler2_code(raw_filler2_code); AddCompiledMethod(MethodRef(4u), filler2_code); // Extra 16-bit NOP compared to BakerOffsetThunkInTheMiddle. const std::vector raw_code2 = RawCode({kNopWInsn, kNopInsn, kBneWPlus0, kLdrWInsn}); ArrayRef code2(raw_code2); const LinkerPatch patches2[] = { LinkerPatch::BakerReadBarrierBranchPatch(kLiteralOffset2, encoded_data), }; AddCompiledMethod(MethodRef(5u), code2, ArrayRef(patches2)); Link(); uint32_t first_method_offset = GetMethodOffset(1u); uint32_t last_method_offset = GetMethodOffset(5u); EXPECT_EQ(2 * MB, last_method_offset - first_method_offset); const uint32_t bne_max_forward = kBneWPlus0 | 0x003f2fff; const uint32_t bne_last = BneWWithOffset(kLiteralOffset2, RoundUp(raw_code2.size(), kArmAlignment)); const std::vector expected_code1 = RawCode({kNopWInsn, kNopInsn, bne_max_forward, kLdrWInsn}); const std::vector expected_code2 = RawCode({kNopWInsn, kNopInsn, bne_last, kLdrWInsn}); ASSERT_TRUE(CheckLinkedMethod(MethodRef(1), ArrayRef(expected_code1))); ASSERT_TRUE(CheckLinkedMethod(MethodRef(5), ArrayRef(expected_code2))); } TEST_F(Thumb2RelativePatcherTest, BakerArray) { uint32_t valid_regs[] = { 0, 1, 2, 3, 5, 6, 7, // R4 is reserved for entrypoint address. 8, 9, 10, 11, // IP, SP, LR and PC are reserved. }; auto ldr = [](uint32_t base_reg) { uint32_t index_reg = (base_reg == 0u) ? 1u : 0u; uint32_t ref_reg = (base_reg == 2) ? 
3u : 2u; return kLdrRegLsl2 | index_reg | (base_reg << 16) | (ref_reg << 12); }; constexpr size_t kMethodCodeSize = 8u; constexpr size_t kLiteralOffset = 0u; uint32_t method_idx = 0u; for (uint32_t base_reg : valid_regs) { ++method_idx; const std::vector raw_code = RawCode({kBneWPlus0, ldr(base_reg)}); ASSERT_EQ(kMethodCodeSize, raw_code.size()); ArrayRef code(raw_code); const LinkerPatch patches[] = { LinkerPatch::BakerReadBarrierBranchPatch( kLiteralOffset, Thumb2RelativePatcher::EncodeBakerReadBarrierArrayData(base_reg)), }; AddCompiledMethod(MethodRef(method_idx), code, ArrayRef(patches)); } Link(); // All thunks are at the end. uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment); method_idx = 0u; for (uint32_t base_reg : valid_regs) { ++method_idx; uint32_t bne = BneWWithOffset(GetMethodOffset(method_idx) + kLiteralOffset, thunk_offset); const std::vector expected_code = RawCode({bne, ldr(base_reg)}); ASSERT_EQ(kMethodCodeSize, expected_code.size()); EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef(expected_code))); std::vector expected_thunk = CompileBakerArrayThunk(base_reg); ASSERT_GT(output_.size(), thunk_offset); ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size()); ArrayRef compiled_thunk(output_.data() + thunk_offset, expected_thunk.size()); if (ArrayRef(expected_thunk) != compiled_thunk) { DumpDiff(ArrayRef(expected_thunk), compiled_thunk); ASSERT_TRUE(false); } // Verify that the lock word for gray bit check is loaded from the correct address // before the base_reg which points to the array data. ASSERT_GE(output_.size() - thunk_offset, 4u * /* 32-bit instructions */ 4u + 2u * /* 16-bit instructions */ 2u); int32_t data_offset = mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value(); int32_t offset = mirror::Object::MonitorOffset().Int32Value() - data_offset; ASSERT_LT(offset, 0); ASSERT_GT(offset, -256); const uint32_t load_lock_word = kLdrNegativeOffset | (-offset & 0xffu) | (base_reg << 16) | (/* IP */ 12 << 12); EXPECT_EQ(load_lock_word, GetOutputInsn32(thunk_offset)); // Verify the gray bit check. DCHECK_GE(LockWord::kReadBarrierStateShift, 8u); // ROR modified immediate. uint32_t ror_shift = 7 + (32 - LockWord::kReadBarrierStateShift); const uint32_t tst_gray_bit_without_offset = 0xf0100f00 | (/* IP */ 12 << 16) | (((ror_shift >> 4) & 1) << 26) // i | (((ror_shift >> 1) & 7) << 12) // imm3 | ((ror_shift & 1) << 7); // imm8, ROR('1':imm8<7:0>, ror_shift). EXPECT_EQ(tst_gray_bit_without_offset, GetOutputInsn32(thunk_offset + 4u)); EXPECT_EQ(0xd100u, GetOutputInsn16(thunk_offset + 8u) & 0xff00u); // BNE // Verify the fake dependency. const uint32_t fake_dependency = 0xeb000010 | // ADD Rd, Rn, Rm, LSR 32 (type=01, imm3=000, imm2=00) (/* IP */ 12) | // Rm = IP (base_reg << 16) | // Rn = base_reg (base_reg << 8); // Rd = base_reg EXPECT_EQ(fake_dependency, GetOutputInsn32(thunk_offset + 14u)); // Do not check the rest of the implementation. // The next thunk follows on the next aligned offset. thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment); } } TEST_F(Thumb2RelativePatcherTest, BakerGcRootWide) { uint32_t valid_regs[] = { 0, 1, 2, 3, 5, 6, 7, // R4 is reserved for entrypoint address. 8, 9, 10, 11, // IP, SP, LR and PC are reserved. 
  };
  constexpr size_t kMethodCodeSize = 8u;
  constexpr size_t kLiteralOffset = 4u;
  uint32_t method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t ldr = kLdrWInsn | (/* offset */ 8) | (/* base_reg */ 0 << 16) | (root_reg << 12);
    const std::vector<uint8_t> raw_code = RawCode({ldr, kBneWPlus0});
    ASSERT_EQ(kMethodCodeSize, raw_code.size());
    ArrayRef<const uint8_t> code(raw_code);
    const LinkerPatch patches[] = {
        LinkerPatch::BakerReadBarrierBranchPatch(
            kLiteralOffset,
            Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ false)),
    };
    AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
  }
  Link();

  // All thunks are at the end.
  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
  method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t bne = BneWWithOffset(GetMethodOffset(method_idx) + kLiteralOffset, thunk_offset);
    uint32_t ldr = kLdrWInsn | (/* offset */ 8) | (/* base_reg */ 0 << 16) | (root_reg << 12);
    const std::vector<uint8_t> expected_code = RawCode({ldr, bne});
    ASSERT_EQ(kMethodCodeSize, expected_code.size());
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));

    std::vector<uint8_t> expected_thunk = CompileBakerGcRootThunk(root_reg, /* narrow */ false);
    ASSERT_GT(output_.size(), thunk_offset);
    ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
    ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset, expected_thunk.size());
    if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
      ASSERT_TRUE(false);
    }

    // Verify that the fast-path null-check uses the correct register, i.e. root_reg.
    if (root_reg < 8) {
      ASSERT_GE(output_.size() - thunk_offset, 2u);
      ASSERT_EQ(0xb100 | root_reg, GetOutputInsn16(thunk_offset) & 0xfd07u);
    } else {
      ASSERT_GE(output_.size() - thunk_offset, 6u);
      ASSERT_EQ(0xf1b00f00u | (root_reg << 16), GetOutputInsn32(thunk_offset) & 0xfbff8f00u);
      ASSERT_EQ(0xd000u, GetOutputInsn16(thunk_offset + 4u) & 0xff00u);  // BEQ
    }
    // Do not check the rest of the implementation.

    // The next thunk follows on the next aligned offset.
    thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
  }
}

TEST_F(Thumb2RelativePatcherTest, BakerGcRootNarrow) {
  uint32_t valid_regs[] = {
      0, 1, 2, 3, 5, 6, 7,  // R4 is reserved for entrypoint address.
                            // Not applicable to high registers.
  };
  constexpr size_t kMethodCodeSize = 6u;
  constexpr size_t kLiteralOffset = 2u;
  uint32_t method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t ldr = kLdrInsn | (/* offset */ 8 << (6 - 2)) | (/* base_reg */ 0 << 3) | root_reg;
    const std::vector<uint8_t> raw_code = RawCode({ldr, kBneWPlus0});
    ASSERT_EQ(kMethodCodeSize, raw_code.size());
    ArrayRef<const uint8_t> code(raw_code);
    const LinkerPatch patches[] = {
        LinkerPatch::BakerReadBarrierBranchPatch(
            kLiteralOffset,
            Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(root_reg, /* narrow */ true)),
    };
    AddCompiledMethod(MethodRef(method_idx), code, ArrayRef<const LinkerPatch>(patches));
  }
  Link();

  // All thunks are at the end.
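  // Illustrative note, not from the original test: the null-check verification
  // below matches a 16-bit CBZ (encoding T1). For example, CBZ r3, +0 is
  // 0xb103, and masking with 0xfd07 keeps only the opcode and register bits:
  static_assert((0xb103u & 0xfd07u) == (0xb100u | 3u),
                "CBZ r3 matches the 0xb100 | root_reg pattern");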
  uint32_t thunk_offset = GetMethodOffset(method_idx) + RoundUp(kMethodCodeSize, kArmAlignment);
  method_idx = 0u;
  for (uint32_t root_reg : valid_regs) {
    ++method_idx;
    uint32_t bne = BneWWithOffset(GetMethodOffset(method_idx) + kLiteralOffset, thunk_offset);
    uint32_t ldr = kLdrInsn | (/* offset */ 8 << (6 - 2)) | (/* base_reg */ 0 << 3) | root_reg;
    const std::vector<uint8_t> expected_code = RawCode({ldr, bne});
    ASSERT_EQ(kMethodCodeSize, expected_code.size());
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(expected_code)));

    std::vector<uint8_t> expected_thunk = CompileBakerGcRootThunk(root_reg, /* narrow */ true);
    ASSERT_GT(output_.size(), thunk_offset);
    ASSERT_GE(output_.size() - thunk_offset, expected_thunk.size());
    ArrayRef<const uint8_t> compiled_thunk(output_.data() + thunk_offset, expected_thunk.size());
    if (ArrayRef<const uint8_t>(expected_thunk) != compiled_thunk) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk), compiled_thunk);
      ASSERT_TRUE(false);
    }

    // Verify that the fast-path null-check CBZ uses the correct register, i.e. root_reg.
    ASSERT_GE(output_.size() - thunk_offset, 2u);
    ASSERT_EQ(0xb100 | root_reg, GetOutputInsn16(thunk_offset) & 0xfd07u);
    // Do not check the rest of the implementation.

    // The next thunk follows on the next aligned offset.
    thunk_offset += RoundUp(expected_thunk.size(), kArmAlignment);
  }
}

TEST_F(Thumb2RelativePatcherTest, BakerGcRootOffsetBits) {
  // Test 1MiB of patches to the same thunk to stress-test different large offsets.
  // (The low bits are not that important but the location of the high bits is easy to get wrong.)
  std::vector<uint8_t> code;
  code.reserve(1 * MB);
  const size_t num_patches = 1 * MB / 8u;
  std::vector<LinkerPatch> patches;
  patches.reserve(num_patches);
  const uint32_t ldr =
      kLdrWInsn | (/* offset */ 8) | (/* base_reg */ 0 << 16) | (/* root_reg */ 0 << 12);
  uint32_t encoded_data =
      Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 0, /* narrow */ false);
  for (size_t i = 0; i != num_patches; ++i) {
    PushBackInsn(&code, ldr);
    PushBackInsn(&code, kBneWPlus0);
    patches.push_back(LinkerPatch::BakerReadBarrierBranchPatch(8u * i + 4u, encoded_data));
  }
  ASSERT_EQ(1 * MB, code.size());
  ASSERT_EQ(num_patches, patches.size());
  AddCompiledMethod(MethodRef(1u),
                    ArrayRef<const uint8_t>(code),
                    ArrayRef<const LinkerPatch>(patches));
  Link();

  // The thunk is right after the method code.
  DCHECK_ALIGNED(1 * MB, kArmAlignment);
  std::vector<uint8_t> expected_code;
  for (size_t i = 0; i != num_patches; ++i) {
    PushBackInsn(&expected_code, ldr);
    PushBackInsn(&expected_code, BneWWithOffset(8u * i + 4u, 1 * MB));
    patches.push_back(LinkerPatch::BakerReadBarrierBranchPatch(8u * i + 4u, encoded_data));
  }
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Thumb2RelativePatcherTest, BakerAndMethodCallInteraction) {
  // During development, there was a `DCHECK_LE(MaxNextOffset(), next_thunk.MaxNextOffset());`
  // in `ArmBaseRelativePatcher::ThunkData::MakeSpaceBefore()` which does not necessarily
  // hold when we're reserving thunks of different sizes. This test exposes the situation
  // by using Baker thunks and a method call thunk.

  // Add a method call patch that can reach to method 1 offset + 16MiB.
  uint32_t method_idx = 0u;
  constexpr size_t kMethodCallLiteralOffset = 2u;
  constexpr uint32_t kMissingMethodIdx = 2u;
  const std::vector<uint8_t> raw_code1 = RawCode({kNopInsn, kBlPlus0});
  const LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(kMethodCallLiteralOffset, nullptr, 2u),
  };
  ArrayRef<const uint8_t> code1(raw_code1);
  ++method_idx;
  AddCompiledMethod(MethodRef(1u), code1, ArrayRef<const LinkerPatch>(method1_patches));

  // Skip kMissingMethodIdx.
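  // A call to the missing method cannot be patched to a direct branch target, so the
  // linker has to route it through a method call thunk; that is what lets this test
  // mix thunks of different sizes.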
  ++method_idx;
  ASSERT_EQ(kMissingMethodIdx, method_idx);

  // Add a method with the right size so that the method code for the next one starts 1MiB
  // after code for method 1.
  size_t filler_size =
      1 * MB - RoundUp(raw_code1.size() + sizeof(OatQuickMethodHeader), kArmAlignment)
             - sizeof(OatQuickMethodHeader);
  std::vector<uint8_t> filler_code = GenNops(filler_size / 2u);
  ++method_idx;
  AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));

  // Add 14 methods with 1MiB code+header, making the code for the next method start 1MiB
  // before the currently scheduled MaxNextOffset() for the method call thunk.
  for (uint32_t i = 0; i != 14; ++i) {
    filler_size = 1 * MB - sizeof(OatQuickMethodHeader);
    filler_code = GenNops(filler_size / 2u);
    ++method_idx;
    AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(filler_code));
  }

  // Add 2 Baker GC root patches to the last method, one that would allow the thunk at
  // 1MiB + kArmAlignment, i.e. kArmAlignment after the method call thunk, and the
  // second that needs it kArmAlignment after that. Given the size of the GC root thunk
  // is more than the space required by the method call thunk plus kArmAlignment,
  // this pushes the first GC root thunk's pending MaxNextOffset() before the method call
  // thunk's pending MaxNextOffset() which needs to be adjusted.
  ASSERT_LT(RoundUp(CompileMethodCallThunk().size(), kArmAlignment) + kArmAlignment,
            CompileBakerGcRootThunk(/* root_reg */ 0, /* narrow */ false).size());
  static_assert(kArmAlignment == 8, "Code below assumes kArmAlignment == 8");
  constexpr size_t kBakerLiteralOffset1 = kArmAlignment + 2u - kPcAdjustment;
  constexpr size_t kBakerLiteralOffset2 = kBakerLiteralOffset1 + kArmAlignment;
  // Use offset = 0, base_reg = 0, the LDR is simply `kLdrWInsn | (root_reg << 12)`.
  const uint32_t ldr1 = kLdrWInsn | (/* root_reg */ 1 << 12);
  const uint32_t ldr2 = kLdrWInsn | (/* root_reg */ 2 << 12);
  const std::vector<uint8_t> last_method_raw_code = RawCode({
      kNopInsn,            // Padding before first GC root read barrier.
      ldr1, kBneWPlus0,    // First GC root LDR with read barrier.
      ldr2, kBneWPlus0,    // Second GC root LDR with read barrier.
  });
  uint32_t encoded_data1 =
      Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 1, /* narrow */ false);
  uint32_t encoded_data2 =
      Thumb2RelativePatcher::EncodeBakerReadBarrierGcRootData(/* root_reg */ 2, /* narrow */ false);
  const LinkerPatch last_method_patches[] = {
      LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset1, encoded_data1),
      LinkerPatch::BakerReadBarrierBranchPatch(kBakerLiteralOffset2, encoded_data2),
  };
  ++method_idx;
  AddCompiledMethod(MethodRef(method_idx),
                    ArrayRef<const uint8_t>(last_method_raw_code),
                    ArrayRef<const LinkerPatch>(last_method_patches));

  // The main purpose of the test is to check that Link() does not cause a crash.
  Link();
  ASSERT_EQ(15 * MB, GetMethodOffset(method_idx) - GetMethodOffset(1u));
}

}  // namespace linker
}  // namespace art

android-platform-art-8.1.0+r23/compiler/linker/arm64/relative_patcher_arm64.cc

/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm64/relative_patcher_arm64.h"

#include "arch/arm64/asm_support_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "base/bit_utils.h"
#include "compiled_method.h"
#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "linker/output_stream.h"
#include "lock_word.h"
#include "mirror/object.h"
#include "mirror/array-inl.h"
#include "oat.h"
#include "oat_quick_method_header.h"
#include "read_barrier.h"
#include "utils/arm64/assembler_arm64.h"

namespace art {
namespace linker {

namespace {

// Maximum positive and negative displacement for method call measured from the patch location.
// (Signed 28 bit displacement with the last two bits 0 has range [-2^27, 2^27-4] measured from
// the ARM64 PC pointing to the BL.)
constexpr uint32_t kMaxMethodCallPositiveDisplacement = (1u << 27) - 4u;
constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 27);

// Maximum positive and negative displacement for a conditional branch measured from the patch
// location. (Signed 21 bit displacement with the last two bits 0 has range [-2^20, 2^20-4]
// measured from the ARM64 PC pointing to the B.cond.)
constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 4u;
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20);

// The ADRP thunk for erratum 843419 is 2 instructions, i.e. 8 bytes.
constexpr uint32_t kAdrpThunkSize = 8u;

inline bool IsAdrpPatch(const LinkerPatch& patch) {
  switch (patch.GetType()) {
    case LinkerPatch::Type::kCall:
    case LinkerPatch::Type::kCallRelative:
    case LinkerPatch::Type::kBakerReadBarrierBranch:
      return false;
    case LinkerPatch::Type::kMethodRelative:
    case LinkerPatch::Type::kMethodBssEntry:
    case LinkerPatch::Type::kTypeRelative:
    case LinkerPatch::Type::kTypeBssEntry:
    case LinkerPatch::Type::kStringRelative:
    case LinkerPatch::Type::kStringBssEntry:
      return patch.LiteralOffset() == patch.PcInsnOffset();
  }
}

inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
  if (num_adrp == 0u) {
    return 0u;
  }
  uint32_t alignment_bytes = CompiledMethod::AlignCode(code_size, kArm64) - code_size;
  return kAdrpThunkSize * num_adrp + alignment_bytes;
}

}  // anonymous namespace

Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                           const Arm64InstructionSetFeatures* features)
    : ArmBaseRelativePatcher(provider, kArm64),
      fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
      reserved_adrp_thunks_(0u),
      processed_adrp_thunks_(0u) {
  if (fix_cortex_a53_843419_) {
    adrp_thunk_locations_.reserve(16u);
    current_method_thunks_.reserve(16u * kAdrpThunkSize);
  }
}

uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
                                            const CompiledMethod* compiled_method,
                                            MethodReference method_ref) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
    return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
  }

  // Add thunks for previous method if any.
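  // (Worked example with assumed values: at offset 0x1234 with two pending ADRP thunks,
  // AlignCode() rounds up to the 16-byte ARM64 code alignment boundary 0x1240 and the
  // reservation below adds 2 * kAdrpThunkSize == 16 bytes, placing the thunks at
  // 0x1240 and 0x1248.)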
  if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
    size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
    offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
    reserved_adrp_thunks_ = adrp_thunk_locations_.size();
  }

  // Count the number of ADRP insns as the upper bound on the number of thunks needed
  // and use it to reserve space for other linker patches.
  size_t num_adrp = 0u;
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      ++num_adrp;
    }
  }
  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
  uint32_t max_extra_space = MaxExtraSpace(num_adrp, code.size());
  offset = ReserveSpaceInternal(offset, compiled_method, method_ref, max_extra_space);
  if (num_adrp == 0u) {
    return offset;
  }

  // Now that we have the actual offset where the code will be placed, locate the ADRP insns
  // that actually require the thunk.
  uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
  uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      uint32_t patch_offset = quick_code_offset + patch.LiteralOffset();
      if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) {
        adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset);
        thunk_offset += kAdrpThunkSize;
      }
    }
  }
  return offset;
}

uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
  } else {
    // Add thunks for the last method if any.
    if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
      size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
      offset = CompiledMethod::AlignCode(offset, kArm64) + kAdrpThunkSize * num_adrp_thunks;
      reserved_adrp_thunks_ = adrp_thunk_locations_.size();
    }
  }
  return ArmBaseRelativePatcher::ReserveSpaceEnd(offset);
}

uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (fix_cortex_a53_843419_) {
    if (!current_method_thunks_.empty()) {
      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, kArm64);
      if (kIsDebugBuild) {
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        for (size_t i = 0u; i != num_thunks; ++i) {
          const auto& entry = adrp_thunk_locations_[processed_adrp_thunks_ - num_thunks + i];
          CHECK_EQ(entry.second, aligned_offset + i * kAdrpThunkSize);
        }
      }
      uint32_t aligned_code_delta = aligned_offset - offset;
      if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
        return 0u;
      }
      if (!WriteMiscThunk(out, ArrayRef<const uint8_t>(current_method_thunks_))) {
        return 0u;
      }
      offset = aligned_offset + current_method_thunks_.size();
      current_method_thunks_.clear();
    }
  }
  return ArmBaseRelativePatcher::WriteThunks(out, offset);
}

void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
                                     uint32_t literal_offset,
                                     uint32_t patch_offset,
                                     uint32_t target_offset) {
  DCHECK_LE(literal_offset + 4u, code->size());
  DCHECK_EQ(literal_offset & 3u, 0u);
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
  DCHECK_EQ(displacement & 3u, 0u);
  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
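  // Worked example with assumed values: a forward displacement of 0x100 bytes gives
  // imm26 = 0x100 >> 2 = 0x40, so the patched instruction below becomes
  // 0x94000000 | 0x40 == 0x94000040, i.e. BL <pc + 0x100>.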
  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
  insn |= 0x94000000;  // BL

  // Check that we're just overwriting an existing BL.
  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
  // Write the new BL.
  SetInsn(code, literal_offset, insn);
}

void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
                                                    const LinkerPatch& patch,
                                                    uint32_t patch_offset,
                                                    uint32_t target_offset) {
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t literal_offset = patch.LiteralOffset();
  uint32_t insn = GetInsn(code, literal_offset);
  uint32_t pc_insn_offset = patch.PcInsnOffset();
  uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
  bool wide = (insn & 0x40000000) != 0;
  uint32_t shift = wide ? 3u : 2u;
  if (literal_offset == pc_insn_offset) {
    // Check it's an ADRP with imm == 0 (unset).
    DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
        << literal_offset << ", " << pc_insn_offset << ", 0x" << std::hex << insn;
    if (fix_cortex_a53_843419_ &&
        processed_adrp_thunks_ != adrp_thunk_locations_.size() &&
        adrp_thunk_locations_[processed_adrp_thunks_].first == patch_offset) {
      DCHECK(NeedsErratum843419Thunk(ArrayRef<const uint8_t>(*code),
                                     literal_offset,
                                     patch_offset));
      uint32_t thunk_offset = adrp_thunk_locations_[processed_adrp_thunks_].second;
      uint32_t adrp_disp = target_offset - (thunk_offset & ~0xfffu);
      uint32_t adrp = PatchAdrp(insn, adrp_disp);

      uint32_t out_disp = thunk_offset - patch_offset;
      DCHECK_EQ(out_disp & 3u, 0u);
      DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u);  // 28-bit signed.
      insn = (out_disp & 0x0fffffffu) >> shift;
      insn |= 0x14000000;  // B <thunk>

      uint32_t back_disp = -out_disp;
      DCHECK_EQ(back_disp & 3u, 0u);
      DCHECK((back_disp >> 27) == 0u || (back_disp >> 27) == 31u);  // 28-bit signed.
      uint32_t b_back = (back_disp & 0x0fffffffu) >> 2;
      b_back |= 0x14000000;  // B <back>
      size_t thunks_code_offset = current_method_thunks_.size();
      current_method_thunks_.resize(thunks_code_offset + kAdrpThunkSize);
      SetInsn(&current_method_thunks_, thunks_code_offset, adrp);
      SetInsn(&current_method_thunks_, thunks_code_offset + 4u, b_back);
      static_assert(kAdrpThunkSize == 2 * 4u, "thunk has 2 instructions");

      processed_adrp_thunks_ += 1u;
    } else {
      insn = PatchAdrp(insn, disp);
    }
    // Write the new ADRP (or B to the erratum 843419 thunk).
    SetInsn(code, literal_offset, insn);
  } else {
    if ((insn & 0xfffffc00) == 0x91000000) {
      // ADD immediate, 64-bit with imm12 == 0 (unset).
      if (!kEmitCompilerReadBarrier) {
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
      } else {
        // With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
               patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      }
      shift = 0u;  // No shift for ADD.
    } else {
      // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
      DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
             patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
             patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
    }
    if (kIsDebugBuild) {
      uint32_t adrp = GetInsn(code, pc_insn_offset);
      if ((adrp & 0x9f000000u) != 0x90000000u) {
        CHECK(fix_cortex_a53_843419_);
        CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
        for (size_t i = processed_adrp_thunks_ - num_thunks; ; ++i) {
          CHECK_NE(i, processed_adrp_thunks_);
          if (adrp_thunk_locations_[i].first == b_offset) {
            size_t idx = num_thunks - (processed_adrp_thunks_ - i);
            adrp = GetInsn(&current_method_thunks_, idx * kAdrpThunkSize);
            break;
          }
        }
      }
      CHECK_EQ(adrp & 0x9f00001fu,                    // Check that pc_insn_offset points
               0x90000000 | ((insn >> 5) & 0x1fu));   // to ADRP with matching register.
    }
    uint32_t imm12 = (disp & 0xfffu) >> shift;
    insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
    SetInsn(code, literal_offset, insn);
  }
}

void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                       const LinkerPatch& patch,
                                                       uint32_t patch_offset) {
  DCHECK_ALIGNED(patch_offset, 4u);
  uint32_t literal_offset = patch.LiteralOffset();
  DCHECK_ALIGNED(literal_offset, 4u);
  DCHECK_LT(literal_offset, code->size());
  uint32_t insn = GetInsn(code, literal_offset);
  DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000);  // CBNZ Xt, +0 (unpatched)
  ThunkKey key = GetBakerThunkKey(patch);
  if (kIsDebugBuild) {
    const uint32_t encoded_data = key.GetCustomValue1();
    BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
    // Check that the next instruction matches the expected LDR.
    switch (kind) {
      case BakerReadBarrierKind::kField: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (immediate) with correct base_reg.
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
        break;
      }
      case BakerReadBarrierKind::kArray: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
        // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
        CheckValidReg((next_insn >> 16) & 0x1f);  // Check index register.
        break;
      }
      case BakerReadBarrierKind::kGcRoot: {
        DCHECK_GE(literal_offset, 4u);
        uint32_t prev_insn = GetInsn(code, literal_offset - 4u);
        // LDR (immediate) with correct root_reg.
        const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
        break;
      }
      default:
        LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
        UNREACHABLE();
    }
  }
  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
  DCHECK_ALIGNED(target_offset, 4u);
  uint32_t disp = target_offset - patch_offset;
  DCHECK((disp >> 20) == 0u || (disp >> 20) == 4095u);  // 21-bit signed.
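  // Worked example with assumed values: disp = 0x20 shifts to (0x20 << 3) & 0x00ffffe0
  // == 0x100, i.e. imm19 = 8 in bits 5-23, making the CBNZ branch 0x20 bytes forward
  // to the thunk.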
  insn |= (disp << (5 - 2)) & 0x00ffffe0u;  // Shift bits 2-20 to 5-23.
  SetInsn(code, literal_offset, insn);
}

#define __ assembler.GetVIXLAssembler()->

static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
                                     vixl::aarch64::Register base_reg,
                                     vixl::aarch64::MemOperand& lock_word,
                                     vixl::aarch64::Label* slow_path) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  // Load the lock word containing the rb_state.
  __ Ldr(ip0.W(), lock_word);
  // Given the numeric representation, it's enough to check the low bit of the rb_state.
  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
  __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
  static_assert(
      BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
      "Field and array LDR offsets must be the same to reuse the same code.");
  // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
  static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
                "Field LDR must be 1 instruction (4B) before the return address label; "
                " 2 instructions (8B) for heap poisoning.");
  __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
  // Introduce a dependency on the lock_word including rb_state,
  // to prevent load-load reordering, and without using
  // a memory barrier (which would be more expensive).
  __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
  __ Br(lr);          // And return back to the function.
  // Note: The fake dependency is unnecessary for the slow path.
}

// Load the read barrier introspection entrypoint in register `entrypoint`.
static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
                                                       vixl::aarch64::Register entrypoint) {
  using vixl::aarch64::MemOperand;
  using vixl::aarch64::ip0;
  // Thread Register.
  const vixl::aarch64::Register tr = vixl::aarch64::x19;
  // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
  DCHECK_EQ(ip0.GetCode(), 16u);
  const int32_t entry_point_offset =
      Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
}

void Arm64RelativePatcher::CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler,
                                                        uint32_t encoded_data) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
  switch (kind) {
    case BakerReadBarrierKind::kField: {
      // Check if the holder is gray and, if not, add fake dependency to the base register
      // and return to the LDR instruction to load the reference. Otherwise, use introspection
      // to load the reference and call the entrypoint (in IP1) that performs further checks
      // on the reference and marks it if needed.
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      auto holder_reg =
          Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
      CheckValidReg(holder_reg.GetCode());
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      // If base_reg differs from holder_reg, the offset was too large and we must have
      // emitted an explicit null check before the load. Otherwise, we need to null-check
      // the holder as we do not necessarily do that check before going to the thunk.
      vixl::aarch64::Label throw_npe;
      if (holder_reg.Is(base_reg)) {
        __ Cbz(holder_reg.W(), &throw_npe);
      }
      vixl::aarch64::Label slow_path;
      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);         // Load the LDR (immediate) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0.W(), ip0.W(), 10, 12);    // Extract the offset.
      __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2));   // Load the reference.
      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
      __ Br(ip1);                           // Jump to the entrypoint.
      if (holder_reg.Is(base_reg)) {
        // Add null check slow path. The stack map is at the address pointed to by LR.
        __ Bind(&throw_npe);
        int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
        __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
        __ Br(ip0);
      }
      break;
    }
    case BakerReadBarrierKind::kArray: {
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label slow_path;
      int32_t data_offset =
          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
      DCHECK_LT(lock_word.GetOffset(), 0);
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);         // Load the LDR (register) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0, ip0, 16, 6);             // Extract the index register, plus 32 (bit 21 is set).
      __ Bfi(ip1, ip0, 3, 6);               // Insert ip0 to the entrypoint address to create
                                            // a switch case target based on the index register.
      __ Mov(ip0, base_reg);                // Move the base register to ip0.
      __ Br(ip1);                           // Jump to the entrypoint's array switch case.
      break;
    }
    case BakerReadBarrierKind::kGcRoot: {
      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
      // and it does not have a forwarding address), call the correct introspection entrypoint;
      // otherwise return the reference (or the extracted forwarding address).
      // There is no gray bit check for GC roots.
      auto root_reg =
          Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(root_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label return_label, not_marked, forwarding_address;
      __ Cbz(root_reg, &return_label);
      MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
      __ Ldr(ip0.W(), lock_word);
      __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
      __ Bind(&return_label);
      __ Br(lr);
      __ Bind(&not_marked);
      __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
      __ B(&forwarding_address, mi);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
      // art_quick_read_barrier_mark_introspection_gc_roots.
      __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
      __ Mov(ip0.W(), root_reg);
      __ Br(ip1);
      __ Bind(&forwarding_address);
      __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
      __ Br(lr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
      UNREACHABLE();
  }
}

std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
  ArenaPool pool;
  ArenaAllocator arena(&pool);
  arm64::Arm64Assembler assembler(&arena);

  switch (key.GetType()) {
    case ThunkType::kMethodCall: {
      // The thunk just uses the entry point in the ArtMethod. This works even for calls
      // to the generic JNI and interpreter trampolines.
      Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArm64PointerSize).Int32Value());
      assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
      break;
    }
    case ThunkType::kBakerReadBarrier: {
      CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
      break;
    }
  }

  // Ensure we emit the literal pool.
  assembler.FinalizeCode();
  std::vector<uint8_t> thunk_code(assembler.CodeSize());
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler.FinalizeInstructions(code);
  return thunk_code;
}

#undef __

uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallPositiveDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondPositiveDisplacement;
  }
}

uint32_t Arm64RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallNegativeDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondNegativeDisplacement;
  }
}

uint32_t Arm64RelativePatcher::PatchAdrp(uint32_t adrp, uint32_t disp) {
  return (adrp & 0x9f00001fu) |   // Clear offset bits, keep ADRP with destination reg.
      // Bottom 12 bits are ignored, the next 2 lowest bits are encoded in bits 29-30.
      ((disp & 0x00003000u) << (29 - 12)) |
      // The next 16 bits are encoded in bits 5-22.
      ((disp & 0xffffc000u) >> (12 + 2 - 5)) |
      // Since the target_offset is based on the beginning of the oat file and the
      // image space precedes the oat file, the target_offset into image space will
      // be negative yet passed as uint32_t. Therefore we limit the displacement
      // to +-2GiB (rather than the maximum +-4GiB) and determine the sign bit from
      // the highest bit of the displacement. This is encoded in bit 23.
      ((disp & 0x80000000u) >> (31 - 23));
}

bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
                                                   uint32_t literal_offset,
                                                   uint32_t patch_offset) {
  DCHECK_EQ(patch_offset & 0x3u, 0u);
  if ((patch_offset & 0xff8) == 0xff8) {  // ...ff8 or ...ffc
    uint32_t adrp = GetInsn(code, literal_offset);
    DCHECK_EQ(adrp & 0x9f000000, 0x90000000);
    uint32_t next_offset = patch_offset + 4u;
    uint32_t next_insn = GetInsn(code, literal_offset + 4u);

    // Below we avoid patching sequences where the adrp is followed by a load which can easily
    // be proved to be aligned.

    // First check if the next insn is the LDR using the result of the ADRP.
    // LDR <Wt>, [<Xn>, #pimm], where <Xn> == ADRP destination reg.
    if ((next_insn & 0xffc00000) == 0xb9400000 &&
        (((next_insn >> 5) ^ adrp) & 0x1f) == 0) {
      return false;
    }

    // And since LinkerPatch::Type::k{Method,Type,String}Relative is using the result
    // of the ADRP for an ADD immediate, check for that as well. We generalize a bit
    // to include ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination
    // or stores the result to a different register.
    if ((next_insn & 0x1f000000) == 0x11000000 &&
        ((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) {
      return false;
    }

    // LDR <Wt>,